From b7c2728c570e02d5a39e3ee9206bd8de4334d537 Mon Sep 17 00:00:00 2001 From: victorseptember <2452508171@qq.com> Date: Thu, 19 Oct 2023 20:47:25 +0800 Subject: [PATCH] [fix]tools-v2: update branch --- .bazelrc | 16 + .../{clang-format.yaml => clang-format.yml} | 2 +- .github/workflows/cppcheck.yml | 24 + .gitignore | 3 + Makefile | 20 +- curve-ansible/README.md | 4 +- curvefs/docker/openeuler/Dockerfile | 9 + curvefs/docker/openeuler/entrypoint.sh | 136 + .../provisioning/dashboards/client.json | 5053 ++++++++++------- curvefs/proto/common.proto | 8 + curvefs/proto/metaserver.proto | 9 + curvefs/src/client/common/common.cpp | 7 +- curvefs/src/client/common/common.h | 15 +- curvefs/src/client/curve_fuse_op.cpp | 247 +- curvefs/src/client/filesystem/meta.cpp | 1 - curvefs/src/client/fuse_client.h | 54 +- .../src/client/kvclient/kvclient_manager.h | 10 +- curvefs/src/client/metric/client_metric.h | 21 +- .../src/client/s3/client_s3_cache_manager.cpp | 16 +- curvefs/src/client/s3/disk_cache_manager.cpp | 34 +- curvefs/src/client/s3/disk_cache_manager.h | 30 +- .../src/client/s3/disk_cache_manager_impl.cpp | 1 + curvefs/src/client/s3/disk_cache_read.cpp | 2 +- curvefs/src/client/s3/disk_cache_write.cpp | 3 +- curvefs/src/client/warmup/warmup_manager.cpp | 101 +- curvefs/src/client/warmup/warmup_manager.h | 86 +- curvefs/src/client/xattr_manager.cpp | 3 +- curvefs/src/mds/fs_manager.cpp | 15 +- curvefs/src/mds/fs_manager.h | 2 + curvefs/src/mds/schedule/scheduler.cpp | 2 +- curvefs/src/mds/topology/topology.cpp | 22 +- curvefs/src/mds/topology/topology.h | 7 +- curvefs/src/mds/topology/topology_item.h | 10 + curvefs/src/mds/topology/topology_manager.cpp | 5 +- .../src/metaserver/copyset/copyset_node.cpp | 62 +- curvefs/src/metaserver/copyset/copyset_node.h | 16 +- .../src/metaserver/copyset/meta_operator.cpp | 77 +- .../src/metaserver/copyset/meta_operator.h | 64 +- curvefs/src/metaserver/dentry_manager.cpp | 61 +- curvefs/src/metaserver/dentry_manager.h | 16 +- 
curvefs/src/metaserver/dentry_storage.cpp | 760 ++- curvefs/src/metaserver/dentry_storage.h | 100 +- curvefs/src/metaserver/inode_manager.cpp | 231 +- curvefs/src/metaserver/inode_manager.h | 96 +- curvefs/src/metaserver/inode_storage.cpp | 527 +- curvefs/src/metaserver/inode_storage.h | 134 +- curvefs/src/metaserver/metastore.cpp | 207 +- curvefs/src/metaserver/metastore.h | 135 +- curvefs/src/metaserver/metastore_fstream.cpp | 182 +- curvefs/src/metaserver/metastore_fstream.h | 17 - curvefs/src/metaserver/partition.cpp | 250 +- curvefs/src/metaserver/partition.h | 78 +- curvefs/src/metaserver/s3compact_inode.cpp | 5 +- curvefs/src/metaserver/storage/converter.cpp | 107 +- curvefs/src/metaserver/storage/converter.h | 16 + curvefs/src/metaserver/storage/dumpfile.cpp | 2 +- curvefs/src/metaserver/storage/dumpfile.h | 4 + .../metaserver/storage/rocksdb_storage.cpp | 8 +- .../src/metaserver/storage/storage_fstream.h | 5 + curvefs/src/metaserver/transaction.cpp | 119 +- curvefs/src/metaserver/transaction.h | 30 +- curvefs/src/metaserver/trash.cpp | 2 +- .../create/curvefs_create_topology_tool.cpp | 8 +- curvefs/src/volume/free_extents.cpp | 2 +- curvefs/test/client/test_fuse_s3_client.cpp | 33 +- curvefs/test/mds/fs_manager_test.cpp | 9 +- curvefs/test/mds/mds_service_test.cpp | 3 +- .../mds/topology/test_topology_manager.cpp | 99 + .../copyset/concurrent_apply_queue_test.cpp | 1 + .../copyset/copyset_node_block_group_test.cpp | 2 +- .../copyset/copyset_node_snapshot_test.cpp | 220 +- .../metaserver/copyset/meta_operator_test.cpp | 116 +- .../test/metaserver/dentry_manager_test.cpp | 60 +- .../test/metaserver/dentry_storage_test.cpp | 356 +- .../test/metaserver/inode_manager_test.cpp | 153 +- .../test/metaserver/inode_storage_test.cpp | 174 +- curvefs/test/metaserver/metastore_test.cpp | 436 +- .../test/metaserver/mock/mock_kv_storage.h | 23 +- curvefs/test/metaserver/mock/mock_metastore.h | 123 +- curvefs/test/metaserver/mock/mock_partition.h | 2 +- 
.../test/metaserver/partition_clean_test.cpp | 36 +- curvefs/test/metaserver/partition_test.cpp | 87 +- .../test/metaserver/recycle_cleaner_test.cpp | 24 +- .../test/metaserver/recycle_manager_test.cpp | 19 +- .../metaserver/s3compact/s3compact_test.cpp | 12 +- curvefs/test/metaserver/space/utils.h | 4 + .../test/metaserver/storage/iterator_test.cpp | 2 +- curvefs/test/metaserver/transaction_test.cpp | 124 +- curvefs/test/metaserver/trash_test.cpp | 32 +- curvefs_python/configure.sh | 8 +- docker/openeuler/Dockerfile | 21 + docker/openeuler/base/Dockerfile | 19 + docker/openeuler/base/Makefile | 4 + docker/openeuler/compile/Dockerfile | 36 + docker/openeuler/compile/Makefile | 4 + docker/openeuler/curve-tgt/Dockerfile | 30 + docker/openeuler/curve-tgt/Makefile | 7 + docker/openeuler/entrypoint.sh | 137 + docs/cn/build_and_run.md | 10 +- docs/en/build_and_run_en.md | 10 +- docs/practical/curvebs_csi.md | 2 +- mk-deb.sh | 352 -- mk-tar.sh | 333 -- nebd/src/common/configuration.cpp | 3 +- nebd/src/part2/metafile_manager.cpp | 1 - replace-curve-repo.sh | 6 + robot/init_env.sh | 2 +- scripts/ci/check_coverage.sh | 83 + src/chunkserver/copyset_node_manager.cpp | 11 +- src/client/client_config.cpp | 14 +- src/common/configuration.cpp | 3 +- src/common/s3_adapter.h | 6 +- src/mds/schedule/leaderScheduler.cpp | 4 +- src/mds/schedule/recoverScheduler.cpp | 4 +- src/mds/schedule/scheduler.cpp | 1 - src/mds/schedule/scheduler_helper.cpp | 8 +- src/tools/curve_format_main.cpp | 4 +- src/tools/mds_client.cpp | 5 +- src/tools/status_tool.cpp | 1 + thirdparties/etcdclient/Makefile | 14 +- tools-v2/Makefile | 2 + tools-v2/README.md | 78 +- tools-v2/go.mod | 3 +- tools-v2/go.sum | 7 +- tools-v2/internal/error/error.go | 11 +- tools-v2/internal/utils/mountpoint.go | 11 + tools-v2/internal/utils/row.go | 2 + tools-v2/internal/utils/snapshot.go | 3 + .../curvebs/delete/volume/clone/clone.go | 18 +- .../curvebs/delete/volume/recover/recover.go | 18 +- 
.../pkg/cli/command/curvebs/update/update.go | 7 +- .../curvebs/update/volume/flatten/flatten.go | 123 + .../command/curvebs/update/volume/volume.go | 50 + .../pkg/cli/command/curvefs/warmup/add/add.go | 55 +- .../command/curvefs/warmup/cancel/cancel.go | 149 + .../cli/command/curvefs/warmup/list/list.go | 171 + .../pkg/cli/command/curvefs/warmup/warmup.go | 4 + tools-v2/pkg/config/bs.go | 8 + tools/curvefsTool.cpp | 27 +- ut.sh | 6 +- util/basic.sh | 0 util/build.sh | 9 +- util/build_in_image.sh | 30 +- util/check.sh | 0 util/cppcheck/cppcheck.suppressions | 41 + util/docker.sh | 14 +- util/docker_opts.sh | 16 + util/image.sh | 0 util/install.sh | 0 util/package.sh | 405 ++ util/playground.sh | 0 util/servicectl.sh | 0 util/test.sh | 0 util/ut_in_image.sh | 41 +- 154 files changed, 8781 insertions(+), 5120 deletions(-) rename .github/workflows/{clang-format.yaml => clang-format.yml} (95%) create mode 100644 .github/workflows/cppcheck.yml create mode 100644 curvefs/docker/openeuler/Dockerfile create mode 100644 curvefs/docker/openeuler/entrypoint.sh create mode 100644 docker/openeuler/Dockerfile create mode 100644 docker/openeuler/base/Dockerfile create mode 100644 docker/openeuler/base/Makefile create mode 100644 docker/openeuler/compile/Dockerfile create mode 100644 docker/openeuler/compile/Makefile create mode 100644 docker/openeuler/curve-tgt/Dockerfile create mode 100644 docker/openeuler/curve-tgt/Makefile create mode 100644 docker/openeuler/entrypoint.sh delete mode 100755 mk-deb.sh delete mode 100755 mk-tar.sh create mode 100755 scripts/ci/check_coverage.sh create mode 100644 tools-v2/pkg/cli/command/curvebs/update/volume/flatten/flatten.go create mode 100644 tools-v2/pkg/cli/command/curvebs/update/volume/volume.go create mode 100644 tools-v2/pkg/cli/command/curvefs/warmup/cancel/cancel.go create mode 100644 tools-v2/pkg/cli/command/curvefs/warmup/list/list.go mode change 100644 => 100755 util/basic.sh mode change 100644 => 100755 util/build.sh mode change 
100644 => 100755 util/build_in_image.sh mode change 100644 => 100755 util/check.sh create mode 100644 util/cppcheck/cppcheck.suppressions mode change 100644 => 100755 util/docker.sh create mode 100755 util/docker_opts.sh mode change 100644 => 100755 util/image.sh mode change 100644 => 100755 util/install.sh create mode 100755 util/package.sh mode change 100644 => 100755 util/playground.sh mode change 100644 => 100755 util/servicectl.sh mode change 100644 => 100755 util/test.sh mode change 100644 => 100755 util/ut_in_image.sh diff --git a/.bazelrc b/.bazelrc index bd9e2fd766..7c1adc7ea9 100644 --- a/.bazelrc +++ b/.bazelrc @@ -7,4 +7,20 @@ build:gcc7-later --cxxopt -faligned-new build --incompatible_blacklisted_protos_requires_proto_info=false build --copt=-fdiagnostics-color=always +build:sanitize-common --strip=never +build:sanitize-common --copt -O1 +build:sanitize-common --copt -g +build:sanitize-common --copt -fno-omit-frame-pointer + +build:asan --config=sanitize-common +build:asan --copt -fsanitize=address +build:asan --copt -DADDRESS_SANITIZER +build:asan --linkopt -fsanitize=address + +build:msan --config=sanitize-common +build:msan --copt -fsanitize=memory +build:msan --copt -fsanitize=undefined +build:msan --linkopt -fsanitize=memory +build:msan --linkopt -fsanitize=undefined + run --copt=-fdiagnostics-color=always diff --git a/.github/workflows/clang-format.yaml b/.github/workflows/clang-format.yml similarity index 95% rename from .github/workflows/clang-format.yaml rename to .github/workflows/clang-format.yml index d4d373c519..b0ceec4da5 100644 --- a/.github/workflows/clang-format.yaml +++ b/.github/workflows/clang-format.yml @@ -9,7 +9,7 @@ jobs: name: Clang Formatter runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: # fetch everything to be able to compare with any ref fetch-depth: 0 diff --git a/.github/workflows/cppcheck.yml b/.github/workflows/cppcheck.yml new file mode 100644 index
0000000000..cc0c5e25b4 --- /dev/null +++ b/.github/workflows/cppcheck.yml @@ -0,0 +1,24 @@ +name: Static Checker + +on: pull_request + +jobs: + cppcheck: + name: CppCheck + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: chmorgan/cppcheck-action@v1.4 + with: + enable: all + std: c++11 + inconclusive: disable + output_file: ./cppcheck_report.txt + other_options: "-j4 --suppressions-list=util/cppcheck/cppcheck.suppressions --error-exitcode=1 -itest -icurvefs/test -inebd/test -inbd/test -icurvefs_python -icurvesnapshot_python -ithirdparties" + + - name: Show cppcheck report + if: failure() + run: | + cat ./cppcheck_report.txt + exit 1 diff --git a/.gitignore b/.gitignore index 1a66484ca7..208afe07e7 100755 --- a/.gitignore +++ b/.gitignore @@ -82,6 +82,9 @@ runlog/ !curve-snapshotcloneserver-nginx/app/lib !nebd/nebd-package/usr/bin +# build output +build + # mac .DS_Store diff --git a/Makefile b/Makefile index f986bb4777..807b0a78c7 100644 --- a/Makefile +++ b/Makefile @@ -21,13 +21,14 @@ Examples: ## build Usage: - make build stor=bs/fs only=TARGET dep=0/1 release=0/1 os=OS + make build stor=bs/fs only=TARGET1,...,TARGETx dep=0/1 release=0/1 os=OS Examples: make build stor=bs only=//src/chunkserver:chunkserver - make build stor=bs only=src/* dep=0 + make build stor=bs only=src/*,test/* dep=0 make build stor=fs only=test/* os=debian9 make build stor=fs release=1 - +Note: + Extra build options can be specified using BUILD_OPTS environment variable, which will be passed to bazel build command. 
## dep Usage: @@ -50,6 +51,14 @@ Usage: make image stor=bs/fs tag=TAG os=OS Examples: make image stor=bs tag=opencurvedocker/curvebs:v1.2 os=debian9 + + +## package +Usage: + make release=0/1 dep=0/1 os=OS +Examples: + make deb + make tar release=1 dep=1 os=debian11 endef export help_msg @@ -66,7 +75,7 @@ dep: @bash util/build.sh --stor=$(stor) --only="" --dep=1 ci-build: - @bash util/build_in_image.sh --stor=$(stor) --only=$(only) --dep=$(dep) --release=$(release) --ci=$(ci) --os=$(os) + @bash util/build_in_image.sh --stor=$(stor) --only=$(only) --dep=$(dep) --release=$(release) --ci=$(ci) --os=$(os) --sanitizer=$(sanitizer) ci-dep: @bash util/build_in_image.sh --stor=$(stor) --only="" --dep=1 @@ -77,6 +86,9 @@ install: image: @bash util/image.sh $(stor) $(tag) $(os) +tar deb: + @RELEASE=$(release) DEP=$(dep) OS=$(os) bash util/package.sh $@ + playground: @bash util/playground.sh diff --git a/curve-ansible/README.md b/curve-ansible/README.md index 4815a12153..1f52d2f43f 100644 --- a/curve-ansible/README.md +++ b/curve-ansible/README.md @@ -30,8 +30,8 @@ chunkserver_num=13 ### 3、包管理 curve-ansible同时提供了debian包和tar包的方式安装curve软件包。默认使用tar包方式安装,如果想用debian包的方式,需要在inventory或者命令行中指定install_with_deb=true。(debian的方式目前不支持格式化指定盘,只支持格式化全部ATA盘) -- debian包的方式下,需要使用mk-deb.sh打debian包,并将debian包上传到debian源中,使其能够用apt-get命令安装。 -- tar包的方式下,tar包可以是来自curve github release,也可以是自己执行mk-tar.sh脚本获取到的。 +- debian包的方式下,需要使用 make deb 打debian包,并将debian包上传到debian源中,使其能够用apt-get命令安装。 +- tar包的方式下,tar包可以是来自curve github release,也可以是自己执行 make tar 获取到的。 ## 三、使用命令 ### 1、集群部署 diff --git a/curvefs/docker/openeuler/Dockerfile b/curvefs/docker/openeuler/Dockerfile new file mode 100644 index 0000000000..4aa04b8f56 --- /dev/null +++ b/curvefs/docker/openeuler/Dockerfile @@ -0,0 +1,9 @@ +FROM opencurvedocker/curve-base:openeuler +COPY libmemcached.so libmemcached.so.11 libhashkit.so.2 libfuse3.so.3.10.5 libsnappy.so.1.1.9 libetcdclient.so /usr/lib/ +COPY curvefs /curvefs +RUN mkdir -p /etc/curvefs /core /etc/curve 
&& chmod a+x /entrypoint.sh \ + && cp /curvefs/tools/sbin/curvefs_tool /usr/bin \ + && cp /curvefs/tools-v2/sbin/curve /usr/bin/ \ + && ln -s /usr/lib/libfuse3.so.3.10.5 /usr/lib/libfuse3.so.3 \ + && ln -s /usr/lib64/libsnappy.so.1.1.9 /usr/lib64/libsnappy.so.1 \ + && ldconfig diff --git a/curvefs/docker/openeuler/entrypoint.sh b/curvefs/docker/openeuler/entrypoint.sh new file mode 100644 index 0000000000..0ca397dace --- /dev/null +++ b/curvefs/docker/openeuler/entrypoint.sh @@ -0,0 +1,136 @@ +#!/usr/bin/env bash + +# Copyright (C) 2021 Jingli Chen (Wine93), NetEase Inc. + +############################ GLOBAL VARIABLES +g_role="" +g_args="" +g_prefix="" +g_binary="" +g_start_args="" +g_preexec="/curvefs/tools-v2/sbin/daemon" + +############################ BASIC FUNCTIONS +function msg() { + printf '%b' "$1" >&2 +} + +function success() { + msg "\33[32m[✔]\33[0m ${1}${2}" +} + +function die() { + msg "\33[31m[✘]\33[0m ${1}${2}" + exit 1 +} + +############################ FUNCTIONS +function usage () { + cat << _EOC_ +Usage: + entrypoint.sh --role=ROLE + entrypoint.sh --role=ROLE --args=ARGS + +Examples: + entrypoint.sh --role=etcd + entrypoint.sh --role=client --args="-o default_permissions" +_EOC_ +} + +function get_options() { + local long_opts="role:,args:,help" + local args=`getopt -o ra --long $long_opts -n "$0" -- "$@"` + eval set -- "${args}" + while true + do + case "$1" in + -r|--role) + g_role=$2 + shift 2 + ;; + -a|--args) + g_args=$2 + shift 2 + ;; + -h) + usage + exit 1 + ;; + --) + shift + break + ;; + *) + exit 1 + ;; + esac + done +} + +function prepare() { + g_prefix="/curvefs/$g_role" + conf_path="$g_prefix/conf/$g_role.conf" + + case $g_role in + etcd) + g_binary="$g_prefix/sbin/etcd" + g_start_args="--config-file $conf_path" + ;; + mds) + g_binary="$g_prefix/sbin/curvefs-mds" + g_start_args="--confPath $conf_path" + ;; + metaserver) + g_binary="$g_prefix/sbin/curvefs-metaserver" + g_start_args="--confPath $conf_path" + ;; + client) + 
g_binary="$g_prefix/sbin/curve-fuse" + g_start_args="--confPath $conf_path" + ;; + monitor) + g_binary="python3" + g_start_args="target_json.py" + ;; + *) + usage + exit 1 + ;; + esac + + if [ "$g_args" != "" ]; then + g_start_args=$g_args + fi +} + +function create_directory() { + chmod 700 "$g_prefix/data" + if [ "$g_role" == "etcd" ]; then + mkdir -p "$g_prefix/data/wal" + elif [ "$g_role" == "metaserver" ]; then + mkdir -p "$g_prefix/data/storage" + elif [ "$g_role" == "client" ]; then + mkdir -p "$g_prefix/mnt" + fi +} + +function main() { + get_options "$@" + + prepare + create_directory + [[ $(command -v crontab) ]] && cron + [[ ! -z $g_preexec ]] && $g_preexec & + if [ $g_role == "etcd" ]; then + exec $g_binary $g_start_args >>$g_prefix/logs/etcd.log 2>&1 + elif [ $g_role == "monitor" ]; then + cd $g_prefix + exec $g_binary $g_start_args + else + exec $g_binary $g_start_args + fi + +} + +############################ MAIN() +main "$@" diff --git a/curvefs/monitor/grafana/provisioning/dashboards/client.json b/curvefs/monitor/grafana/provisioning/dashboards/client.json index 02cba766d5..870a2b5bad 100644 --- a/curvefs/monitor/grafana/provisioning/dashboards/client.json +++ b/curvefs/monitor/grafana/provisioning/dashboards/client.json @@ -25,7 +25,6 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 1, "links": [], "liveNow": false, "panels": [ @@ -37,7 +36,7 @@ "x": 0, "y": 0 }, - "id": 201, + "id": 208, "panels": [ { "aliasColors": {}, @@ -63,7 +62,7 @@ "y": 1 }, "hiddenSeries": false, - "id": 202, + "id": 116, "interval": "1s", "legend": { "alignAsTable": true, @@ -82,7 +81,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -96,18 +95,16 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "editorMode": "code", "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_read_from_kv_cache_qps\", 
instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs.*_read_data_cache_num\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", - "range": true, "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "read_kv_cache_qps", + "title": "read_data_cache_num", "tooltip": { "shared": true, "sort": 0, @@ -161,7 +158,7 @@ "y": 1 }, "hiddenSeries": false, - "id": 203, + "id": 117, "interval": "1s", "legend": { "alignAsTable": true, @@ -180,7 +177,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -194,18 +191,16 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "editorMode": "code", "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_write_to_kv_cache_qps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs.*_write_data_cache_num\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", - "range": true, "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "write_kv_cache_qps", + "title": "write_data_cache_num", "tooltip": { "shared": true, "sort": 0, @@ -246,7 +241,7 @@ }, "fieldConfig": { "defaults": { - "unit": "binBps" + "unit": "short" }, "overrides": [] }, @@ -259,7 +254,7 @@ "y": 9 }, "hiddenSeries": false, - "id": 204, + "id": 118, "interval": "1s", "legend": { "alignAsTable": true, @@ -278,7 +273,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -292,18 +287,16 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "editorMode": "code", "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_read_from_kv_cache_bps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs.*_read_data_cache_byte\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", - "range": true, "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": 
"read_kv_cache_bps", + "title": "read_data_cache_byte", "tooltip": { "shared": true, "sort": 0, @@ -318,7 +311,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "binBps", + "format": "short", "logBase": 1, "show": true }, @@ -344,7 +337,7 @@ }, "fieldConfig": { "defaults": { - "unit": "binBps" + "unit": "short" }, "overrides": [] }, @@ -357,7 +350,7 @@ "y": 9 }, "hiddenSeries": false, - "id": 205, + "id": 119, "interval": "1s", "legend": { "alignAsTable": true, @@ -376,7 +369,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -390,18 +383,16 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "editorMode": "code", "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_write_to_kv_cache_bps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs.*_write_data_cache_byte\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", - "range": true, "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "write_kv_cache_bps", + "title": "write_data_cache_byte", "tooltip": { "shared": true, "sort": 0, @@ -416,7 +407,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "binBps", + "format": "short", "logBase": 1, "show": true }, @@ -430,7 +421,21 @@ "yaxis": { "align": false } - }, + } + ], + "title": "memcache cache", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 1 + }, + "id": 201, + "panels": [ { "aliasColors": {}, "bars": false, @@ -442,7 +447,7 @@ }, "fieldConfig": { "defaults": { - "unit": "µs" + "unit": "short" }, "overrides": [] }, @@ -452,10 +457,10 @@ "h": 8, "w": 12, "x": 0, - "y": 17 + "y": 9 }, "hiddenSeries": false, - "id": 206, + "id": 202, "interval": "1s", "legend": { "alignAsTable": true, @@ -490,17 +495,16 @@ }, "editorMode": "code", "exemplar": true, - "expr": 
"{__name__=~\"curvefs_s3.*[[fs:regex]]_read_from_kv_cache_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", - "hide": false, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_read_from_kv_cache_qps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "range": true, - "refId": "B" + "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "read_kv_cache_latency", + "title": "read_kv_cache_qps", "tooltip": { "shared": true, "sort": 0, @@ -515,7 +519,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "µs", + "format": "short", "logBase": 1, "show": true }, @@ -541,7 +545,7 @@ }, "fieldConfig": { "defaults": { - "unit": "µs" + "unit": "short" }, "overrides": [] }, @@ -551,10 +555,10 @@ "h": 8, "w": 12, "x": 12, - "y": 17 + "y": 9 }, "hiddenSeries": false, - "id": 207, + "id": 203, "interval": "1s", "legend": { "alignAsTable": true, @@ -589,17 +593,16 @@ }, "editorMode": "code", "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_write_to_kv_cache_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", - "hide": false, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_write_to_kv_cache_qps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "range": true, - "refId": "B" + "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "write_kv_cache_latency", + "title": "write_kv_cache_qps", "tooltip": { "shared": true, "sort": 0, @@ -614,7 +617,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "µs", + "format": "short", "logBase": 1, "show": true }, @@ -628,1216 +631,1868 @@ "yaxis": { "align": false } - } - ], - "title": "client kv cache r/w performance", - "type": "row" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 1 - }, - "id": 194, - "panels": [], - "title": "client diskcache performance", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", 
- "uid": "PBFA97CFB590B2093" - }, - "fieldConfig": { - "defaults": { - "unit": "short" }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 2 - }, - "hiddenSeries": false, - "id": 195, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "editorMode": "code", - "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_read_from_disk_cache_qps\", instance=~\"$instance\"}", - "interval": "", - "legendFormat": "", - "range": true, - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "read_disk_cache_qps", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "fieldConfig": { - "defaults": { - "unit": "short" + "fieldConfig": { + "defaults": { + "unit": "binBps" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 17 + }, + 
"hiddenSeries": false, + "id": 204, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.0.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_read_from_kv_cache_bps\", instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "read_kv_cache_bps", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "binBps", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 2 - }, - "hiddenSeries": false, - "id": 196, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ { + "aliasColors": {}, 
+ "bars": false, + "dashLength": 10, + "dashes": false, "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "editorMode": "code", - "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_write_to_disk_cache_qps\", instance=~\"$instance\"}", - "interval": "", - "legendFormat": "", - "range": true, - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "write_disk_cache_qps", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "fieldConfig": { + "defaults": { + "unit": "binBps" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 17 + }, + "hiddenSeries": false, + "id": 205, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.0.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_write_to_kv_cache_bps\", instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "range": true, + "refId": "A" + 
} + ], + "thresholds": [], + "timeRegions": [], + "title": "write_kv_cache_bps", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "binBps", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "µs" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 25 + }, + "hiddenSeries": false, + "id": 206, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.0.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_read_from_kv_cache_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "", + "range": true, + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "read_kv_cache_latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": 
"object:212", + "format": "µs", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "µs" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 25 + }, + "hiddenSeries": false, + "id": 207, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.0.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_write_to_kv_cache_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "", + "range": true, + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "write_kv_cache_latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "µs", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + } + ], + "title": "client kv cache r/w performance", + "type": "row" + }, + { + "collapsed": 
true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 2 + }, + "id": 194, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "short" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 10 + }, + "hiddenSeries": false, + "id": 195, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.4", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_read_from_disk_cache_qps\", instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "read_disk_cache_qps", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "short", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "short" + }, + "overrides": [] + }, + "fill": 
1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 10 + }, + "hiddenSeries": false, + "id": 196, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.4", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_write_to_disk_cache_qps\", instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "write_disk_cache_qps", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "short", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "binBps" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 18 + }, + "hiddenSeries": false, + "id": 197, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + 
"nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.4", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_read_from_disk_cache_bps\", instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "read_disk_cache_bps", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "binBps", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "binBps" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 18 + }, + "hiddenSeries": false, + "id": 198, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.4", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + 
"uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_write_to_disk_cache_bps\", instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "write_disk_cache_bps", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "binBps", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "µs" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 26 + }, + "hiddenSeries": false, + "id": 199, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.4", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_read_from_disk_cache_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "", + "range": true, + "refId": "B" + } + ], + "thresholds": [], + 
"timeRegions": [], + "title": "read_disk_cache_latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "µs", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "µs" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 26 + }, + "hiddenSeries": false, + "id": 200, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.4", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_write_to_disk_cache_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "", + "range": true, + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "write_disk_cache_latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "µs", + 
"logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + } + ], + "title": "client diskcache performance", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 3 }, - "fieldConfig": { - "defaults": { - "unit": "binBps" + "id": 187, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "short" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 11 + }, + "hiddenSeries": false, + "id": 188, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.4", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_read_from_s3_qps\", instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "read_s3_qps", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "short", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + 
], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "short" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 11 + }, + "hiddenSeries": false, + "id": 189, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.4", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_write_to_s3_qps\", instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "write_s3_qps", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "short", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 10 - }, - "hiddenSeries": false, - "id": 197, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "show": true, - "total": false, - 
"values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "editorMode": "code", - "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_read_from_disk_cache_bps\", instance=~\"$instance\"}", - "interval": "", - "legendFormat": "", - "range": true, - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "read_disk_cache_bps", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "binBps", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "fieldConfig": { - "defaults": { - "unit": "binBps" + "fieldConfig": { + "defaults": { + "unit": "binBps" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 19 + }, + "hiddenSeries": false, + "id": 190, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.4", + "pointradius": 2, + "points": false, 
+ "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_read_from_s3_bps\", instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "read_s3_bps", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "binBps", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 10 - }, - "hiddenSeries": false, - "id": 198, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "editorMode": "code", - "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_write_to_disk_cache_bps\", instance=~\"$instance\"}", - "interval": "", - "legendFormat": "", - "range": true, - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": 
"write_disk_cache_bps", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "binBps", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "fieldConfig": { - "defaults": { - "unit": "µs" + "fieldConfig": { + "defaults": { + "unit": "binBps" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 19 + }, + "hiddenSeries": false, + "id": 191, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.4", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_write_to_s3_bps\", instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "write_s3_bps", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "binBps", + "logBase": 1, + "show": true + }, + { + 
"$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 18 - }, - "hiddenSeries": false, - "id": 199, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "editorMode": "code", - "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_read_from_disk_cache_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "", - "range": true, - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "read_disk_cache_latency", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "µs", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "fieldConfig": { - "defaults": { - "unit": "µs" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - 
"x": 12, - "y": 18 - }, - "hiddenSeries": false, - "id": 200, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "fieldConfig": { + "defaults": { + "unit": "µs" + }, + "overrides": [] }, - "editorMode": "code", - "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_write_to_disk_cache_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "", - "range": true, - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "write_disk_cache_latency", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "µs", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 26 - }, - "id": 187, - "panels": [], - "title": "client s3 r/w performance", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "fieldConfig": { - "defaults": { - "unit": "short" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 27 - }, - 
"hiddenSeries": false, - "id": 188, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 27 }, - "editorMode": "code", - "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_read_from_s3_qps\", instance=~\"$instance\"}", - "interval": "", - "legendFormat": "", - "range": true, - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "read_s3_qps", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "fieldConfig": { - "defaults": { - "unit": "short" + "hiddenSeries": false, + "id": 192, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.4", + "pointradius": 
2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_read_from_s3_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "", + "range": true, + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "read_s3_latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "µs", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 27 - }, - "hiddenSeries": false, - "id": 189, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "editorMode": "code", - "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_write_to_s3_qps\", instance=~\"$instance\"}", - "interval": "", - "legendFormat": "", - "range": true, - "refId": "A" - } - ], - 
"thresholds": [], - "timeRegions": [], - "title": "write_s3_qps", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true + "fieldConfig": { + "defaults": { + "unit": "µs" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 27 + }, + "hiddenSeries": false, + "id": 193, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.4", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_write_to_s3_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "", + "range": true, + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "write_s3_latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "µs", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } } ], - "yaxis": { - "align": false - } + 
"title": "client s3 r/w performance", + "type": "row" }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, + "collapsed": true, "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "fieldConfig": { - "defaults": { - "unit": "binBps" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, "gridPos": { - "h": 8, - "w": 12, + "h": 1, + "w": 24, "x": 0, - "y": 35 - }, - "hiddenSeries": false, - "id": 190, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true + "y": 4 }, - "percentage": false, - "pluginVersion": "10.0.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ + "id": 6, + "panels": [ { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "editorMode": "code", - "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_read_from_s3_bps\", instance=~\"$instance\"}", - "interval": "", - "legendFormat": "", - "range": true, - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "read_s3_bps", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "binBps", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - 
"fieldConfig": { - "defaults": { - "unit": "binBps" + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 161 + }, + "hiddenSeries": false, + "id": 4, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "process_memory_resident{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "process_memory_resident {{instance}}", + "refId": "process_memory_resident" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "process_memory_virtual{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "process_memory_virtual {{instance}}", + "refId": "process_memory_virtual" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "process_memory_shared{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "process_memory_shared {{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "process memory usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:63", + "format": "decbytes", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:64", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": 
false + } }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 35 - }, - "hiddenSeries": false, - "id": 191, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "editorMode": "code", - "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_write_to_s3_bps\", instance=~\"$instance\"}", - "interval": "", - "legendFormat": "", - "range": true, - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "write_s3_bps", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "binBps", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "fieldConfig": { - "defaults": { - "unit": "µs" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 43 - }, - "hiddenSeries": false, - "id": 192, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": 
true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 161 + }, + "hiddenSeries": false, + "id": 2, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "process_cpu_usage{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "process_cpu_usage {{instance}}", + "refId": "process_cpu_usage" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "process_cpu_usage_system{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "process_cpu_usage_system {{instance}}", + "refId": "process_cpu_usage_system" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "process_cpu_usage_user{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "process_cpu_usage_user {{instance}}", + "refId": "process_cpu_usage_user" + } + ], + "thresholds": [], + 
"timeRegions": [], + "title": "process cpu usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] }, - "editorMode": "code", - "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_read_from_s3_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "", - "range": true, - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "read_s3_latency", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "µs", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true + "yaxes": [ + { + "$$hashKey": "object:495", + "format": "percentunit", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:496", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } } ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "fieldConfig": { - "defaults": { - "unit": "µs" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 43 - }, - "hiddenSeries": false, - "id": 193, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - 
"spaceLength": 10, - "stack": false, - "steppedLine": false, "targets": [ { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "editorMode": "code", - "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_write_to_s3_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "", - "range": true, - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "write_s3_latency", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "µs", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true + "refId": "A" } ], - "yaxis": { - "align": false - } + "title": "process usage", + "type": "row" }, { "collapsed": true, @@ -1849,9 +2504,9 @@ "h": 1, "w": 24, "x": 0, - "y": 51 + "y": 5 }, - "id": 6, + "id": 85, "panels": [ { "aliasColors": {}, @@ -1862,16 +2517,22 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "fieldConfig": { + "defaults": { + "unit": "µs" + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 0, - "y": 153 + "y": 6 }, "hiddenSeries": false, - "id": 4, + "id": 107, "interval": "1s", "legend": { "alignAsTable": true, @@ -1890,7 +2551,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.4.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -1905,39 +2566,203 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "process_memory_resident{instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_read_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", - "legendFormat": "process_memory_resident {{instance}}", - "refId": "process_memory_resident" + "legendFormat": "", + "refId": "A" + } + 
], + "thresholds": [], + "timeRegions": [], + "title": "read_latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "µs", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "µs" }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 6 + }, + "hiddenSeries": false, + "id": 108, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.4", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "process_memory_virtual{instance=~\"$instance\"}", - "hide": false, + "expr": "{__name__=~\"curvefs_client_op_write_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", - "legendFormat": "process_memory_virtual {{instance}}", - "refId": "process_memory_virtual" + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "write_latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "µs", + "logBase": 1, + "show": 
true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "µs" }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 14 + }, + "hiddenSeries": false, + "id": 87, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.4", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "process_memory_shared{instance=~\"$instance\"}", - "hide": false, + "expr": "{__name__=~\"curvefs_client_op_mk_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", - "legendFormat": "process_memory_shared {{instance}}", + "legendFormat": "", "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "process memory usage", + "title": "mkdir_latency", "tooltip": { "shared": true, "sort": 0, @@ -1951,13 +2776,11 @@ }, "yaxes": [ { - "$$hashKey": "object:63", - "format": "decbytes", + "format": "µs", "logBase": 1, "show": true }, { - "$$hashKey": "object:64", "format": "short", "logBase": 1, "show": true @@ -1976,16 +2799,22 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "fieldConfig": { + "defaults": { + "unit": "µs" + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, - "y": 153 + "y": 14 }, "hiddenSeries": false, - "id": 2, + 
"id": 91, "interval": "1s", "legend": { "alignAsTable": true, @@ -1993,7 +2822,6 @@ "current": false, "max": true, "min": true, - "rightSide": false, "show": true, "total": false, "values": true @@ -2005,7 +2833,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.4.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -2020,39 +2848,15 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "process_cpu_usage{instance=~\"$instance\"}", - "interval": "", - "legendFormat": "process_cpu_usage {{instance}}", - "refId": "process_cpu_usage" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": true, - "expr": "process_cpu_usage_system{instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "process_cpu_usage_system {{instance}}", - "refId": "process_cpu_usage_system" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": true, - "expr": "process_cpu_usage_user{instance=~\"$instance\"}", - "hide": false, + "expr": "{__name__=~\"curvefs_client_op_rm_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", - "legendFormat": "process_cpu_usage_user {{instance}}", - "refId": "process_cpu_usage_user" + "legendFormat": "", + "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "process cpu usage", + "title": "rmdir_latency", "tooltip": { "shared": true, "sort": 0, @@ -2066,13 +2870,11 @@ }, "yaxes": [ { - "$$hashKey": "object:495", - "format": "percentunit", + "format": "µs", "logBase": 1, "show": true }, { - "$$hashKey": "object:496", "format": "short", "logBase": 1, "show": true @@ -2081,34 +2883,7 @@ "yaxis": { "align": false } - } - ], - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "refId": "A" - } - ], - "title": "process usage", - "type": "row" - }, - { - "collapsed": true, - "datasource": { - "type": "prometheus", - 
"uid": "PBFA97CFB590B2093" - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 52 - }, - "id": 85, - "panels": [ + }, { "aliasColors": {}, "bars": false, @@ -2130,10 +2905,10 @@ "h": 8, "w": 12, "x": 0, - "y": 154 + "y": 22 }, "hiddenSeries": false, - "id": 107, + "id": 88, "interval": "1s", "legend": { "alignAsTable": true, @@ -2152,7 +2927,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.4.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -2167,7 +2942,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_read_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_create_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -2175,7 +2950,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "read_latency", + "title": "create_latency", "tooltip": { "shared": true, "sort": 0, @@ -2224,10 +2999,10 @@ "h": 8, "w": 12, "x": 12, - "y": 154 + "y": 22 }, "hiddenSeries": false, - "id": 108, + "id": 92, "interval": "1s", "legend": { "alignAsTable": true, @@ -2246,7 +3021,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.4.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -2261,7 +3036,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_write_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_unlink_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -2269,7 +3044,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "write_latency", + "title": "unlink_latency", "tooltip": { "shared": true, "sort": 0, @@ -2318,10 +3093,10 @@ "h": 8, "w": 12, "x": 0, - "y": 162 + "y": 30 }, "hiddenSeries": false, - "id": 87, + "id": 89, "interval": "1s", "legend": { "alignAsTable": 
true, @@ -2340,7 +3115,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.4.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -2355,7 +3130,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_mk_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_open_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -2363,7 +3138,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "mkdir_latency", + "title": "open_latency", "tooltip": { "shared": true, "sort": 0, @@ -2412,10 +3187,10 @@ "h": 8, "w": 12, "x": 12, - "y": 162 + "y": 30 }, "hiddenSeries": false, - "id": 91, + "id": 90, "interval": "1s", "legend": { "alignAsTable": true, @@ -2434,7 +3209,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.4.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -2449,7 +3224,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_rm_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_release_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -2457,7 +3232,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "rmdir_latency", + "title": "release_latency", "tooltip": { "shared": true, "sort": 0, @@ -2506,10 +3281,10 @@ "h": 8, "w": 12, "x": 0, - "y": 170 + "y": 38 }, "hiddenSeries": false, - "id": 88, + "id": 93, "interval": "1s", "legend": { "alignAsTable": true, @@ -2528,7 +3303,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.4.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -2543,7 +3318,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": 
"{__name__=~\"curvefs_client_op_create_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_open_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -2551,7 +3326,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "create_latency", + "title": "opendir_latency", "tooltip": { "shared": true, "sort": 0, @@ -2600,10 +3375,10 @@ "h": 8, "w": 12, "x": 12, - "y": 170 + "y": 38 }, "hiddenSeries": false, - "id": 92, + "id": 94, "interval": "1s", "legend": { "alignAsTable": true, @@ -2622,7 +3397,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.4.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -2637,7 +3412,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_unlink_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_release_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -2645,7 +3420,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "unlink_latency", + "title": "releasedir_latency", "tooltip": { "shared": true, "sort": 0, @@ -2694,10 +3469,10 @@ "h": 8, "w": 12, "x": 0, - "y": 178 + "y": 46 }, "hiddenSeries": false, - "id": 89, + "id": 99, "interval": "1s", "legend": { "alignAsTable": true, @@ -2716,7 +3491,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.4.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -2731,7 +3506,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_open_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_read_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -2739,7 +3514,7 @@ ], "thresholds": [], "timeRegions": [], - 
"title": "open_latency", + "title": "readdir_latency", "tooltip": { "shared": true, "sort": 0, @@ -2788,10 +3563,10 @@ "h": 8, "w": 12, "x": 12, - "y": 178 + "y": 46 }, "hiddenSeries": false, - "id": 90, + "id": 100, "interval": "1s", "legend": { "alignAsTable": true, @@ -2810,7 +3585,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.4.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -2825,7 +3600,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_release_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_read_link_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -2833,7 +3608,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "release_latency", + "title": "readlink_latency", "tooltip": { "shared": true, "sort": 0, @@ -2882,10 +3657,10 @@ "h": 8, "w": 12, "x": 0, - "y": 186 + "y": 54 }, "hiddenSeries": false, - "id": 93, + "id": 95, "interval": "1s", "legend": { "alignAsTable": true, @@ -2904,7 +3679,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -2919,7 +3694,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_open_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_lookup_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -2927,7 +3702,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "opendir_latency", + "title": "lookup_latency", "tooltip": { "shared": true, "sort": 0, @@ -2976,10 +3751,10 @@ "h": 8, "w": 12, "x": 12, - "y": 186 + "y": 54 }, "hiddenSeries": false, - "id": 94, + "id": 96, "interval": "1s", "legend": { "alignAsTable": true, @@ -2998,7 +3773,7 @@ "alertThreshold": true }, 
"percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -3013,7 +3788,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_release_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_mk_nod_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -3021,7 +3796,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "releasedir_latency", + "title": "mknod_latency", "tooltip": { "shared": true, "sort": 0, @@ -3070,10 +3845,10 @@ "h": 8, "w": 12, "x": 0, - "y": 194 + "y": 62 }, "hiddenSeries": false, - "id": 99, + "id": 97, "interval": "1s", "legend": { "alignAsTable": true, @@ -3092,7 +3867,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -3107,7 +3882,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_read_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_link_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -3115,7 +3890,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "readdir_latency", + "title": "link_latency", "tooltip": { "shared": true, "sort": 0, @@ -3164,10 +3939,10 @@ "h": 8, "w": 12, "x": 12, - "y": 194 + "y": 62 }, "hiddenSeries": false, - "id": 100, + "id": 98, "interval": "1s", "legend": { "alignAsTable": true, @@ -3186,7 +3961,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -3201,7 +3976,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_read_link_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": 
"{__name__=~\"curvefs_client_op_symlink_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -3209,7 +3984,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "readlink_latency", + "title": "symlink_latency", "tooltip": { "shared": true, "sort": 0, @@ -3258,10 +4033,10 @@ "h": 8, "w": 12, "x": 0, - "y": 202 + "y": 70 }, "hiddenSeries": false, - "id": 95, + "id": 101, "interval": "1s", "legend": { "alignAsTable": true, @@ -3280,7 +4055,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -3295,7 +4070,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_lookup_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_get_attr_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -3303,7 +4078,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "lookup_latency", + "title": "getattr_latency", "tooltip": { "shared": true, "sort": 0, @@ -3352,10 +4127,10 @@ "h": 8, "w": 12, "x": 12, - "y": 202 + "y": 70 }, "hiddenSeries": false, - "id": 96, + "id": 102, "interval": "1s", "legend": { "alignAsTable": true, @@ -3374,7 +4149,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -3389,7 +4164,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_mk_nod_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_set_attr_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -3397,7 +4172,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "mknod_latency", + "title": "setattr_latency", "tooltip": { "shared": true, "sort": 0, @@ -3446,10 
+4221,10 @@ "h": 8, "w": 12, "x": 0, - "y": 210 + "y": 78 }, "hiddenSeries": false, - "id": 97, + "id": 103, "interval": "1s", "legend": { "alignAsTable": true, @@ -3468,7 +4243,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -3483,7 +4258,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_link_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_get_xattr_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -3491,7 +4266,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "link_latency", + "title": "getxattr_latency", "tooltip": { "shared": true, "sort": 0, @@ -3540,10 +4315,10 @@ "h": 8, "w": 12, "x": 12, - "y": 210 + "y": 78 }, "hiddenSeries": false, - "id": 98, + "id": 104, "interval": "1s", "legend": { "alignAsTable": true, @@ -3562,7 +4337,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -3577,7 +4352,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_symlink_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_list_xattr_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -3585,7 +4360,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "symlink_latency", + "title": "listxattr_latency", "tooltip": { "shared": true, "sort": 0, @@ -3634,10 +4409,10 @@ "h": 8, "w": 12, "x": 0, - "y": 218 + "y": 86 }, "hiddenSeries": false, - "id": 101, + "id": 105, "interval": "1s", "legend": { "alignAsTable": true, @@ -3656,7 +4431,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, 
"points": false, "renderer": "flot", @@ -3671,7 +4446,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_get_attr_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_fsync_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -3679,7 +4454,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "getattr_latency", + "title": "fsync_latency", "tooltip": { "shared": true, "sort": 0, @@ -3728,10 +4503,10 @@ "h": 8, "w": 12, "x": 12, - "y": 218 + "y": 86 }, "hiddenSeries": false, - "id": 102, + "id": 106, "interval": "1s", "legend": { "alignAsTable": true, @@ -3750,7 +4525,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -3765,7 +4540,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_set_attr_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_flush_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -3773,7 +4548,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "setattr_latency", + "title": "flush_latency", "tooltip": { "shared": true, "sort": 0, @@ -3822,10 +4597,10 @@ "h": 8, "w": 12, "x": 0, - "y": 226 + "y": 94 }, "hiddenSeries": false, - "id": 103, + "id": 109, "interval": "1s", "legend": { "alignAsTable": true, @@ -3844,7 +4619,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -3859,7 +4634,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_get_xattr_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_rename_lat_[[quantile:regex]]\", instance=~\"$instance\"}", 
"interval": "", "legendFormat": "", "refId": "A" @@ -3867,7 +4642,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "getxattr_latency", + "title": "rename_latency", "tooltip": { "shared": true, "sort": 0, @@ -3894,7 +4669,34 @@ "yaxis": { "align": false } - }, + } + ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], + "title": "op latency", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 6 + }, + "id": 176, + "panels": [ { "aliasColors": {}, "bars": false, @@ -3906,7 +4708,7 @@ }, "fieldConfig": { "defaults": { - "unit": "µs" + "unit": "short" }, "overrides": [] }, @@ -3914,12 +4716,12 @@ "fillGradient": 0, "gridPos": { "h": 8, - "w": 12, - "x": 12, - "y": 226 + "w": 24, + "x": 0, + "y": 203 }, "hiddenSeries": false, - "id": 104, + "id": 177, "interval": "1s", "legend": { "alignAsTable": true, @@ -3953,7 +4755,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_list_xattr_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_[[op:regex]]_inflight_num\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -3961,7 +4763,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "listxattr_latency", + "title": "op _inflight_num", "tooltip": { "shared": true, "sort": 0, @@ -3975,7 +4777,7 @@ }, "yaxes": [ { - "format": "µs", + "format": "short", "logBase": 1, "show": true }, @@ -3988,7 +4790,34 @@ "yaxis": { "align": false } - }, + } + ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], + "title": "op inflight number", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 7 + 
}, + "id": 47, + "panels": [ { "aliasColors": {}, "bars": false, @@ -4000,7 +4829,7 @@ }, "fieldConfig": { "defaults": { - "unit": "µs" + "unit": "short" }, "overrides": [] }, @@ -4010,10 +4839,10 @@ "h": 8, "w": 12, "x": 0, - "y": 234 + "y": 164 }, "hiddenSeries": false, - "id": 105, + "id": 48, "interval": "1s", "legend": { "alignAsTable": true, @@ -4032,7 +4861,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.0.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -4047,7 +4876,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_fsync_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_.*[[fs:regex]]_user_read_qps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -4055,7 +4884,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "fsync_latency", + "title": "read_qps", "tooltip": { "shared": true, "sort": 0, @@ -4069,11 +4898,13 @@ }, "yaxes": [ { - "format": "µs", + "$$hashKey": "object:212", + "format": "short", "logBase": 1, "show": true }, { + "$$hashKey": "object:213", "format": "short", "logBase": 1, "show": true @@ -4094,7 +4925,7 @@ }, "fieldConfig": { "defaults": { - "unit": "µs" + "unit": "short" }, "overrides": [] }, @@ -4104,10 +4935,10 @@ "h": 8, "w": 12, "x": 12, - "y": 234 + "y": 164 }, "hiddenSeries": false, - "id": 106, + "id": 49, "interval": "1s", "legend": { "alignAsTable": true, @@ -4126,7 +4957,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.0.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -4141,7 +4972,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_flush_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_.*[[fs:regex]]_user_write_qps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -4149,7 
+4980,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "flush_latency", + "title": "write_qps", "tooltip": { "shared": true, "sort": 0, @@ -4163,11 +4994,13 @@ }, "yaxes": [ { - "format": "µs", + "$$hashKey": "object:212", + "format": "short", "logBase": 1, "show": true }, { + "$$hashKey": "object:213", "format": "short", "logBase": 1, "show": true @@ -4188,7 +5021,7 @@ }, "fieldConfig": { "defaults": { - "unit": "µs" + "unit": "binBps" }, "overrides": [] }, @@ -4198,10 +5031,10 @@ "h": 8, "w": 12, "x": 0, - "y": 242 + "y": 172 }, "hiddenSeries": false, - "id": 109, + "id": 50, "interval": "1s", "legend": { "alignAsTable": true, @@ -4220,7 +5053,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.0.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -4235,7 +5068,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_rename_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_read_bps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -4243,7 +5076,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "rename_latency", + "title": "read_bps", "tooltip": { "shared": true, "sort": 0, @@ -4257,11 +5090,13 @@ }, "yaxes": [ { - "format": "µs", + "$$hashKey": "object:212", + "format": "binBps", "logBase": 1, "show": true }, { + "$$hashKey": "object:213", "format": "short", "logBase": 1, "show": true @@ -4270,34 +5105,7 @@ "yaxis": { "align": false } - } - ], - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "refId": "A" - } - ], - "title": "op latency", - "type": "row" - }, - { - "collapsed": true, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 53 - }, - "id": 176, - "panels": [ + }, { "aliasColors": {}, "bars": false, @@ -4309,7 +5117,7 @@ }, 
"fieldConfig": { "defaults": { - "unit": "short" + "unit": "binBps" }, "overrides": [] }, @@ -4317,12 +5125,12 @@ "fillGradient": 0, "gridPos": { "h": 8, - "w": 24, - "x": 0, - "y": 195 + "w": 12, + "x": 12, + "y": 172 }, "hiddenSeries": false, - "id": 177, + "id": 51, "interval": "1s", "legend": { "alignAsTable": true, @@ -4341,7 +5149,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.0.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -4356,7 +5164,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_[[op:regex]]_inflight_num\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_write_bps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -4364,7 +5172,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "op _inflight_num", + "title": "write_bps", "tooltip": { "shared": true, "sort": 0, @@ -4378,11 +5186,13 @@ }, "yaxes": [ { - "format": "short", + "$$hashKey": "object:212", + "format": "binBps", "logBase": 1, "show": true }, { + "$$hashKey": "object:213", "format": "short", "logBase": 1, "show": true @@ -4391,34 +5201,7 @@ "yaxis": { "align": false } - } - ], - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "refId": "A" - } - ], - "title": "op inflight number", - "type": "row" - }, - { - "collapsed": true, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 54 - }, - "id": 47, - "panels": [ + }, { "aliasColors": {}, "bars": false, @@ -4440,10 +5223,10 @@ "h": 8, "w": 12, "x": 0, - "y": 156 + "y": 180 }, "hiddenSeries": false, - "id": 48, + "id": 52, "interval": "1s", "legend": { "alignAsTable": true, @@ -4477,7 +5260,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_.*[[fs:regex]]_user_read_qps\", instance=~\"$instance\"}", + "expr": 
"{__name__=~\"curvefs.*[[fs:regex]]_user_read_eps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -4485,7 +5268,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "read_qps", + "title": "read_eps", "tooltip": { "shared": true, "sort": 0, @@ -4524,22 +5307,16 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "fieldConfig": { - "defaults": { - "unit": "short" - }, - "overrides": [] - }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, - "y": 156 + "y": 180 }, "hiddenSeries": false, - "id": 49, + "id": 53, "interval": "1s", "legend": { "alignAsTable": true, @@ -4573,7 +5350,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_.*[[fs:regex]]_user_write_qps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs.*[[fs:regex]]_adaptor_write_eps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -4581,7 +5358,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "write_qps", + "title": "write_eps", "tooltip": { "shared": true, "sort": 0, @@ -4622,7 +5399,7 @@ }, "fieldConfig": { "defaults": { - "unit": "binBps" + "unit": "reqps" }, "overrides": [] }, @@ -4632,10 +5409,10 @@ "h": 8, "w": 12, "x": 0, - "y": 164 + "y": 188 }, "hiddenSeries": false, - "id": 50, + "id": 54, "interval": "1s", "legend": { "alignAsTable": true, @@ -4669,7 +5446,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_read_bps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_read_rps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -4677,7 +5454,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "read_bps", + "title": "read_rps", "tooltip": { "shared": true, "sort": 0, @@ -4692,7 +5469,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "binBps", + "format": "reqps", "logBase": 1, "show": true }, @@ -4718,7 +5495,7 @@ }, "fieldConfig": { 
"defaults": { - "unit": "binBps" + "unit": "reqps" }, "overrides": [] }, @@ -4728,10 +5505,10 @@ "h": 8, "w": 12, "x": 12, - "y": 164 + "y": 188 }, "hiddenSeries": false, - "id": 51, + "id": 43, "interval": "1s", "legend": { "alignAsTable": true, @@ -4765,7 +5542,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_write_bps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_write_rps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -4773,7 +5550,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "write_bps", + "title": "write_rps", "tooltip": { "shared": true, "sort": 0, @@ -4788,7 +5565,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "binBps", + "format": "reqps", "logBase": 1, "show": true }, @@ -4814,7 +5591,7 @@ }, "fieldConfig": { "defaults": { - "unit": "short" + "unit": "bytes" }, "overrides": [] }, @@ -4824,10 +5601,10 @@ "h": 8, "w": 12, "x": 0, - "y": 172 + "y": 196 }, "hiddenSeries": false, - "id": 52, + "id": 56, "interval": "1s", "legend": { "alignAsTable": true, @@ -4861,7 +5638,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_read_eps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_read_io_size\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -4869,7 +5646,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "read_eps", + "title": "read_io_size", "tooltip": { "shared": true, "sort": 0, @@ -4884,7 +5661,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "short", + "format": "bytes", "logBase": 1, "show": true }, @@ -4908,16 +5685,22 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "fieldConfig": { + "defaults": { + "unit": "bytes" + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, - "y": 172 + "y": 196 }, "hiddenSeries": false, - "id": 53, + 
"id": 57, "interval": "1s", "legend": { "alignAsTable": true, @@ -4951,7 +5734,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs.*[[fs:regex]]_adaptor_write_eps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_write_io_size\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -4959,7 +5742,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "write_eps", + "title": "write_io_size", "tooltip": { "shared": true, "sort": 0, @@ -4974,7 +5757,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "short", + "format": "bytes", "logBase": 1, "show": true }, @@ -5000,7 +5783,7 @@ }, "fieldConfig": { "defaults": { - "unit": "reqps" + "unit": "µs" }, "overrides": [] }, @@ -5010,10 +5793,10 @@ "h": 8, "w": 12, "x": 0, - "y": 180 + "y": 204 }, "hiddenSeries": false, - "id": 54, + "id": 58, "interval": "1s", "legend": { "alignAsTable": true, @@ -5047,15 +5830,16 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_read_rps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_read_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "hide": false, "interval": "", "legendFormat": "", - "refId": "A" + "refId": "B" } ], "thresholds": [], "timeRegions": [], - "title": "read_rps", + "title": "read_latency", "tooltip": { "shared": true, "sort": 0, @@ -5070,7 +5854,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "reqps", + "format": "µs", "logBase": 1, "show": true }, @@ -5096,7 +5880,7 @@ }, "fieldConfig": { "defaults": { - "unit": "reqps" + "unit": "µs" }, "overrides": [] }, @@ -5106,10 +5890,10 @@ "h": 8, "w": 12, "x": 12, - "y": 180 + "y": 204 }, "hiddenSeries": false, - "id": 43, + "id": 59, "interval": "1s", "legend": { "alignAsTable": true, @@ -5143,15 +5927,16 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_write_rps\", 
instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_write_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "hide": false, "interval": "", "legendFormat": "", - "refId": "A" + "refId": "B" } ], "thresholds": [], "timeRegions": [], - "title": "write_rps", + "title": "write_latency", "tooltip": { "shared": true, "sort": 0, @@ -5166,7 +5951,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "reqps", + "format": "µs", "logBase": 1, "show": true }, @@ -5180,7 +5965,34 @@ "yaxis": { "align": false } - }, + } + ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], + "title": "client interface performance", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 8, + "panels": [ { "aliasColors": {}, "bars": false, @@ -5192,7 +6004,7 @@ }, "fieldConfig": { "defaults": { - "unit": "bytes" + "unit": "short" }, "overrides": [] }, @@ -5202,10 +6014,10 @@ "h": 8, "w": 12, "x": 0, - "y": 188 + "y": 9 }, "hiddenSeries": false, - "id": 56, + "id": 10, "interval": "1s", "legend": { "alignAsTable": true, @@ -5224,7 +6036,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -5239,7 +6051,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_read_io_size\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_qps\", job=\"client\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -5247,7 +6059,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "read_io_size", + "title": "read_qps", "tooltip": { "shared": true, "sort": 0, @@ -5262,7 +6074,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "bytes", + "format": 
"short", "logBase": 1, "show": true }, @@ -5288,7 +6100,7 @@ }, "fieldConfig": { "defaults": { - "unit": "bytes" + "unit": "short" }, "overrides": [] }, @@ -5298,10 +6110,10 @@ "h": 8, "w": 12, "x": 12, - "y": 188 + "y": 9 }, "hiddenSeries": false, - "id": 57, + "id": 11, "interval": "1s", "legend": { "alignAsTable": true, @@ -5320,7 +6132,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -5335,7 +6147,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_write_io_size\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_qps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -5343,7 +6155,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "write_io_size", + "title": "write_qps", "tooltip": { "shared": true, "sort": 0, @@ -5358,7 +6170,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "bytes", + "format": "short", "logBase": 1, "show": true }, @@ -5384,7 +6196,7 @@ }, "fieldConfig": { "defaults": { - "unit": "µs" + "unit": "binBps" }, "overrides": [] }, @@ -5394,10 +6206,10 @@ "h": 8, "w": 12, "x": 0, - "y": 196 + "y": 17 }, "hiddenSeries": false, - "id": 58, + "id": 12, "interval": "1s", "legend": { "alignAsTable": true, @@ -5416,7 +6228,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -5431,16 +6243,15 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_read_lat_[[quantile:regex]]\", instance=~\"$instance\"}", - "hide": false, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_bps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", - "refId": "B" + "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": 
"read_latency", + "title": "read_bps", "tooltip": { "shared": true, "sort": 0, @@ -5455,7 +6266,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "µs", + "format": "binBps", "logBase": 1, "show": true }, @@ -5481,7 +6292,7 @@ }, "fieldConfig": { "defaults": { - "unit": "µs" + "unit": "binBps" }, "overrides": [] }, @@ -5491,10 +6302,10 @@ "h": 8, "w": 12, "x": 12, - "y": 196 + "y": 17 }, "hiddenSeries": false, - "id": 59, + "id": 13, "interval": "1s", "legend": { "alignAsTable": true, @@ -5513,7 +6324,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -5528,16 +6339,15 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_write_lat_[[quantile:regex]]\", instance=~\"$instance\"}", - "hide": false, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_bps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", - "refId": "B" + "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "write_latency", + "title": "write_bps", "tooltip": { "shared": true, "sort": 0, @@ -5552,7 +6362,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "µs", + "format": "binBps", "logBase": 1, "show": true }, @@ -5566,34 +6376,7 @@ "yaxis": { "align": false } - } - ], - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "refId": "A" - } - ], - "title": "client interface performance", - "type": "row" - }, - { - "collapsed": true, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 55 - }, - "id": 8, - "panels": [ + }, { "aliasColors": {}, "bars": false, @@ -5615,10 +6398,10 @@ "h": 8, "w": 12, "x": 0, - "y": 80 + "y": 25 }, "hiddenSeries": false, - "id": 10, + "id": 14, "interval": "1s", "legend": { "alignAsTable": true, @@ -5637,7 +6420,7 @@ "alertThreshold": true }, 
"percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -5652,7 +6435,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_qps\", job=\"client\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_eps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -5660,7 +6443,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "read_qps", + "title": "read_eps", "tooltip": { "shared": true, "sort": 0, @@ -5699,22 +6482,16 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "fieldConfig": { - "defaults": { - "unit": "short" - }, - "overrides": [] - }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, - "y": 80 + "y": 25 }, "hiddenSeries": false, - "id": 11, + "id": 15, "interval": "1s", "legend": { "alignAsTable": true, @@ -5733,7 +6510,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -5748,7 +6525,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_qps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_eps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -5756,7 +6533,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "write_qps", + "title": "write_eps", "tooltip": { "shared": true, "sort": 0, @@ -5797,7 +6574,7 @@ }, "fieldConfig": { "defaults": { - "unit": "binBps" + "unit": "reqps" }, "overrides": [] }, @@ -5807,10 +6584,10 @@ "h": 8, "w": 12, "x": 0, - "y": 88 + "y": 33 }, "hiddenSeries": false, - "id": 12, + "id": 42, "interval": "1s", "legend": { "alignAsTable": true, @@ -5829,7 +6606,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + 
"pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -5844,7 +6621,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_bps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_rps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -5852,7 +6629,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "read_bps", + "title": "read_rps", "tooltip": { "shared": true, "sort": 0, @@ -5867,7 +6644,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "binBps", + "format": "reqps", "logBase": 1, "show": true }, @@ -5893,7 +6670,7 @@ }, "fieldConfig": { "defaults": { - "unit": "binBps" + "unit": "reqps" }, "overrides": [] }, @@ -5903,10 +6680,10 @@ "h": 8, "w": 12, "x": 12, - "y": 88 + "y": 33 }, "hiddenSeries": false, - "id": 13, + "id": 55, "interval": "1s", "legend": { "alignAsTable": true, @@ -5925,7 +6702,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -5940,7 +6717,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_bps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_rps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -5948,7 +6725,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "write_bps", + "title": "write_rps", "tooltip": { "shared": true, "sort": 0, @@ -5963,7 +6740,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "binBps", + "format": "reqps", "logBase": 1, "show": true }, @@ -5989,7 +6766,7 @@ }, "fieldConfig": { "defaults": { - "unit": "short" + "unit": "bytes" }, "overrides": [] }, @@ -5999,10 +6776,10 @@ "h": 8, "w": 12, "x": 0, - "y": 96 + "y": 41 }, "hiddenSeries": false, - "id": 14, + "id": 60, "interval": "1s", 
"legend": { "alignAsTable": true, @@ -6021,7 +6798,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -6036,7 +6813,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_eps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_size\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -6044,7 +6821,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "read_eps", + "title": "read_io_size", "tooltip": { "shared": true, "sort": 0, @@ -6059,7 +6836,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "short", + "format": "bytes", "logBase": 1, "show": true }, @@ -6083,16 +6860,22 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "fieldConfig": { + "defaults": { + "unit": "bytes" + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, - "y": 96 + "y": 41 }, "hiddenSeries": false, - "id": 15, + "id": 61, "interval": "1s", "legend": { "alignAsTable": true, @@ -6111,7 +6894,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -6126,7 +6909,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_eps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_size\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -6134,7 +6917,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "write_eps", + "title": "write_io_size", "tooltip": { "shared": true, "sort": 0, @@ -6149,7 +6932,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "short", + "format": "bytes", "logBase": 1, "show": true }, @@ -6175,7 +6958,7 @@ }, "fieldConfig": { "defaults": 
{ - "unit": "reqps" + "unit": "µs" }, "overrides": [] }, @@ -6185,10 +6968,10 @@ "h": 8, "w": 12, "x": 0, - "y": 104 + "y": 49 }, "hiddenSeries": false, - "id": 42, + "id": 26, "interval": "1s", "legend": { "alignAsTable": true, @@ -6207,7 +6990,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -6222,15 +7005,16 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_rps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", + "hide": false, "interval": "", "legendFormat": "", - "refId": "A" + "refId": "B" } ], "thresholds": [], "timeRegions": [], - "title": "read_rps", + "title": "read_latency", "tooltip": { "shared": true, "sort": 0, @@ -6245,7 +7029,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "reqps", + "format": "µs", "logBase": 1, "show": true }, @@ -6271,7 +7055,7 @@ }, "fieldConfig": { "defaults": { - "unit": "reqps" + "unit": "µs" }, "overrides": [] }, @@ -6281,10 +7065,10 @@ "h": 8, "w": 12, "x": 12, - "y": 104 + "y": 49 }, "hiddenSeries": false, - "id": 55, + "id": 27, "interval": "1s", "legend": { "alignAsTable": true, @@ -6303,7 +7087,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -6318,15 +7102,16 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_rps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", + "hide": false, "interval": "", "legendFormat": "", - "refId": "A" + "refId": "B" } ], "thresholds": [], "timeRegions": [], - "title": "write_rps", + "title": "write_latency", 
"tooltip": { "shared": true, "sort": 0, @@ -6341,7 +7126,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "reqps", + "format": "µs", "logBase": 1, "show": true }, @@ -6355,7 +7140,34 @@ "yaxis": { "align": false } - }, + } + ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], + "title": "s3_adaptor r/w performance", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 9 + }, + "id": 72, + "panels": [ { "aliasColors": {}, "bars": false, @@ -6367,7 +7179,7 @@ }, "fieldConfig": { "defaults": { - "unit": "bytes" + "unit": "short" }, "overrides": [] }, @@ -6377,10 +7189,10 @@ "h": 8, "w": 12, "x": 0, - "y": 112 + "y": 206 }, "hiddenSeries": false, - "id": 60, + "id": 73, "interval": "1s", "legend": { "alignAsTable": true, @@ -6399,7 +7211,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "8.0.6", "pointradius": 2, "points": false, "renderer": "flot", @@ -6414,7 +7226,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_size\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_disk_cache_qps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -6422,7 +7234,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "read_io_size", + "title": "read_diskcache_qps", "tooltip": { "shared": true, "sort": 0, @@ -6437,7 +7249,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "bytes", + "format": "short", "logBase": 1, "show": true }, @@ -6463,7 +7275,7 @@ }, "fieldConfig": { "defaults": { - "unit": "bytes" + "unit": "short" }, "overrides": [] }, @@ -6473,10 +7285,10 @@ "h": 8, "w": 12, "x": 12, - "y": 112 + "y": 206 }, "hiddenSeries": false, - "id": 61, + "id": 74, "interval": "1s", "legend": { 
"alignAsTable": true, @@ -6495,7 +7307,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "8.0.6", "pointradius": 2, "points": false, "renderer": "flot", @@ -6510,7 +7322,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_size\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_disk_cache_qps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -6518,7 +7330,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "write_io_size", + "title": "write_diskcache_qps", "tooltip": { "shared": true, "sort": 0, @@ -6533,7 +7345,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "bytes", + "format": "short", "logBase": 1, "show": true }, @@ -6559,7 +7371,7 @@ }, "fieldConfig": { "defaults": { - "unit": "µs" + "unit": "binBps" }, "overrides": [] }, @@ -6569,10 +7381,10 @@ "h": 8, "w": 12, "x": 0, - "y": 120 + "y": 214 }, "hiddenSeries": false, - "id": 26, + "id": 75, "interval": "1s", "legend": { "alignAsTable": true, @@ -6591,7 +7403,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "8.0.6", "pointradius": 2, "points": false, "renderer": "flot", @@ -6606,16 +7418,15 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", - "hide": false, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_disk_cache_bps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", - "refId": "B" + "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "read_latency", + "title": "read_diskcache_bps", "tooltip": { "shared": true, "sort": 0, @@ -6630,7 +7441,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "µs", + "format": "binBps", "logBase": 1, "show": true }, @@ -6656,7 +7467,7 @@ }, "fieldConfig": 
{ "defaults": { - "unit": "µs" + "unit": "binBps" }, "overrides": [] }, @@ -6666,10 +7477,10 @@ "h": 8, "w": 12, "x": 12, - "y": 120 + "y": 214 }, "hiddenSeries": false, - "id": 27, + "id": 76, "interval": "1s", "legend": { "alignAsTable": true, @@ -6688,7 +7499,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "8.0.6", "pointradius": 2, "points": false, "renderer": "flot", @@ -6703,16 +7514,15 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", - "hide": false, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_disk_cache_bps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", - "refId": "B" + "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "write_latency", + "title": "write_diskcache_bps", "tooltip": { "shared": true, "sort": 0, @@ -6727,7 +7537,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "µs", + "format": "binBps", "logBase": 1, "show": true }, @@ -6741,34 +7551,7 @@ "yaxis": { "align": false } - } - ], - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "refId": "A" - } - ], - "title": "s3_adaptor r/w performance", - "type": "row" - }, - { - "collapsed": true, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 56 - }, - "id": 72, - "panels": [ + }, { "aliasColors": {}, "bars": false, @@ -6780,7 +7563,7 @@ }, "fieldConfig": { "defaults": { - "unit": "short" + "unit": "µs" }, "overrides": [] }, @@ -6790,10 +7573,10 @@ "h": 8, "w": 12, "x": 0, - "y": 198 + "y": 222 }, "hiddenSeries": false, - "id": 73, + "id": 77, "interval": "1s", "legend": { "alignAsTable": true, @@ -6827,15 +7610,16 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": 
"{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_disk_cache_qps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_disk_cache_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", + "hide": false, "interval": "", "legendFormat": "", - "refId": "A" + "refId": "B" } ], "thresholds": [], "timeRegions": [], - "title": "read_diskcache_qps", + "title": "read_diskcache_latency", "tooltip": { "shared": true, "sort": 0, @@ -6850,7 +7634,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "short", + "format": "µs", "logBase": 1, "show": true }, @@ -6876,7 +7660,7 @@ }, "fieldConfig": { "defaults": { - "unit": "short" + "unit": "µs" }, "overrides": [] }, @@ -6886,10 +7670,10 @@ "h": 8, "w": 12, "x": 12, - "y": 198 + "y": 222 }, "hiddenSeries": false, - "id": 74, + "id": 78, "interval": "1s", "legend": { "alignAsTable": true, @@ -6923,15 +7707,16 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_disk_cache_qps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_disk_cache_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", + "hide": false, "interval": "", "legendFormat": "", - "refId": "A" + "refId": "B" } ], "thresholds": [], "timeRegions": [], - "title": "write_diskcache_qps", + "title": "write_diskcache_latency", "tooltip": { "shared": true, "sort": 0, @@ -6946,7 +7731,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "short", + "format": "µs", "logBase": 1, "show": true }, @@ -6960,7 +7745,34 @@ "yaxis": { "align": false } - }, + } + ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], + "title": "s3_adaptor r/w diskcache performance", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 10 + }, 
+ "id": 69, + "panels": [ { "aliasColors": {}, "bars": false, @@ -6972,7 +7784,7 @@ }, "fieldConfig": { "defaults": { - "unit": "binBps" + "unit": "short" }, "overrides": [] }, @@ -6982,10 +7794,10 @@ "h": 8, "w": 12, "x": 0, - "y": 206 + "y": 65 }, "hiddenSeries": false, - "id": 75, + "id": 62, "interval": "1s", "legend": { "alignAsTable": true, @@ -7004,7 +7816,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.0.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -7019,7 +7831,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_disk_cache_bps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_s3_qps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -7027,7 +7839,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "read_diskcache_bps", + "title": "read_s3_qps", "tooltip": { "shared": true, "sort": 0, @@ -7042,7 +7854,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "binBps", + "format": "short", "logBase": 1, "show": true }, @@ -7068,7 +7880,7 @@ }, "fieldConfig": { "defaults": { - "unit": "binBps" + "unit": "short" }, "overrides": [] }, @@ -7078,10 +7890,10 @@ "h": 8, "w": 12, "x": 12, - "y": 206 + "y": 65 }, "hiddenSeries": false, - "id": 76, + "id": 63, "interval": "1s", "legend": { "alignAsTable": true, @@ -7100,7 +7912,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.0.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -7115,7 +7927,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_disk_cache_bps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_s3_qps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -7123,7 +7935,7 @@ ], "thresholds": [], 
"timeRegions": [], - "title": "write_diskcache_bps", + "title": "write_s3_qps", "tooltip": { "shared": true, "sort": 0, @@ -7138,7 +7950,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "binBps", + "format": "short", "logBase": 1, "show": true }, @@ -7164,7 +7976,7 @@ }, "fieldConfig": { "defaults": { - "unit": "µs" + "unit": "binBps" }, "overrides": [] }, @@ -7174,10 +7986,10 @@ "h": 8, "w": 12, "x": 0, - "y": 214 + "y": 73 }, "hiddenSeries": false, - "id": 77, + "id": 66, "interval": "1s", "legend": { "alignAsTable": true, @@ -7196,7 +8008,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.0.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -7211,16 +8023,15 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_disk_cache_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", - "hide": false, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_s3_bps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", - "refId": "B" + "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "read_diskcache_latency", + "title": "read_s3_bps", "tooltip": { "shared": true, "sort": 0, @@ -7235,7 +8046,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "µs", + "format": "binBps", "logBase": 1, "show": true }, @@ -7261,7 +8072,7 @@ }, "fieldConfig": { "defaults": { - "unit": "µs" + "unit": "binBps" }, "overrides": [] }, @@ -7271,10 +8082,10 @@ "h": 8, "w": 12, "x": 12, - "y": 214 + "y": 73 }, "hiddenSeries": false, - "id": 78, + "id": 65, "interval": "1s", "legend": { "alignAsTable": true, @@ -7293,7 +8104,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.0.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -7308,72 +8119,44 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": 
"{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_disk_cache_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", - "hide": false, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_s3_bps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", - "refId": "B" + "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "write_diskcache_latency", + "title": "write_s3_bps", "tooltip": { "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "µs", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - } - ], - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "refId": "A" - } - ], - "title": "s3_adaptor r/w diskcache performance", - "type": "row" - }, - { - "collapsed": true, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 57 - }, - "id": 69, - "panels": [ + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "binBps", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, { "aliasColors": {}, "bars": false, @@ -7385,7 +8168,7 @@ }, "fieldConfig": { "defaults": { - "unit": "short" + "unit": "µs" }, "overrides": [] }, @@ -7395,10 +8178,10 @@ "h": 8, "w": 12, "x": 0, - "y": 57 + "y": 81 }, "hiddenSeries": false, - "id": 62, + "id": 64, "interval": "1s", "legend": { "alignAsTable": true, @@ -7432,15 +8215,16 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": 
"{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_s3_qps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_s3_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", + "hide": false, "interval": "", "legendFormat": "", - "refId": "A" + "refId": "B" } ], "thresholds": [], "timeRegions": [], - "title": "read_s3_qps", + "title": "read_s3_latency", "tooltip": { "shared": true, "sort": 0, @@ -7455,7 +8239,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "short", + "format": "µs", "logBase": 1, "show": true }, @@ -7481,7 +8265,7 @@ }, "fieldConfig": { "defaults": { - "unit": "short" + "unit": "µs" }, "overrides": [] }, @@ -7491,10 +8275,10 @@ "h": 8, "w": 12, "x": 12, - "y": 57 + "y": 81 }, "hiddenSeries": false, - "id": 63, + "id": 67, "interval": "1s", "legend": { "alignAsTable": true, @@ -7528,15 +8312,16 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_s3_qps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_s3_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", + "hide": false, "interval": "", "legendFormat": "", - "refId": "A" + "refId": "B" } ], "thresholds": [], "timeRegions": [], - "title": "write_s3_qps", + "title": "write_s3_latency", "tooltip": { "shared": true, "sort": 0, @@ -7551,7 +8336,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "short", + "format": "µs", "logBase": 1, "show": true }, @@ -7565,7 +8350,34 @@ "yaxis": { "align": false } - }, + } + ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], + "title": "s3_adaptor r/w s3 performance", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 11 + }, + "id": 80, + "panels": [ { "aliasColors": {}, "bars": false, @@ 
-7577,7 +8389,7 @@ }, "fieldConfig": { "defaults": { - "unit": "binBps" + "unit": "short" }, "overrides": [] }, @@ -7587,10 +8399,10 @@ "h": 8, "w": 12, "x": 0, - "y": 65 + "y": 256 }, "hiddenSeries": false, - "id": 66, + "id": 81, "interval": "1s", "legend": { "alignAsTable": true, @@ -7609,7 +8421,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "8.0.6", "pointradius": 2, "points": false, "renderer": "flot", @@ -7624,7 +8436,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_s3_bps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_disk_cache.*[[fs:regex]]_write_s3_qps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -7632,7 +8444,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "read_s3_bps", + "title": "write_s3_qps", "tooltip": { "shared": true, "sort": 0, @@ -7647,7 +8459,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "binBps", + "format": "short", "logBase": 1, "show": true }, @@ -7683,10 +8495,10 @@ "h": 8, "w": 12, "x": 12, - "y": 65 + "y": 256 }, "hiddenSeries": false, - "id": 65, + "id": 82, "interval": "1s", "legend": { "alignAsTable": true, @@ -7705,7 +8517,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "8.0.6", "pointradius": 2, "points": false, "renderer": "flot", @@ -7720,7 +8532,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_s3_bps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_disk_cache.*[[fs:regex]]_write_s3_bps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -7769,7 +8581,7 @@ }, "fieldConfig": { "defaults": { - "unit": "µs" + "unit": "short" }, "overrides": [] }, @@ -7779,10 +8591,10 @@ "h": 8, "w": 12, "x": 0, - "y": 73 + "y": 264 }, "hiddenSeries": false, - "id": 64, + "id": 83, "interval": "1s", 
"legend": { "alignAsTable": true, @@ -7801,7 +8613,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "8.0.6", "pointradius": 2, "points": false, "renderer": "flot", @@ -7816,7 +8628,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_s3_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_disk_cache.*[[fs:regex]]_write_s3_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", "hide": false, "interval": "", "legendFormat": "", @@ -7825,7 +8637,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "read_s3_latency", + "title": "write_s3_latency", "tooltip": { "shared": true, "sort": 0, @@ -7840,7 +8652,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "µs", + "format": "short", "logBase": 1, "show": true }, @@ -7854,7 +8666,34 @@ "yaxis": { "align": false } - }, + } + ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], + "title": "diskcache r/w s3 performance", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 12 + }, + "id": 111, + "panels": [ { "aliasColors": {}, "bars": false, @@ -7866,7 +8705,7 @@ }, "fieldConfig": { "defaults": { - "unit": "µs" + "unit": "short" }, "overrides": [] }, @@ -7875,11 +8714,11 @@ "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 73 + "x": 0, + "y": 273 }, "hiddenSeries": false, - "id": 67, + "id": 112, "interval": "1s", "legend": { "alignAsTable": true, @@ -7898,7 +8737,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.0.3", + "pluginVersion": "8.0.6", "pointradius": 2, "points": false, "renderer": "flot", @@ -7913,16 +8752,15 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": 
"{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_s3_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", - "hide": false, + "expr": "{__name__=~\"curvefs.*_file_manager_num\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", - "refId": "B" + "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "write_s3_latency", + "title": "file_manager_num", "tooltip": { "shared": true, "sort": 0, @@ -7937,7 +8775,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "µs", + "format": "short", "logBase": 1, "show": true }, @@ -7951,34 +8789,7 @@ "yaxis": { "align": false } - } - ], - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "refId": "A" - } - ], - "title": "s3_adaptor r/w s3 performance", - "type": "row" - }, - { - "collapsed": true, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 58 - }, - "id": 80, - "panels": [ + }, { "aliasColors": {}, "bars": false, @@ -7999,11 +8810,11 @@ "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 248 + "x": 12, + "y": 273 }, "hiddenSeries": false, - "id": 81, + "id": 113, "interval": "1s", "legend": { "alignAsTable": true, @@ -8037,7 +8848,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_disk_cache.*[[fs:regex]]_write_s3_qps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs.*_chunk_manager_num\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -8045,7 +8856,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "write_s3_qps", + "title": "chunk_manager_num", "tooltip": { "shared": true, "sort": 0, @@ -8074,7 +8885,34 @@ "yaxis": { "align": false } - }, + } + ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], + "title": "manager metric", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "prometheus", 
+ "uid": "PBFA97CFB590B2093" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 13 + }, + "id": 121, + "panels": [ { "aliasColors": {}, "bars": false, @@ -8086,7 +8924,7 @@ }, "fieldConfig": { "defaults": { - "unit": "binBps" + "unit": "short" }, "overrides": [] }, @@ -8095,11 +8933,11 @@ "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 248 + "x": 0, + "y": 210 }, "hiddenSeries": false, - "id": 82, + "id": 122, "interval": "1s", "legend": { "alignAsTable": true, @@ -8133,7 +8971,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_disk_cache.*[[fs:regex]]_write_s3_bps\", instance=~\"$instance\"}", + "expr": "{__name__=~\"diskcache_cache_count\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -8141,7 +8979,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "write_s3_bps", + "title": "diskcache_cache_count", "tooltip": { "shared": true, "sort": 0, @@ -8156,7 +8994,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "binBps", + "format": "short", "logBase": 1, "show": true }, @@ -8182,7 +9020,7 @@ }, "fieldConfig": { "defaults": { - "unit": "short" + "unit": "bytes" }, "overrides": [] }, @@ -8191,11 +9029,11 @@ "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 256 + "x": 12, + "y": 210 }, "hiddenSeries": false, - "id": 83, + "id": 123, "interval": "1s", "legend": { "alignAsTable": true, @@ -8229,16 +9067,15 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs_disk_cache.*[[fs:regex]]_write_s3_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", - "hide": false, + "expr": "{__name__=~\"diskcache_cache_bytes\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", - "refId": "B" + "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "write_s3_latency", + "title": "diskcache_cache_bytes", "tooltip": { "shared": true, "sort": 0, @@ -8253,7 +9090,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "short", + "format": 
"bytes", "logBase": 1, "show": true }, @@ -8267,34 +9104,7 @@ "yaxis": { "align": false } - } - ], - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "refId": "A" - } - ], - "title": "diskcache r/w s3 performance", - "type": "row" - }, - { - "collapsed": true, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 59 - }, - "id": 111, - "panels": [ + }, { "aliasColors": {}, "bars": false, @@ -8306,7 +9116,7 @@ }, "fieldConfig": { "defaults": { - "unit": "short" + "unit": "none" }, "overrides": [] }, @@ -8316,10 +9126,10 @@ "h": 8, "w": 12, "x": 0, - "y": 265 + "y": 218 }, "hiddenSeries": false, - "id": 112, + "id": 124, "interval": "1s", "legend": { "alignAsTable": true, @@ -8353,7 +9163,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs.*_file_manager_num\", instance=~\"$instance\"}", + "expr": "{__name__=~\"icache_cache_count\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -8361,7 +9171,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "file_manager_num", + "title": "icache_cache_count", "tooltip": { "shared": true, "sort": 0, @@ -8376,7 +9186,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "short", + "format": "none", "logBase": 1, "show": true }, @@ -8402,7 +9212,7 @@ }, "fieldConfig": { "defaults": { - "unit": "short" + "unit": "none" }, "overrides": [] }, @@ -8412,10 +9222,10 @@ "h": 8, "w": 12, "x": 12, - "y": 265 + "y": 218 }, "hiddenSeries": false, - "id": 113, + "id": 125, "interval": "1s", "legend": { "alignAsTable": true, @@ -8449,7 +9259,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"curvefs.*_chunk_manager_num\", instance=~\"$instance\"}", + "expr": "{__name__=~\"inode_s3_chunk_info_size\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -8457,7 +9267,7 @@ ], "thresholds": [], "timeRegions": 
[], - "title": "chunk_manager_num", + "title": "inode_s3_chunk_info_size", "tooltip": { "shared": true, "sort": 0, @@ -8472,7 +9282,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "short", + "format": "none", "logBase": 1, "show": true }, @@ -8486,34 +9296,7 @@ "yaxis": { "align": false } - } - ], - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "refId": "A" - } - ], - "title": "manager metric", - "type": "row" - }, - { - "collapsed": true, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 60 - }, - "id": 121, - "panels": [ + }, { "aliasColors": {}, "bars": false, @@ -8535,10 +9318,10 @@ "h": 8, "w": 12, "x": 0, - "y": 202 + "y": 226 }, "hiddenSeries": false, - "id": 122, + "id": 126, "interval": "1s", "legend": { "alignAsTable": true, @@ -8572,7 +9355,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"diskcache_cache_count\", instance=~\"$instance\"}", + "expr": "{__name__=~\"dcache_cache_count\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -8580,7 +9363,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "diskcache_cache_count", + "title": "dcache_cache_count", "tooltip": { "shared": true, "sort": 0, @@ -8621,7 +9404,7 @@ }, "fieldConfig": { "defaults": { - "unit": "bytes" + "unit": "short" }, "overrides": [] }, @@ -8631,10 +9414,10 @@ "h": 8, "w": 12, "x": 12, - "y": 202 + "y": 226 }, "hiddenSeries": false, - "id": 123, + "id": 127, "interval": "1s", "legend": { "alignAsTable": true, @@ -8668,7 +9451,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"diskcache_cache_bytes\", instance=~\"$instance\"}", + "expr": "{__name__=~\"dcache_cache_bytes\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -8676,7 +9459,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "diskcache_cache_bytes", + "title": 
"dcache_cache_bytes", "tooltip": { "shared": true, "sort": 0, @@ -8691,7 +9474,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "bytes", + "format": "short", "logBase": 1, "show": true }, @@ -8705,7 +9488,34 @@ "yaxis": { "align": false } - }, + } + ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], + "title": "lru cache", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 14 + }, + "id": 115, + "panels": [ { "aliasColors": {}, "bars": false, @@ -8715,9 +9525,10 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "description": "", "fieldConfig": { "defaults": { - "unit": "none" + "unit": "bytes" }, "overrides": [] }, @@ -8727,10 +9538,10 @@ "h": 8, "w": 12, "x": 0, - "y": 210 + "y": 15 }, "hiddenSeries": false, - "id": 124, + "id": 209, "interval": "1s", "legend": { "alignAsTable": true, @@ -8749,7 +9560,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -8763,16 +9574,18 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "editorMode": "code", "exemplar": true, - "expr": "{__name__=~\"icache_cache_count\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_disk_cache_[[fs:regex]]_diskcache_totalbytes\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", + "range": true, "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "icache_cache_count", + "title": "total", "tooltip": { "shared": true, "sort": 0, @@ -8787,7 +9600,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "none", + "format": "bytes", "logBase": 1, "show": true }, @@ -8813,7 +9626,7 @@ }, "fieldConfig": { "defaults": { - "unit": "none" + "unit": "bytes" }, "overrides": [] }, @@ -8823,10 +9636,10 @@ "h": 8, "w": 12, "x": 12, - "y": 210 + 
"y": 15 }, "hiddenSeries": false, - "id": 125, + "id": 178, "interval": "1s", "legend": { "alignAsTable": true, @@ -8845,7 +9658,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -8860,7 +9673,7 @@ "uid": "PBFA97CFB590B2093" }, "exemplar": true, - "expr": "{__name__=~\"inode_s3_chunk_info_size\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_disk_cache_[[fs:regex]]_diskcache_usedbytes\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" @@ -8868,7 +9681,7 @@ ], "thresholds": [], "timeRegions": [], - "title": "inode_s3_chunk_info_size", + "title": "used", "tooltip": { "shared": true, "sort": 0, @@ -8883,7 +9696,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "none", + "format": "bytes", "logBase": 1, "show": true }, @@ -8907,6 +9720,7 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "description": "Number of objects in cache", "fieldConfig": { "defaults": { "unit": "short" @@ -8919,10 +9733,10 @@ "h": 8, "w": 12, "x": 0, - "y": 218 + "y": 23 }, "hiddenSeries": false, - "id": 126, + "id": 210, "interval": "1s", "legend": { "alignAsTable": true, @@ -8941,7 +9755,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -8955,16 +9769,18 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "editorMode": "code", "exemplar": true, - "expr": "{__name__=~\"dcache_cache_count\", instance=~\"$instance\"}", + "expr": "{__name__=~\"diskcache_cache_count\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", + "range": true, "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "dcache_cache_count", + "title": "cache count", "tooltip": { "shared": true, "sort": 0, @@ -9003,9 +9819,10 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "description": "Cache hit rate", 
"fieldConfig": { "defaults": { - "unit": "short" + "unit": "percentunit" }, "overrides": [] }, @@ -9015,10 +9832,10 @@ "h": 8, "w": 12, "x": 12, - "y": 218 + "y": 23 }, "hiddenSeries": false, - "id": 127, + "id": 213, "interval": "1s", "legend": { "alignAsTable": true, @@ -9037,7 +9854,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -9051,16 +9868,44 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "editorMode": "code", "exemplar": true, - "expr": "{__name__=~\"dcache_cache_bytes\", instance=~\"$instance\"}", + "expr": "{__name__=~\"diskcache_cache_hit\", instance=~\"$instance\"}", + "hide": true, "interval": "", "legendFormat": "", - "refId": "A" + "range": true, + "refId": "hit" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": true, + "expr": "{__name__=~\"diskcache_cache_miss\", instance=~\"$instance\"}", + "hide": true, + "interval": "", + "legendFormat": "", + "range": true, + "refId": "miss" + }, + { + "datasource": { + "name": "Expression", + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "$hit/($hit+$miss)", + "hide": false, + "refId": "A", + "type": "math" } ], "thresholds": [], "timeRegions": [], - "title": "dcache_cache_bytes", + "title": "cache hit rate", "tooltip": { "shared": true, "sort": 0, @@ -9075,7 +9920,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "short", + "format": "percentunit", "logBase": 1, "show": true }, @@ -9089,34 +9934,7 @@ "yaxis": { "align": false } - } - ], - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "refId": "A" - } - ], - "title": "lru cache", - "type": "row" - }, - { - "collapsed": true, - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 61 - }, - "id": 115, - "panels": [ + }, 
{ "aliasColors": {}, "bars": false, @@ -9126,6 +9944,7 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "description": "Number of cache hits", "fieldConfig": { "defaults": { "unit": "short" @@ -9138,10 +9957,10 @@ "h": 8, "w": 12, "x": 0, - "y": 299 + "y": 31 }, "hiddenSeries": false, - "id": 116, + "id": 211, "interval": "1s", "legend": { "alignAsTable": true, @@ -9160,7 +9979,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -9174,16 +9993,18 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "editorMode": "code", "exemplar": true, - "expr": "{__name__=~\"curvefs.*_read_data_cache_num\", instance=~\"$instance\"}", + "expr": "{__name__=~\"diskcache_cache_hit\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", + "range": true, "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "read_data_cache_num", + "title": "cache hit", "tooltip": { "shared": true, "sort": 0, @@ -9222,6 +10043,7 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "description": "Number of cache misses", "fieldConfig": { "defaults": { "unit": "short" @@ -9234,10 +10056,10 @@ "h": 8, "w": 12, "x": 12, - "y": 299 + "y": 31 }, "hiddenSeries": false, - "id": 117, + "id": 212, "interval": "1s", "legend": { "alignAsTable": true, @@ -9256,7 +10078,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -9270,16 +10092,18 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "editorMode": "code", "exemplar": true, - "expr": "{__name__=~\"curvefs.*_write_data_cache_num\", instance=~\"$instance\"}", + "expr": "{__name__=~\"diskcache_cache_miss\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", + "range": true, "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "write_data_cache_num", + "title": "cache 
miss", "tooltip": { "shared": true, "sort": 0, @@ -9318,6 +10142,7 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "description": "This metric measures the efficiency of evicting objects from the cache disk, recorded as the number of objects evicted from the cache per second", "fieldConfig": { "defaults": { "unit": "short" @@ -9330,10 +10155,10 @@ "h": 8, "w": 12, "x": 0, - "y": 307 + "y": 39 }, "hiddenSeries": false, - "id": 118, + "id": 214, "interval": "1s", "legend": { "alignAsTable": true, @@ -9352,7 +10177,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -9366,16 +10191,18 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "editorMode": "code", "exemplar": true, - "expr": "{__name__=~\"curvefs.*_read_data_cache_byte\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_disk_cache_[[fs:regex]]_diskcache_trim_qps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", + "range": true, "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "read_data_cache_byte", + "title": "trim qps", "tooltip": { "shared": true, "sort": 0, @@ -9414,9 +10241,10 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "description": "This metric tracks the cache eviction throughput in terms of bandwidth. 
It measures how much data is being evicted from the cache disk per second, recorded in units like megabytes/second or gigabytes/second.", "fieldConfig": { "defaults": { - "unit": "short" + "unit": "binBps" }, "overrides": [] }, @@ -9426,10 +10254,10 @@ "h": 8, "w": 12, "x": 12, - "y": 307 + "y": 39 }, "hiddenSeries": false, - "id": 119, + "id": 215, "interval": "1s", "legend": { "alignAsTable": true, @@ -9448,7 +10276,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -9462,16 +10290,18 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "editorMode": "code", "exemplar": true, - "expr": "{__name__=~\"curvefs.*_write_data_cache_byte\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_disk_cache_[[fs:regex]]_diskcache_trim_bps\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", + "range": true, "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "write_data_cache_byte", + "title": "trim bps", "tooltip": { "shared": true, "sort": 0, @@ -9486,7 +10316,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "short", + "format": "binBps", "logBase": 1, "show": true }, @@ -9510,9 +10340,10 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "description": "This metric tracks the cache eviction latency. 
It measures how long it takes to evict objects from the cache disk, recorded in units like milliseconds or microseconds per eviction.", "fieldConfig": { "defaults": { - "unit": "bytes" + "unit": "µs" }, "overrides": [] }, @@ -9522,10 +10353,10 @@ "h": 8, "w": 12, "x": 0, - "y": 315 + "y": 47 }, "hiddenSeries": false, - "id": 178, + "id": 216, "interval": "1s", "legend": { "alignAsTable": true, @@ -9544,7 +10375,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "10.1.4", "pointradius": 2, "points": false, "renderer": "flot", @@ -9558,16 +10389,18 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "editorMode": "code", "exemplar": true, - "expr": "{__name__=~\"curvefs_disk_cache_[[fs:regex]]_diskcache_usedbytes\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_disk_cache_[[fs:regex]]_diskcache_trim_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", + "range": true, "refId": "A" } ], "thresholds": [], "timeRegions": [], - "title": "diskcache_usedbytes", + "title": "trim latency", "tooltip": { "shared": true, "sort": 0, @@ -9582,7 +10415,7 @@ "yaxes": [ { "$$hashKey": "object:212", - "format": "bytes", + "format": "µs", "logBase": 1, "show": true }, @@ -9607,7 +10440,7 @@ "refId": "A" } ], - "title": "diskcache cache num/byte", + "title": "diskcache cache", "type": "row" }, { @@ -9620,7 +10453,7 @@ "h": 1, "w": 24, "x": 0, - "y": 62 + "y": 15 }, "id": 32, "panels": [ @@ -9645,7 +10478,7 @@ "h": 6, "w": 12, "x": 0, - "y": 324 + "y": 332 }, "hiddenSeries": false, "id": 36, @@ -9741,7 +10574,7 @@ "h": 6, "w": 12, "x": 12, - "y": 324 + "y": 332 }, "hiddenSeries": false, "id": 128, @@ -9837,7 +10670,7 @@ "h": 6, "w": 12, "x": 0, - "y": 330 + "y": 338 }, "hiddenSeries": false, "id": 129, @@ -9933,7 +10766,7 @@ "h": 6, "w": 12, "x": 12, - "y": 330 + "y": 338 }, "hiddenSeries": false, "id": 130, @@ -10029,7 +10862,7 @@ "h": 6, "w": 12, "x": 0, - "y": 336 + "y": 344 }, 
"hiddenSeries": false, "id": 131, @@ -10125,7 +10958,7 @@ "h": 6, "w": 12, "x": 12, - "y": 336 + "y": 344 }, "hiddenSeries": false, "id": 132, @@ -10221,7 +11054,7 @@ "h": 6, "w": 12, "x": 0, - "y": 342 + "y": 350 }, "hiddenSeries": false, "id": 133, @@ -10317,7 +11150,7 @@ "h": 6, "w": 12, "x": 12, - "y": 342 + "y": 350 }, "hiddenSeries": false, "id": 134, @@ -10413,7 +11246,7 @@ "h": 6, "w": 12, "x": 0, - "y": 348 + "y": 356 }, "hiddenSeries": false, "id": 135, @@ -10509,7 +11342,7 @@ "h": 6, "w": 12, "x": 12, - "y": 348 + "y": 356 }, "hiddenSeries": false, "id": 136, @@ -10605,7 +11438,7 @@ "h": 6, "w": 12, "x": 0, - "y": 354 + "y": 362 }, "hiddenSeries": false, "id": 137, @@ -10701,7 +11534,7 @@ "h": 6, "w": 12, "x": 12, - "y": 354 + "y": 362 }, "hiddenSeries": false, "id": 138, @@ -10797,7 +11630,7 @@ "h": 6, "w": 12, "x": 0, - "y": 360 + "y": 368 }, "hiddenSeries": false, "id": 139, @@ -10893,7 +11726,7 @@ "h": 6, "w": 12, "x": 12, - "y": 360 + "y": 368 }, "hiddenSeries": false, "id": 140, @@ -10988,7 +11821,7 @@ "h": 6, "w": 12, "x": 0, - "y": 366 + "y": 374 }, "hiddenSeries": false, "id": 141, @@ -11083,7 +11916,7 @@ "h": 6, "w": 12, "x": 12, - "y": 366 + "y": 374 }, "hiddenSeries": false, "id": 142, @@ -11179,7 +12012,7 @@ "h": 6, "w": 12, "x": 0, - "y": 372 + "y": 380 }, "hiddenSeries": false, "id": 143, @@ -11275,7 +12108,7 @@ "h": 6, "w": 12, "x": 12, - "y": 372 + "y": 380 }, "hiddenSeries": false, "id": 144, @@ -11371,7 +12204,7 @@ "h": 6, "w": 12, "x": 0, - "y": 378 + "y": 386 }, "hiddenSeries": false, "id": 145, @@ -11467,7 +12300,7 @@ "h": 6, "w": 12, "x": 12, - "y": 378 + "y": 386 }, "hiddenSeries": false, "id": 146, @@ -11563,7 +12396,7 @@ "h": 6, "w": 12, "x": 0, - "y": 384 + "y": 392 }, "hiddenSeries": false, "id": 147, @@ -11659,7 +12492,7 @@ "h": 6, "w": 12, "x": 12, - "y": 384 + "y": 392 }, "hiddenSeries": false, "id": 148, @@ -11755,7 +12588,7 @@ "h": 6, "w": 12, "x": 0, - "y": 390 + "y": 398 }, "hiddenSeries": false, "id": 149, @@ -11851,7 
+12684,7 @@ "h": 6, "w": 12, "x": 12, - "y": 390 + "y": 398 }, "hiddenSeries": false, "id": 150, @@ -11949,7 +12782,7 @@ "h": 1, "w": 24, "x": 0, - "y": 63 + "y": 16 }, "id": 34, "panels": [ @@ -11974,7 +12807,7 @@ "h": 6, "w": 12, "x": 0, - "y": 397 + "y": 405 }, "hiddenSeries": false, "id": 155, @@ -12070,7 +12903,7 @@ "h": 6, "w": 12, "x": 12, - "y": 397 + "y": 405 }, "hiddenSeries": false, "id": 152, @@ -12166,7 +12999,7 @@ "h": 6, "w": 12, "x": 0, - "y": 403 + "y": 411 }, "hiddenSeries": false, "id": 153, @@ -12262,7 +13095,7 @@ "h": 6, "w": 12, "x": 12, - "y": 403 + "y": 411 }, "hiddenSeries": false, "id": 156, @@ -12358,7 +13191,7 @@ "h": 6, "w": 12, "x": 0, - "y": 409 + "y": 417 }, "hiddenSeries": false, "id": 157, @@ -12454,7 +13287,7 @@ "h": 6, "w": 12, "x": 12, - "y": 409 + "y": 417 }, "hiddenSeries": false, "id": 154, @@ -12550,7 +13383,7 @@ "h": 6, "w": 12, "x": 0, - "y": 415 + "y": 423 }, "hiddenSeries": false, "id": 151, @@ -12646,7 +13479,7 @@ "h": 6, "w": 12, "x": 12, - "y": 415 + "y": 423 }, "hiddenSeries": false, "id": 158, @@ -12742,7 +13575,7 @@ "h": 6, "w": 12, "x": 0, - "y": 421 + "y": 429 }, "hiddenSeries": false, "id": 159, @@ -12838,7 +13671,7 @@ "h": 6, "w": 12, "x": 12, - "y": 421 + "y": 429 }, "hiddenSeries": false, "id": 160, @@ -12934,7 +13767,7 @@ "h": 6, "w": 12, "x": 0, - "y": 427 + "y": 435 }, "hiddenSeries": false, "id": 161, @@ -13030,7 +13863,7 @@ "h": 6, "w": 12, "x": 12, - "y": 427 + "y": 435 }, "hiddenSeries": false, "id": 162, @@ -13126,7 +13959,7 @@ "h": 6, "w": 12, "x": 0, - "y": 433 + "y": 441 }, "hiddenSeries": false, "id": 163, @@ -13222,7 +14055,7 @@ "h": 6, "w": 12, "x": 12, - "y": 433 + "y": 441 }, "hiddenSeries": false, "id": 164, @@ -13318,7 +14151,7 @@ "h": 6, "w": 12, "x": 0, - "y": 439 + "y": 447 }, "hiddenSeries": false, "id": 165, @@ -13414,7 +14247,7 @@ "h": 6, "w": 12, "x": 12, - "y": 439 + "y": 447 }, "hiddenSeries": false, "id": 166, @@ -13510,7 +14343,7 @@ "h": 6, "w": 12, "x": 0, - "y": 445 + "y": 453 
}, "hiddenSeries": false, "id": 167, @@ -13606,7 +14439,7 @@ "h": 6, "w": 12, "x": 12, - "y": 445 + "y": 453 }, "hiddenSeries": false, "id": 168, @@ -13702,7 +14535,7 @@ "h": 6, "w": 12, "x": 0, - "y": 451 + "y": 459 }, "hiddenSeries": false, "id": 169, @@ -13798,7 +14631,7 @@ "h": 6, "w": 12, "x": 12, - "y": 451 + "y": 459 }, "hiddenSeries": false, "id": 170, @@ -13894,7 +14727,7 @@ "h": 6, "w": 12, "x": 0, - "y": 457 + "y": 465 }, "hiddenSeries": false, "id": 171, @@ -13990,7 +14823,7 @@ "h": 6, "w": 12, "x": 12, - "y": 457 + "y": 465 }, "hiddenSeries": false, "id": 172, @@ -14086,7 +14919,7 @@ "h": 6, "w": 12, "x": 0, - "y": 463 + "y": 471 }, "hiddenSeries": false, "id": 173, @@ -14182,7 +15015,7 @@ "h": 6, "w": 12, "x": 12, - "y": 463 + "y": 471 }, "hiddenSeries": false, "id": 174, @@ -14276,7 +15109,7 @@ "h": 1, "w": 24, "x": 0, - "y": 64 + "y": 17 }, "id": 180, "panels": [ @@ -14341,7 +15174,7 @@ "h": 8, "w": 12, "x": 0, - "y": 166 + "y": 174 }, "id": 182, "options": { @@ -14436,7 +15269,7 @@ "h": 8, "w": 12, "x": 12, - "y": 166 + "y": 174 }, "id": 184, "options": { @@ -14532,7 +15365,7 @@ "h": 8, "w": 12, "x": 0, - "y": 174 + "y": 182 }, "id": 186, "options": { @@ -14577,13 +15410,9 @@ "list": [ { "current": { - "selected": true, - "text": [ - "fuxi-hangyan-01:9000" - ], - "value": [ - "fuxi-hangyan-01:9000" - ] + "selected": false, + "text": "All", + "value": "$__all" }, "datasource": { "type": "prometheus", @@ -14642,10 +15471,10 @@ "current": { "selected": true, "text": [ - "All" + "latency" ], "value": [ - "$__all" + "latency" ] }, "datasource": { @@ -14710,6 +15539,6 @@ "timezone": "", "title": "client", "uid": "I2_uSSenk", - "version": 2, + "version": 1, "weekStart": "" } \ No newline at end of file diff --git a/curvefs/proto/common.proto b/curvefs/proto/common.proto index d2772dbf46..a56402c744 100644 --- a/curvefs/proto/common.proto +++ b/curvefs/proto/common.proto @@ -92,6 +92,14 @@ message PartitionInfo { optional bool manageFlag = 13; // if a 
partition has recyclebin inode, set this flag true } +message AppliedIndex { + required int64 index = 1; +} + +message ItemCount { + required uint64 count = 1; +} + message Peer { optional uint64 id = 1; optional string address = 2; diff --git a/curvefs/proto/metaserver.proto b/curvefs/proto/metaserver.proto index 266b6d8407..f0ab8167a2 100644 --- a/curvefs/proto/metaserver.proto +++ b/curvefs/proto/metaserver.proto @@ -145,6 +145,15 @@ message PrepareRenameTxRequest { repeated Dentry dentrys = 4; } +message TransactionRequest { + enum TransactionType { + None = 0; + Rename = 1; + } + required TransactionType type = 1; + required string rawPayload = 2; +} + message PrepareRenameTxResponse { required MetaStatusCode statusCode = 1; optional uint64 appliedIndex = 2; diff --git a/curvefs/src/client/common/common.cpp b/curvefs/src/client/common/common.cpp index 6567792c0c..b50898a630 100644 --- a/curvefs/src/client/common/common.cpp +++ b/curvefs/src/client/common/common.cpp @@ -79,7 +79,7 @@ std::ostream &operator<<(std::ostream &os, MetaServerOpType optype) { } const char kCurveFsWarmupOpAdd[] = "add"; -const char kCurveFsWarmupOpQuery[] = "query"; +const char kCurveFsWarmupOpCancel[] = "cancel"; const char kCurveFsWarmupTypeList[] = "list"; const char kCurveFsWarmupTypeSingle[] = "single"; @@ -87,9 +87,8 @@ WarmupOpType GetWarmupOpType(const std::string& op) { auto ret = WarmupOpType::kWarmupOpUnknown; if (op == kCurveFsWarmupOpAdd) { ret = WarmupOpType::kWarmupOpAdd; - } - if (op == kCurveFsWarmupOpQuery) { - ret = WarmupOpType::kWarmupOpQuery; + } else if (op == kCurveFsWarmupOpCancel) { + ret = WarmupOpType::kWarmupOpCancel; } return ret; } diff --git a/curvefs/src/client/common/common.h b/curvefs/src/client/common/common.h index b8b6cf9d7d..0a1dd00d09 100644 --- a/curvefs/src/client/common/common.h +++ b/curvefs/src/client/common/common.h @@ -69,18 +69,27 @@ const uint32_t MAX_XATTR_NAME_LENGTH = 255; const uint32_t MAX_XATTR_VALUE_LENGTH = 64 * 1024; const char 
kCurveFsWarmupXAttr[] = "curvefs.warmup.op"; +const char kCurveFsWarmupXAttrList[] = "curvefs.warmup.op.list"; - -constexpr int kWarmupOpNum = 4; +constexpr size_t kMinWarmupOpArgsNum = 1; +constexpr size_t kWarmupAddArgsNum = 6; +constexpr size_t kWarmupCancelArgsNum = 2; enum class WarmupOpType { kWarmupOpUnknown = 0, kWarmupOpAdd = 1, - kWarmupOpQuery = 2, + kWarmupOpCancel = 2, }; WarmupOpType GetWarmupOpType(const std::string& op); +constexpr int kWarmupOpType = 0; +constexpr int kWarmupDataType = 1; +constexpr int kEntryFilePathInClient = 2; +constexpr int kWarmupCacheStorageType = 3; +constexpr int kMountPointInCurvefs = 4; +constexpr int kRootPathInCurvefs = 5; + enum class WarmupType { kWarmupTypeUnknown = 0, kWarmupTypeList = 1, diff --git a/curvefs/src/client/curve_fuse_op.cpp b/curvefs/src/client/curve_fuse_op.cpp index c81cdd5a6d..44e3f16a71 100644 --- a/curvefs/src/client/curve_fuse_op.cpp +++ b/curvefs/src/client/curve_fuse_op.cpp @@ -21,30 +21,32 @@ * Author: xuchaojie */ -#include -#include +#include "curvefs/src/client/curve_fuse_op.h" + #include +#include +#include +#include #include #include -#include "curvefs/src/client/curve_fuse_op.h" -#include "curvefs/src/client/fuse_client.h" -#include "curvefs/src/client/filesystem/error.h" -#include "curvefs/src/client/common/config.h" #include "curvefs/src/client/common/common.h" -#include "src/common/configuration.h" -#include "src/common/gflags_helper.h" -#include "curvefs/src/client/s3/client_s3_adaptor.h" -#include "curvefs/src/client/fuse_volume_client.h" +#include "curvefs/src/client/common/config.h" +#include "curvefs/src/client/filesystem/access_log.h" +#include "curvefs/src/client/filesystem/error.h" +#include "curvefs/src/client/filesystem/meta.h" +#include "curvefs/src/client/fuse_client.h" #include "curvefs/src/client/fuse_s3_client.h" -#include "curvefs/src/client/rpcclient/mds_client.h" -#include "curvefs/src/client/rpcclient/base_client.h" +#include 
"curvefs/src/client/fuse_volume_client.h" #include "curvefs/src/client/metric/client_metric.h" -#include "curvefs/src/common/metric_utils.h" -#include "curvefs/src/common/dynamic_vlog.h" +#include "curvefs/src/client/rpcclient/base_client.h" +#include "curvefs/src/client/rpcclient/mds_client.h" +#include "curvefs/src/client/s3/client_s3_adaptor.h" #include "curvefs/src/client/warmup/warmup_manager.h" -#include "curvefs/src/client/filesystem/meta.h" -#include "curvefs/src/client/filesystem/access_log.h" +#include "curvefs/src/common/dynamic_vlog.h" +#include "curvefs/src/common/metric_utils.h" +#include "src/common/configuration.h" +#include "src/common/gflags_helper.h" using ::curve::common::Configuration; using ::curvefs::client::CURVEFS_ERROR; @@ -52,21 +54,29 @@ using ::curvefs::client::FuseClient; using ::curvefs::client::FuseS3Client; using ::curvefs::client::FuseVolumeClient; using ::curvefs::client::common::FuseClientOption; -using ::curvefs::client::rpcclient::MdsClientImpl; -using ::curvefs::client::rpcclient::MDSBaseClient; -using ::curvefs::client::metric::ClientOpMetric; -using ::curvefs::common::LatencyUpdater; -using ::curvefs::client::metric::InflightGuard; -using ::curvefs::client::filesystem::EntryOut; +using ::curvefs::client::common::kEntryFilePathInClient; +using ::curvefs::client::common::kMountPointInCurvefs; +using ::curvefs::client::common::kRootPathInCurvefs; +using ::curvefs::client::common::kWarmupCacheStorageType; +using ::curvefs::client::common::kWarmupDataType; +using ::curvefs::client::common::kWarmupOpType; +using ::curvefs::client::common::WarmupStorageType; +using ::curvefs::client::filesystem::AccessLogGuard; using ::curvefs::client::filesystem::AttrOut; +using ::curvefs::client::filesystem::EntryOut; using ::curvefs::client::filesystem::FileOut; -using ::curvefs::client::filesystem::AccessLogGuard; -using ::curvefs::client::filesystem::StrFormat; using ::curvefs::client::filesystem::InitAccessLog; using 
::curvefs::client::filesystem::Logger; -using ::curvefs::client::filesystem::StrEntry; using ::curvefs::client::filesystem::StrAttr; +using ::curvefs::client::filesystem::StrEntry; +using ::curvefs::client::filesystem::StrFormat; using ::curvefs::client::filesystem::StrMode; +using ::curvefs::client::metric::ClientOpMetric; +using ::curvefs::client::metric::InflightGuard; +using ::curvefs::client::rpcclient::MDSBaseClient; +using ::curvefs::client::rpcclient::MdsClientImpl; +using ::curvefs::client::warmup::WarmupProgress; +using ::curvefs::common::LatencyUpdater; using ::curvefs::common::FLAGS_vlog_level; @@ -221,13 +231,15 @@ void UnInitFuseClient() { } int AddWarmupTask(curvefs::client::common::WarmupType type, fuse_ino_t key, - const std::string &path, - curvefs::client::common::WarmupStorageType storageType) { + const std::string& path, + curvefs::client::common::WarmupStorageType storageType, + const std::string& mount_point, const std::string& root) { int ret = 0; bool result = true; switch (type) { case curvefs::client::common::WarmupType::kWarmupTypeList: - result = g_ClientInstance->PutWarmFilelistTask(key, storageType); + result = g_ClientInstance->PutWarmFilelistTask(key, storageType, path, + mount_point, root); break; case curvefs::client::common::WarmupType::kWarmupTypeSingle: result = g_ClientInstance->PutWarmFileTask(key, path, storageType); @@ -243,8 +255,28 @@ int AddWarmupTask(curvefs::client::common::WarmupType type, fuse_ino_t key, return ret; } +int CancelWarmupTask(curvefs::client::common::WarmupType type, fuse_ino_t key) { + int ret = 0; + bool result = true; + switch (type) { + case curvefs::client::common::WarmupType::kWarmupTypeList: + case curvefs::client::common::WarmupType::kWarmupTypeSingle: + result = g_ClientInstance->RemoveWarmFileOrFilelistTask(key); + break; + default: + // not support cancel warmup type (warmup single file/dir or + // filelist) + LOG(ERROR) << "not support warmup type, only support single/list"; + ret = 
EOPNOTSUPP; + } + if (!result) { + ret = ERANGE; + } + return ret; +} + void QueryWarmupTask(fuse_ino_t key, std::string *data) { - curvefs::client::warmup::WarmupProgress progress; + WarmupProgress progress; bool ret = g_ClientInstance->GetWarmupProgress(key, &progress); if (!ret) { *data = "finished"; @@ -255,40 +287,115 @@ void QueryWarmupTask(fuse_ino_t key, std::string *data) { VLOG(9) << "Warmup [" << key << "]" << *data; } -int Warmup(fuse_ino_t key, const std::string& name, const std::string& value) { - // warmup +void ListWarmupTasks(std::string* data) { + WarmupProgress progress; + std::unordered_map filepath2progress; + + bool ret = g_ClientInstance->GetAllWarmupProgress(&filepath2progress); + + std::ostringstream filepath2warmupProgress; + + for (auto it = filepath2progress.begin(); it != filepath2progress.end(); + ++it) { + filepath2warmupProgress + << fmt::format("{}:{}/{};", it->first, it->second.GetFinished(), + it->second.GetTotal()); + VLOG(9) << fmt::format("Warmup [\"{}\"]: {}/{};", it->first, + it->second.GetFinished(), it->second.GetTotal()); + } + if (!ret) { + *data = "finished"; + } else { + *data = filepath2warmupProgress.str(); + } +} + +int Warmup(fuse_ino_t key, const char* name, const std::string& values) { if (g_ClientInstance->GetFsInfo()->fstype() != FSType::TYPE_S3) { LOG(ERROR) << "warmup only support s3"; return EOPNOTSUPP; } std::vector opTypePath; - curve::common::SplitString(value, "\n", &opTypePath); - if (opTypePath.size() != curvefs::client::common::kWarmupOpNum) { - LOG(ERROR) << name << " has invalid xattr value " << value; - return ERANGE; - } - auto storageType = - curvefs::client::common::GetWarmupStorageType(opTypePath[3]); - if (storageType == - curvefs::client::common::WarmupStorageType::kWarmupStorageTypeUnknown) { - LOG(ERROR) << name << " not support storage type: " << value; + curve::common::SplitString(values, "\n", &opTypePath); + + /* + * opTypePath[0]: warmupOpType: [add, cancel] (e.g., add) + * 
opTypePath[1]: warmupDataType: [single, list] (e.g., single) + * opTypePath[2]: entryFilePathInClient (e.g., /mnt/hello_world.txt) + * opTypePath[3]: storageType: [disk, mem] (e.g., disk) + * opTypePath[4]: mountPointInCurvefs: (e.g., /mnt) + * opTypePath[5]: rootPathInCurvefs: (e.g., /) + */ + + auto warmupOpType = opTypePath[kWarmupOpType]; + auto warmupDataType = opTypePath[kWarmupDataType]; + + if (opTypePath.size() < curvefs::client::common::kMinWarmupOpArgsNum) { + LOG(ERROR) << name + << " did not provide enough required xattr values (expected " + << curvefs::client::common::kMinWarmupOpArgsNum << " actual " + << opTypePath.size() << ") " << values; return ERANGE; } + int ret = 0; - switch (curvefs::client::common::GetWarmupOpType(opTypePath[0])) { - case curvefs::client::common::WarmupOpType::kWarmupOpAdd: - ret = - AddWarmupTask(curvefs::client::common::GetWarmupType(opTypePath[1]), - key, opTypePath[2], storageType); - if (ret != 0) { - LOG(ERROR) << name << " has invalid xattr value " << value; + + switch (curvefs::client::common::GetWarmupOpType(warmupOpType)) { + case curvefs::client::common::WarmupOpType::kWarmupOpAdd: { + if (opTypePath.size() != + curvefs::client::common::kWarmupAddArgsNum) { + LOG(ERROR) + << name + << " has an incorrect number of xattr values (expected " + << curvefs::client::common::kWarmupAddArgsNum << " actual " + << opTypePath.size() << ") " << values; + ret = ERANGE; + break; + } + auto entryFilePathInClient = opTypePath[kEntryFilePathInClient]; + + auto storageType = curvefs::client::common::GetWarmupStorageType( + opTypePath[kWarmupCacheStorageType]); + + if (storageType == WarmupStorageType::kWarmupStorageTypeUnknown) { + LOG(ERROR) << name << " not support storage type: " << values; + ret = ERANGE; + break; + } + auto mountPointInCurvefs = opTypePath[kMountPointInCurvefs]; + auto rootPathInCurvefs = opTypePath[kRootPathInCurvefs]; + + ret = AddWarmupTask( + curvefs::client::common::GetWarmupType(warmupDataType), key, + 
entryFilePathInClient, storageType, mountPointInCurvefs, + rootPathInCurvefs); + break; + } + case curvefs::client::common::WarmupOpType::kWarmupOpCancel: { + if (opTypePath.size() != + curvefs::client::common::kWarmupCancelArgsNum) { + LOG(ERROR) + << name + << " has an incorrect number of xattr values (expected " + << curvefs::client::common::kWarmupCancelArgsNum + << " actual " << opTypePath.size() << ") " << values; + ret = ERANGE; + break; + } + ret = CancelWarmupTask( + curvefs::client::common::GetWarmupType(warmupDataType), key); + break; + } + default: { + ret = ERANGE; } - break; - default: - LOG(ERROR) << name << " has invalid xattr value " << value; - ret = ERANGE; } + + if (ret != 0) { + LOG(ERROR) << name << " has invalid xattr values " << values; + } + return ret; } @@ -312,12 +419,6 @@ FuseClient* Client() { return g_ClientInstance; } -const char* warmupXAttr = ::curvefs::client::common::kCurveFsWarmupXAttr; - -bool IsWamupReq(const char* name) { - return strcmp(name, warmupXAttr) == 0; -} - void TriggerWarmup(fuse_req_t req, fuse_ino_t ino, const char* name, @@ -341,6 +442,17 @@ void QueryWarmup(fuse_req_t req, fuse_ino_t ino, size_t size) { return fs->ReplyBuffer(req, data.data(), data.length()); } +void ListWarmup(fuse_req_t req, size_t size) { + auto fs = Client()->GetFileSystem(); + + std::string data; + ListWarmupTasks(&data); + if (size == 0) { + return fs->ReplyXattr(req, data.length()); + } + return fs->ReplyBuffer(req, data.data(), data.length()); +} + void ReadThrottleAdd(size_t size) { Client()->Add(true, size); } void WriteThrottleAdd(size_t size) { Client()->Add(false, size); } @@ -789,6 +901,21 @@ void FuseOpStatFs(fuse_req_t req, fuse_ino_t ino) { return fs->ReplyStatfs(req, &stbuf); } +const char* warmupXAttr = ::curvefs::client::common::kCurveFsWarmupXAttr; +const char* warmupListXAttr = + ::curvefs::client::common::kCurveFsWarmupXAttrList; + +bool IsWamupReq(const char* name) { + if (strlen(name) < strlen(warmupXAttr)) { + return 
false; + } + return strncmp(name, warmupXAttr, strlen(warmupXAttr)) == 0; +} + +bool IsWarmupListReq(const char* name) { + return IsWamupReq(name) && strcmp(name, warmupListXAttr) == 0; +} + void FuseOpSetXattr(fuse_req_t req, fuse_ino_t ino, const char* name, @@ -824,7 +951,9 @@ void FuseOpGetXattr(fuse_req_t req, ino, name, size, StrErr(rc), value.size()); }); - if (IsWamupReq(name)) { + if (IsWarmupListReq(name)) { + return ListWarmup(req, size); + } else if (IsWamupReq(name)) { return QueryWarmup(req, ino, size); } diff --git a/curvefs/src/client/filesystem/meta.cpp b/curvefs/src/client/filesystem/meta.cpp index 721c7c859b..47bbeced83 100644 --- a/curvefs/src/client/filesystem/meta.cpp +++ b/curvefs/src/client/filesystem/meta.cpp @@ -106,7 +106,6 @@ std::string Attr2Str(const InodeAttr& attr) { return ""; } - std::string smode; return absl::StrFormat(" (%d,[%s:0%06o,%d,%d,%d,%d,%d,%d,%d])", attr.inodeid(), StrMode(attr.mode()).c_str(), attr.mode(), attr.nlink(), attr.uid(), attr.gid(), diff --git a/curvefs/src/client/fuse_client.h b/curvefs/src/client/fuse_client.h index 3988b77866..ea8971f9ba 100644 --- a/curvefs/src/client/fuse_client.h +++ b/curvefs/src/client/fuse_client.h @@ -23,41 +23,42 @@ #ifndef CURVEFS_SRC_CLIENT_FUSE_CLIENT_H_ #define CURVEFS_SRC_CLIENT_FUSE_CLIENT_H_ -#include -#include #include +#include +#include +#include +#include #include #include #include -#include +#include #include #include -#include #include "curvefs/proto/common.pb.h" #include "curvefs/proto/mds.pb.h" +#include "curvefs/src/client/client_operator.h" +#include "curvefs/src/client/common/common.h" #include "curvefs/src/client/common/config.h" #include "curvefs/src/client/dentry_cache_manager.h" #include "curvefs/src/client/dir_buffer.h" +#include "curvefs/src/client/filesystem/filesystem.h" +#include "curvefs/src/client/filesystem/meta.h" #include "curvefs/src/client/fuse_common.h" #include "curvefs/src/client/inode_cache_manager.h" +#include 
"curvefs/src/client/lease/lease_excutor.h" +#include "curvefs/src/client/metric/client_metric.h" #include "curvefs/src/client/rpcclient/mds_client.h" #include "curvefs/src/client/rpcclient/metaserver_client.h" #include "curvefs/src/client/s3/client_s3_adaptor.h" -#include "curvefs/src/common/fast_align.h" -#include "curvefs/src/client/metric/client_metric.h" -#include "src/common/concurrent/concurrent.h" +#include "curvefs/src/client/warmup/warmup_manager.h" +#include "curvefs/src/client/xattr_manager.h" #include "curvefs/src/common/define.h" +#include "curvefs/src/common/fast_align.h" #include "curvefs/src/common/s3util.h" -#include "curvefs/src/client/common/common.h" -#include "curvefs/src/client/client_operator.h" -#include "curvefs/src/client/lease/lease_excutor.h" -#include "curvefs/src/client/xattr_manager.h" -#include "curvefs/src/client/warmup/warmup_manager.h" +#include "src/common/concurrent/concurrent.h" #include "src/common/throttle.h" -#include "curvefs/src/client/filesystem/meta.h" -#include "curvefs/src/client/filesystem/filesystem.h" #define DirectIOAlignment 512 @@ -90,6 +91,9 @@ using ::curvefs::client::filesystem::FileOut; using curvefs::common::is_aligned; +using Filepath2WarmupProgressMap = + std::unordered_map; + const uint32_t kMaxHostNameLength = 255u; using mds::Mountpoint; @@ -305,9 +309,13 @@ class FuseClient { enableSumInDir_ = enable; } - bool PutWarmFilelistTask(fuse_ino_t key, common::WarmupStorageType type) { + bool PutWarmFilelistTask(fuse_ino_t key, common::WarmupStorageType type, + const std::string& path, + const std::string& mount_point, + const std::string& root) { if (fsInfo_->fstype() == FSType::TYPE_S3) { - return warmupManager_->AddWarmupFilelist(key, type); + return warmupManager_->AddWarmupFilelist(key, type, path, + mount_point, root); } // only support s3 return true; } @@ -320,6 +328,13 @@ class FuseClient { return true; } + bool RemoveWarmFileOrFilelistTask(fuse_ino_t key) { + if (fsInfo_->fstype() == 
FSType::TYPE_S3) { + return warmupManager_->CancelWarmupFileOrFilelist(key); + } // only support s3 + return true; + } + bool GetWarmupProgress(fuse_ino_t key, warmup::WarmupProgress *progress) { if (fsInfo_->fstype() == FSType::TYPE_S3) { return warmupManager_->QueryWarmupProgress(key, progress); @@ -327,6 +342,13 @@ class FuseClient { return false; } + bool GetAllWarmupProgress(Filepath2WarmupProgressMap* filepath2progress) { + if (fsInfo_->fstype() == FSType::TYPE_S3) { + return warmupManager_->ListWarmupProgress(filepath2progress); + } + return false; + } + CURVEFS_ERROR SetMountStatus(const struct MountOption *mountOption); void Add(bool isRead, size_t size) { throttle_.Add(isRead, size); } diff --git a/curvefs/src/client/kvclient/kvclient_manager.h b/curvefs/src/client/kvclient/kvclient_manager.h index c0ed0e051e..de2ae588a6 100644 --- a/curvefs/src/client/kvclient/kvclient_manager.h +++ b/curvefs/src/client/kvclient/kvclient_manager.h @@ -64,7 +64,12 @@ struct SetKVCacheTask { explicit SetKVCacheTask( const std::string& k, const char* val, const uint64_t len, SetKVCacheDone done = [](const std::shared_ptr&) {}) - : key(k), value(val), length(len), res(false), done(std::move(done)) {} + : key(k), + value(val), + length(len), + res(false), + done(std::move(done)), + timer(butil::Timer::STARTED) {} }; struct GetKVCacheTask { @@ -84,7 +89,8 @@ struct GetKVCacheTask { offset(off), length(len), res(false), - done(std::move(done)) {} + done(std::move(done)), + timer(butil::Timer::STARTED) {} }; class KVClientManager { diff --git a/curvefs/src/client/metric/client_metric.h b/curvefs/src/client/metric/client_metric.h index 98d0ae11e6..40b235a729 100644 --- a/curvefs/src/client/metric/client_metric.h +++ b/curvefs/src/client/metric/client_metric.h @@ -26,6 +26,7 @@ #include +#include #include #include @@ -275,18 +276,32 @@ struct S3Metric { writeSize(prefix, fsName + "_adaptor_write_size", 0) {} }; +template +uint64_t LoadAtomicValue(void* atomValue) { + std::atomic* 
bytes = reinterpret_cast*>(atomValue); + return static_cast(bytes->load()); +} + struct DiskCacheMetric { static const std::string prefix; std::string fsName; InterfaceMetric writeS3; - bvar::Status diskUsedBytes; + bvar::PassiveStatus usedBytes_; + bvar::PassiveStatus totalBytes_; + InterfaceMetric trim_; - explicit DiskCacheMetric(const std::string &name = "") + explicit DiskCacheMetric(const std::string& name = "", + std::atomic* usedBytes = nullptr, + std::atomic* totalBytes = nullptr) : fsName(!name.empty() ? name : prefix + curve::common::ToHexString(this)), writeS3(prefix, fsName + "_write_s3"), - diskUsedBytes(prefix, fsName + "_diskcache_usedbytes", 0) {} + usedBytes_(prefix, fsName + "_diskcache_usedbytes", + LoadAtomicValue, usedBytes), + totalBytes_(prefix, fsName + "_diskcache_totalbytes", + LoadAtomicValue, totalBytes), + trim_(prefix, fsName + "_diskcache_trim") {} }; struct KVClientMetric { diff --git a/curvefs/src/client/s3/client_s3_cache_manager.cpp b/curvefs/src/client/s3/client_s3_cache_manager.cpp index 56c5ed5ca5..4eab99161e 100644 --- a/curvefs/src/client/s3/client_s3_cache_manager.cpp +++ b/curvefs/src/client/s3/client_s3_cache_manager.cpp @@ -1039,7 +1039,8 @@ void FileCacheManager::ReleaseCache() { } chunkCacheMap_.clear(); - g_s3MultiManagerMetric->chunkManagerNum << -1 * chunNum; + g_s3MultiManagerMetric->chunkManagerNum + << -1 * static_cast(chunNum); return; } @@ -1509,7 +1510,7 @@ DataCachePtr ChunkCacheManager::FindWriteableDataCache( } std::vector::iterator iterDel = waitDelVec.begin(); - for (; iterDel != waitDelVec.end(); iterDel++) { + for (; iterDel != waitDelVec.end(); ++iterDel) { auto iter = dataWCacheMap_.find(*iterDel); VLOG(9) << "delete data cache chunkPos:" << iter->second->GetChunkPos() @@ -1576,7 +1577,8 @@ void ChunkCacheManager::AddReadDataCache(DataCachePtr dataCache) { uint64_t actualLen = (*dcpIter)->GetActualLen(); if (s3ClientAdaptor_->GetFsCacheManager()->Delete(dcpIter)) { 
g_s3MultiManagerMetric->readDataCacheNum << -1; - g_s3MultiManagerMetric->readDataCacheByte << -1 * actualLen; + g_s3MultiManagerMetric->readDataCacheByte + << -1 * static_cast(actualLen); dataRCacheMap_.erase(iter); } } @@ -1669,7 +1671,8 @@ void ChunkCacheManager::TruncateReadCache(uint64_t chunkPos) { if ((dcChunkPos + dcLen) > chunkPos) { if (s3ClientAdaptor_->GetFsCacheManager()->Delete(rIter->second)) { g_s3MultiManagerMetric->readDataCacheNum << -1; - g_s3MultiManagerMetric->readDataCacheByte << -1 * dcActualLen; + g_s3MultiManagerMetric->readDataCacheByte + << -1 * static_cast(dcActualLen); dataRCacheMap_.erase(next(rIter).base()); } } else { @@ -1691,7 +1694,6 @@ void ChunkCacheManager::ReleaseWriteDataCache(const DataCachePtr &dataCache) { CURVEFS_ERROR ChunkCacheManager::Flush(uint64_t inodeId, bool force, bool toS3) { - std::map tmp; curve::common::LockGuard lg(flushMtx_); CURVEFS_ERROR ret = CURVEFS_ERROR::OK; // DataCachePtr dataCache; @@ -2076,7 +2078,7 @@ void DataCache::Write(uint64_t chunkPos, uint64_t len, const char *data, } else { std::vector::const_iterator iter = mergeDataCacheVer.begin(); - for (; iter != mergeDataCacheVer.end(); iter++) { + for (; iter != mergeDataCacheVer.end(); ++iter) { /* ------ ------ DataCache --------------------- WriteData @@ -2126,7 +2128,7 @@ void DataCache::Write(uint64_t chunkPos, uint64_t len, const char *data, } else { std::vector::const_iterator iter = mergeDataCacheVer.begin(); - for (; iter != mergeDataCacheVer.end(); iter++) { + for (; iter != mergeDataCacheVer.end(); ++iter) { /* ------ ------ DataCache ---------------- WriteData diff --git a/curvefs/src/client/s3/disk_cache_manager.cpp b/curvefs/src/client/s3/disk_cache_manager.cpp index 3f229587c8..e394643489 100644 --- a/curvefs/src/client/s3/disk_cache_manager.cpp +++ b/curvefs/src/client/s3/disk_cache_manager.cpp @@ -20,17 +20,20 @@ * Author: hzwuhongsong */ +#include "curvefs/src/client/s3/disk_cache_manager.h" + +#include #include #include 
-#include -#include + +#include #include -#include #include -#include +#include +#include +#include "curvefs/src/client/metric/client_metric.h" #include "curvefs/src/client/s3/client_s3_adaptor.h" -#include "curvefs/src/client/s3/disk_cache_manager.h" #include "curvefs/src/common/s3util.h" namespace curvefs { @@ -244,8 +247,9 @@ int DiskCacheManager::WriteDiskFile(const std::string fileName, const char *buf, // write throttle diskCacheThrottle_.Add(false, length); int ret = cacheWrite_->WriteDiskFile(fileName, buf, length, force); - if (ret > 0) - AddDiskUsedBytes(ret); + if (ret > 0) { + UpdateDiskUsedBytes(ret); + } return ret; } @@ -265,8 +269,9 @@ int DiskCacheManager::WriteReadDirect(const std::string fileName, // write hrottle diskCacheThrottle_.Add(false, length); int ret = cacheRead_->WriteDiskFile(fileName, buf, length); - if (ret > 0) - AddDiskUsedBytes(ret); + if (ret > 0) { + UpdateDiskUsedBytes(ret); + } return ret; } @@ -296,6 +301,7 @@ int64_t DiskCacheManager::UpdateDiskFsUsedRatio() { } int64_t usedPercent = 100 * usedBytes / (usedBytes + availableBytes) + 1; diskFsUsedRatio_.store(usedPercent); + totalBytes_.store(totalBytes); return usedPercent; } @@ -316,8 +322,6 @@ void DiskCacheManager::SetDiskInitUsedBytes() { return; } usedBytes_.fetch_add(usedBytes); - if (metric_.get() != nullptr) - metric_->diskUsedBytes.set_value(usedBytes_); diskUsedInit_.store(true); VLOG(9) << "cache disk used size is: " << result; return; @@ -429,6 +433,7 @@ void DiskCacheManager::TrimCache() { break; } + uint64_t start = butil::cpuwide_time_us(); VLOG(6) << "obj will be removed01: " << cacheKey; cacheReadFile = cacheReadFullDir + "/" + curvefs::common::s3util::GenPathByObjName( @@ -470,7 +475,9 @@ void DiskCacheManager::TrimCache() { << "error is: " << errno; continue; } - DecDiskUsedBytes(statReadFile.st_size); + metric::CollectMetrics(&metric_->trim_, statReadFile.st_size, + butil::cpuwide_time_us() - start); + UpdateDiskUsedBytes(-statReadFile.st_size); VLOG(6) 
<< "remove disk file success, file is: " << cacheKey; } } @@ -507,7 +514,8 @@ int DiskCacheManager::TrimStop() { void DiskCacheManager::InitMetrics(const std::string& fsName, std::shared_ptr s3Metric) { - metric_ = std::make_shared(fsName); + metric_ = + std::make_shared(fsName, &usedBytes_, &totalBytes_); cacheWrite_->InitMetrics(metric_, s3Metric); cacheRead_->InitMetrics(metric_); // this function move to here from init, diff --git a/curvefs/src/client/s3/disk_cache_manager.h b/curvefs/src/client/s3/disk_cache_manager.h index 68b104f4f2..b31c4e793f 100644 --- a/curvefs/src/client/s3/disk_cache_manager.h +++ b/curvefs/src/client/s3/disk_cache_manager.h @@ -138,31 +138,14 @@ class DiskCacheManager { */ bool IsDiskCacheSafe(uint32_t baseRatio); - /** - * @brief add the used bytes of disk cache. - */ - void AddDiskUsedBytes(uint64_t length) { + // update disk used bytes by length + void UpdateDiskUsedBytes(int64_t length) { usedBytes_.fetch_add(length); - if (metric_.get() != nullptr) - metric_->diskUsedBytes.set_value(usedBytes_/1024/1024); - VLOG(9) << "add disk used size is: " << length - << ", now is: " << usedBytes_.load(); - return; - } - /** - * @brief dec the used bytes of disk cache. 
- * can not dec disk used bytes after file have been loaded, - * because there are link in read cache - */ - void DecDiskUsedBytes(uint64_t length) { - usedBytes_.fetch_sub(length); assert(usedBytes_ >= 0); - if (metric_.get() != nullptr) - metric_->diskUsedBytes.set_value(usedBytes_); - VLOG(9) << "dec disk used size is: " << length + VLOG(9) << "update disk used size is: " << length << ", now is: " << usedBytes_.load(); - return; } + void SetDiskInitUsedBytes(); uint64_t GetDiskUsedbytes() { return usedBytes_.load(); @@ -194,8 +177,11 @@ class DiskCacheManager { curve::common::InterruptibleSleeper sleeper_; curve::common::WaitInterval waitIntervalSec_; uint32_t objectPrefix_; - // used bytes of disk cache + // The size of the object cached to the local disk, + // rather than the size of the used capacity of the cache disk std::atomic usedBytes_; + // Cache disk size + std::atomic totalBytes_; // used ratio of the file system in disk cache std::atomic diskFsUsedRatio_; uint32_t cmdTimeoutSec_; diff --git a/curvefs/src/client/s3/disk_cache_manager_impl.cpp b/curvefs/src/client/s3/disk_cache_manager_impl.cpp index 2d6c1e545a..f99a70e33e 100644 --- a/curvefs/src/client/s3/disk_cache_manager_impl.cpp +++ b/curvefs/src/client/s3/disk_cache_manager_impl.cpp @@ -98,6 +98,7 @@ int DiskCacheManagerImpl::WriteClosure( // set the returned value // it is need in CallBack context->retCode = ret; + context->timer.stop(); context->cb(context); VLOG(9) << "WriteClosure end, name: " << context->key; return 0; diff --git a/curvefs/src/client/s3/disk_cache_read.cpp b/curvefs/src/client/s3/disk_cache_read.cpp index 4f452a3181..27bee46776 100644 --- a/curvefs/src/client/s3/disk_cache_read.cpp +++ b/curvefs/src/client/s3/disk_cache_read.cpp @@ -85,7 +85,7 @@ int DiskCacheRead::LinkWriteToRead(const std::string fileName, const std::string fullWriteDir, const std::string fullReadDir) { VLOG(6) << "LinkWriteToRead start. 
name = " << fileName; - std::string fullReadPath, fullWritePath, dirPath; + std::string fullReadPath, fullWritePath; fullWritePath = fullWriteDir + "/" + fileName; fullReadPath = fullReadDir + "/" + fileName; int ret; diff --git a/curvefs/src/client/s3/disk_cache_write.cpp b/curvefs/src/client/s3/disk_cache_write.cpp index cbcb6179c1..5b650bfa83 100644 --- a/curvefs/src/client/s3/disk_cache_write.cpp +++ b/curvefs/src/client/s3/disk_cache_write.cpp @@ -176,7 +176,7 @@ int DiskCacheWrite::UploadFile(const std::string &name, void DiskCacheWrite::UploadFile(const std::list &toUpload, std::shared_ptr syncTask) { std::list::const_iterator iter; - for (iter = toUpload.begin(); iter != toUpload.end(); iter++) { + for (iter = toUpload.begin(); iter != toUpload.end(); ++iter) { UploadFile(*iter, syncTask); } } @@ -448,7 +448,6 @@ int DiskCacheWrite::RemoveFile(const std::string fileName) { << ", errno = " << errno; return -1; } - cachedObjName_->MoveBack(fileName); VLOG(9) << "remove file success, file = " << fileName; return 0; } diff --git a/curvefs/src/client/warmup/warmup_manager.cpp b/curvefs/src/client/warmup/warmup_manager.cpp index 42d6ce77ff..4ffd9d13bd 100644 --- a/curvefs/src/client/warmup/warmup_manager.cpp +++ b/curvefs/src/client/warmup/warmup_manager.cpp @@ -61,13 +61,16 @@ DEFINE_uint32(warmupMaxSymLink, 1 << 2, DEFINE_validator(warmupMaxSymLink, &pass_uint32); bool WarmupManagerS3Impl::AddWarmupFilelist(fuse_ino_t key, - WarmupStorageType type) { + WarmupStorageType type, + const std::string& path, + const std::string& mount_point, + const std::string& root) { if (!mounted_.load(std::memory_order_acquire)) { LOG(ERROR) << "not mounted"; return false; } // add warmup Progress - if (AddWarmupProcess(key, type)) { + if (AddWarmupProcess(key, path, type)) { LOG(INFO) << "add warmup list task:" << key; WriteLockGuard lock(warmupFilelistDequeMutex_); auto iter = FindWarmupFilelistByKeyLocked(key); @@ -80,7 +83,7 @@ bool 
WarmupManagerS3Impl::AddWarmupFilelist(fuse_ino_t key, return false; } uint64_t len = inodeWrapper->GetLength(); - warmupFilelistDeque_.emplace_back(key, len); + warmupFilelistDeque_.emplace_back(key, len, mount_point, root); } } // Skip already added return true; @@ -93,13 +96,74 @@ bool WarmupManagerS3Impl::AddWarmupFile(fuse_ino_t key, const std::string& path, return false; } // add warmup Progress - if (AddWarmupProcess(key, type)) { + if (AddWarmupProcess(key, path, type)) { LOG(INFO) << "add warmup single task:" << key; FetchDentryEnqueue(key, path); } return true; } +bool WarmupManagerS3Impl::CancelWarmupFileOrFilelist(fuse_ino_t key) { + if (!mounted_.load(std::memory_order_acquire)) { + LOG(ERROR) << "not mounted"; + return false; + } + + VLOG(9) << "cancel warmup file or filelist task: " << key; + + return CancelWarmupDependentQueue(key) && CancelWarmupProcess(key); +} + +bool WarmupManagerS3Impl::CancelWarmupDependentQueue(fuse_ino_t key) { + WriteLockGuard lock(warmupFilelistDequeMutex_); + auto filelistIt = FindWarmupFilelistByKeyLocked(key); + if (filelistIt != warmupFilelistDeque_.end()) { + std::shared_ptr inodeWrapper; + CURVEFS_ERROR ret = inodeManager_->GetInode(key, inodeWrapper); + if (ret != CURVEFS_ERROR::OK) { + LOG(ERROR) << "inodeManager get inode fail, ret = " << ret + << ", inodeid = " << key; + return false; + } + uint64_t len = inodeWrapper->GetLength(); + + for (auto it = warmupFilelistDeque_.begin(); + it != warmupFilelistDeque_.end();) { + if (it->GetKey() == key) { + it = warmupFilelistDeque_.erase(it); + } else { + ++it; + } + } + } + + auto fetchDentryIt = inode2FetchDentryPool_.find(key); + if (fetchDentryIt != inode2FetchDentryPool_.end()) { + inode2FetchDentryPool_[key]->Stop(); + WriteLockGuard lockDentry(inode2FetchDentryPoolMutex_); + inode2FetchDentryPool_.erase(key); + } + + WriteLockGuard lockInodes(warmupInodesDequeMutex_); + for (auto it = warmupInodesDeque_.begin(); + it != warmupInodesDeque_.end();) { + if 
(it->GetKey() == key) { + it = warmupInodesDeque_.erase(it); + } else { + ++it; + } + } + + auto fetchS3ObjIt = inode2FetchS3ObjectsPool_.find(key); + if (fetchS3ObjIt != inode2FetchS3ObjectsPool_.end()) { + inode2FetchS3ObjectsPool_[key]->Stop(); + WriteLockGuard lockDentry(inode2FetchS3ObjectsPoolMutex_); + inode2FetchS3ObjectsPool_.erase(key); + } + + return true; +} + void WarmupManagerS3Impl::UnInit() { bgFetchStop_.store(true, std::memory_order_release); if (initbgFetchThread_) { @@ -509,7 +573,8 @@ void WarmupManagerS3Impl::WarmUpAllObjs( { ReadLockGuard lock(inode2ProgressMutex_); auto iterProgress = FindWarmupProgressByKeyLocked(key); - if (iterProgress->second.GetStorageType() == + if (iterProgress != inode2Progress_.end() && + iterProgress->second.GetStorageType() == curvefs::client::common::WarmupStorageType:: kWarmupStorageTypeDisk && s3Adaptor_->GetDiskCacheManager()->IsCached(name)) { @@ -562,7 +627,6 @@ void WarmupManagerS3Impl::ScanCleanFetchDentryPool() { WriteLockGuard lock(inode2FetchDentryPoolMutex_); for (auto iter = inode2FetchDentryPool_.begin(); iter != inode2FetchDentryPool_.end();) { - std::deque::iterator iterInode; if (iter->second->QueueSize() == 0) { VLOG(9) << "remove FetchDentry task: " << iter->first; iter->second->Stop(); @@ -615,19 +679,42 @@ void WarmupManagerS3Impl::ScanWarmupInodes() { } } +void WarmupManagerS3Impl::AlignFilelistPathsToCurveFs( + const WarmupFilelist& filelist, std::vector* list) { + for (auto filePathIt = list->begin(); filePathIt != list->end();) { + size_t found = filePathIt->find(filelist.GetMountPoint()); + + if (found != std::string::npos) { + filePathIt->replace(found, filelist.GetMountPoint().length(), + filelist.GetRoot()); + ++filePathIt; + } else { + filePathIt = list->erase(filePathIt); + } + } +} + void WarmupManagerS3Impl::ScanWarmupFilelist() { // Use a write lock to ensure that all parsing tasks are added. 
WriteLockGuard lock(warmupFilelistDequeMutex_); if (!warmupFilelistDeque_.empty()) { WarmupFilelist warmupFilelist = warmupFilelistDeque_.front(); VLOG(9) << "warmup ino: " << warmupFilelist.GetKey() - << " len is: " << warmupFilelist.GetFileLen(); + << " len is: " << warmupFilelist.GetFileLen() + << " mount point is: " << warmupFilelist.GetMountPoint() + << " fs root is: " << warmupFilelist.GetRoot(); std::vector warmuplist; GetWarmupList(warmupFilelist, &warmuplist); VLOG(9) << "warmup ino: " << warmupFilelist.GetKey() << " warmup list is: " << fmt::format("{}", fmt::join(warmuplist, ",")); + + AlignFilelistPathsToCurveFs(warmupFilelist, &warmuplist); + VLOG(9) << "warmup ino: " << warmupFilelist.GetKey() + << " aligned warmup list is: " + << fmt::format("{}", fmt::join(warmuplist, ",")); + for (auto filePath : warmuplist) { FetchDentryEnqueue(warmupFilelist.GetKey(), filePath); } diff --git a/curvefs/src/client/warmup/warmup_manager.h b/curvefs/src/client/warmup/warmup_manager.h index baafccfa0d..5cb7342fd0 100644 --- a/curvefs/src/client/warmup/warmup_manager.h +++ b/curvefs/src/client/warmup/warmup_manager.h @@ -80,7 +80,20 @@ class WarmupFile { uint64_t fileLen_; }; -using WarmupFilelist = WarmupFile; +class WarmupFilelist : public WarmupFile { + public: + explicit WarmupFilelist(fuse_ino_t key = 0, uint64_t fileLen = 0, + const std::string& mountPoint = "", + const std::string& root = "") + : WarmupFile(key, fileLen), mountPoint_(mountPoint), root_(root) {} + + std::string GetMountPoint() const { return mountPoint_; } + std::string GetRoot() const { return root_; } + + private: + std::string mountPoint_; + std::string root_; +}; class WarmupInodes { public: @@ -103,13 +116,18 @@ class WarmupInodes { class WarmupProgress { public: explicit WarmupProgress(WarmupStorageType type = curvefs::client::common:: - WarmupStorageType::kWarmupStorageTypeUnknown) - : total_(0), finished_(0), storageType_(type) {} + WarmupStorageType::kWarmupStorageTypeUnknown, + 
std::string filePath = "") + : total_(0), + finished_(0), + storageType_(type), + filePathInClient_(filePath) {} WarmupProgress(const WarmupProgress& wp) : total_(wp.total_), finished_(wp.finished_), - storageType_(wp.storageType_) {} + storageType_(wp.storageType_), + filePathInClient_(wp.filePathInClient_) {} void AddTotal(uint64_t add) { std::lock_guard lock(totalMutex_); @@ -144,6 +162,8 @@ class WarmupProgress { ",finished:" + std::to_string(finished_); } + std::string GetFilePathInClient() { return filePathInClient_; } + WarmupStorageType GetStorageType() { return storageType_; } private: @@ -152,6 +172,7 @@ class WarmupProgress { uint64_t finished_; std::mutex finishedMutex_; WarmupStorageType storageType_; + std::string filePathInClient_; }; using FuseOpReadFunctionType = @@ -191,10 +212,16 @@ class WarmupManager { virtual void Init(const FuseClientOption& option) { option_ = option; } virtual void UnInit() { ClearWarmupProcess(); } - virtual bool AddWarmupFilelist(fuse_ino_t key, WarmupStorageType type) = 0; + virtual bool AddWarmupFilelist(fuse_ino_t key, WarmupStorageType type, + const std::string& path, + const std::string& mount_point, + const std::string& root) = 0; virtual bool AddWarmupFile(fuse_ino_t key, const std::string& path, WarmupStorageType type) = 0; + virtual bool CancelWarmupFileOrFilelist(fuse_ino_t key) = 0; + virtual bool CancelWarmupDependentQueue(fuse_ino_t key) = 0; + void SetMounted(bool mounted) { mounted_.store(mounted, std::memory_order_release); } @@ -213,6 +240,8 @@ class WarmupManager { kvClientManager_ = std::move(kvClientManager); } + using Filepath2WarmupProgressMap = + std::unordered_map; /** * @brief * @@ -233,6 +262,22 @@ class WarmupManager { return ret; } + bool ListWarmupProgress(Filepath2WarmupProgressMap* filepath2progress) { + ReadLockGuard lock(inode2ProgressMutex_); + + for (auto fileProgressInfoIt = inode2Progress_.begin(); + fileProgressInfoIt != inode2Progress_.end(); + ++fileProgressInfoIt) { + 
filepath2progress->emplace( + fileProgressInfoIt->second.GetFilePathInClient(), + WarmupProgress(fileProgressInfoIt->second)); + } + + return !inode2Progress_.empty(); + } + + void CollectMetrics(InterfaceMetric* interface, int count, uint64_t start); + protected: /** * @brief Add warmupProcess @@ -240,10 +285,23 @@ class WarmupManager { * @return true * @return false warmupProcess has been added */ - virtual bool AddWarmupProcess(fuse_ino_t key, WarmupStorageType type) { + virtual bool AddWarmupProcess(fuse_ino_t key, const std::string& path, + WarmupStorageType type) { + WriteLockGuard lock(inode2ProgressMutex_); + auto retPg = inode2Progress_.emplace(key, WarmupProgress(type, path)); + return retPg.second; + } + + virtual bool CancelWarmupProcess(fuse_ino_t key) { WriteLockGuard lock(inode2ProgressMutex_); - auto ret = inode2Progress_.emplace(key, WarmupProgress(type)); - return ret.second; + + bool keyExists = inode2Progress_.find(key) != inode2Progress_.end(); + + if (keyExists) { + inode2Progress_.erase(key); + } + + return keyExists; } /** @@ -285,6 +343,7 @@ class WarmupManager { // warmup progress std::unordered_map inode2Progress_; + BthreadRWLock inode2ProgressMutex_; std::shared_ptr kvClientManager_ = nullptr; @@ -308,10 +367,16 @@ class WarmupManagerS3Impl : public WarmupManager { std::move(kvClientManager)), s3Adaptor_(std::move(s3Adaptor)) {} - bool AddWarmupFilelist(fuse_ino_t key, WarmupStorageType type) override; + bool AddWarmupFilelist(fuse_ino_t key, WarmupStorageType type, + const std::string& path, + const std::string& mount_point, + const std::string& root) override; bool AddWarmupFile(fuse_ino_t key, const std::string& path, WarmupStorageType type) override; + bool CancelWarmupFileOrFilelist(fuse_ino_t key) override; + bool CancelWarmupDependentQueue(fuse_ino_t key) override; + void Init(const FuseClientOption& option) override; void UnInit() override; @@ -321,6 +386,9 @@ class WarmupManagerS3Impl : public WarmupManager { void 
GetWarmupList(const WarmupFilelist& filelist, std::vector* list); + void AlignFilelistPathsToCurveFs(const WarmupFilelist& filelist, + std::vector* list); + void FetchDentryEnqueue(fuse_ino_t key, const std::string& file); void LookPath(fuse_ino_t key, std::string file); diff --git a/curvefs/src/client/xattr_manager.cpp b/curvefs/src/client/xattr_manager.cpp index 4f9c3bf928..61500e38eb 100644 --- a/curvefs/src/client/xattr_manager.cpp +++ b/curvefs/src/client/xattr_manager.cpp @@ -46,8 +46,7 @@ bool IsOneLayer(const char *name) { return false; } -CURVEFS_ERROR XattrManager::CalOneLayerSumInfo(InodeAttr *attr) { - std::stack iStack; +CURVEFS_ERROR XattrManager::CalOneLayerSumInfo(InodeAttr* attr) { // use set can deal with hard link std::set inodeIds; std::list attrs; diff --git a/curvefs/src/mds/fs_manager.cpp b/curvefs/src/mds/fs_manager.cpp index b5d67b2bf3..3af5b118d3 100644 --- a/curvefs/src/mds/fs_manager.cpp +++ b/curvefs/src/mds/fs_manager.cpp @@ -257,6 +257,19 @@ bool FsManager::CheckFsName(const std::string& fsName) { return true; } +bool FsManager::TestS3(const std::string& fsName) { + const Aws::String aws_key(fsName.c_str(), fsName.size()); + std::string value(1024, 'a'); + if (0 != s3Adapter_->PutObject(aws_key, value)) { + return false; + } else { + if (0 != s3Adapter_->DeleteObject(aws_key)) { + return false; + } + } + return true; +} + FSStatusCode FsManager::CreateFs(const ::curvefs::mds::CreateFsRequest* request, FsInfo* fsInfo) { const auto& fsName = request->fsname(); @@ -308,7 +321,7 @@ FSStatusCode FsManager::CreateFs(const ::curvefs::mds::CreateFsRequest* request, option_.s3AdapterOption.s3Address = s3Info.endpoint(); option_.s3AdapterOption.bucketName = s3Info.bucketname(); s3Adapter_->Reinit(option_.s3AdapterOption); - if (!s3Adapter_->BucketExist()) { + if (!TestS3(fsName)) { LOG(ERROR) << "CreateFs " << fsName << " error, s3info is not available!"; return FSStatusCode::S3_INFO_ERROR; diff --git a/curvefs/src/mds/fs_manager.h 
b/curvefs/src/mds/fs_manager.h index 5b20aec6c2..2183786017 100644 --- a/curvefs/src/mds/fs_manager.h +++ b/curvefs/src/mds/fs_manager.h @@ -248,6 +248,8 @@ class FsManager { bool FillVolumeInfo(common::Volume* volume); + bool TestS3(const std::string& fsName); + private: std::shared_ptr fsStorage_; std::shared_ptr spaceManager_; diff --git a/curvefs/src/mds/schedule/scheduler.cpp b/curvefs/src/mds/schedule/scheduler.cpp index ef2aca1a97..b2f3e3c387 100644 --- a/curvefs/src/mds/schedule/scheduler.cpp +++ b/curvefs/src/mds/schedule/scheduler.cpp @@ -79,7 +79,7 @@ MetaServerIdType Scheduler::SelectBestPlacementMetaServer( } uint16_t standardZoneNum = topo_->GetStandardZoneNumInPool(poolId); - if (standardZoneNum <= 0) { + if (standardZoneNum == 0) { LOG(ERROR) << "topoAdapter find pool " << poolId << " standard zone num: " << standardZoneNum << " invalid"; return UNINITIALIZE_ID; diff --git a/curvefs/src/mds/topology/topology.cpp b/curvefs/src/mds/topology/topology.cpp index 334845580f..a929336918 100644 --- a/curvefs/src/mds/topology/topology.cpp +++ b/curvefs/src/mds/topology/topology.cpp @@ -532,15 +532,19 @@ bool TopologyImpl::GetMetaServer(MetaServerIdType metaserverId, bool TopologyImpl::GetMetaServer(const std::string &hostIp, uint32_t port, MetaServer *out) const { ReadLockGuard rlockMetaServerMap(metaServerMutex_); + bool find = false; for (auto it = metaServerMap_.begin(); it != metaServerMap_.end(); it++) { ReadLockGuard rlockMetaServer(it->second.GetRWLockRef()); if (it->second.GetInternalIp() == hostIp && it->second.GetInternalPort() == port) { *out = it->second; - return true; + find = true; + if (it->second.GetOnlineState() == OnlineState::ONLINE) { + return find; + } } } - return false; + return find; } TopoStatusCode TopologyImpl::AddPartition(const Partition &data) { @@ -565,6 +569,8 @@ TopoStatusCode TopologyImpl::AddPartition(const Partition &data) { // update fs partition number clusterInfo_.AddPartitionIndexOfFs(data.GetFsId()); + 
clusterInfo_.UpdateFsNextInodeId(data.GetFsId(), + data.GetIdEnd()); if (!storage_->StorageClusterInfo(clusterInfo_)) { LOG(ERROR) << "AddPartitionIndexOfFs failed, fsId = " << data.GetFsId(); @@ -1044,6 +1050,12 @@ TopoStatusCode TopologyImpl::Init(const TopologyOption &option) { } idGenerator_->initPartitionIdGenerator(maxPartitionId); + // update fs next inodeId + for (const auto& p : partitionMap_) { + clusterInfo_.UpdateFsNextInodeId(p.second.GetFsId(), + p.second.GetIdEnd()); + } + // MemcacheCluster MemcacheClusterIdType maxMemcacheClusterId; if (!storage_->LoadMemcacheCluster(&memcacheClusterMap_, @@ -1626,8 +1638,14 @@ uint32_t TopologyImpl::GetPartitionIndexOfFS(FsIdType fsId) { return clusterInfo_.GetPartitionIndexOfFS(fsId); } +uint64_t TopologyImpl::GetFsNextInodeId(FsIdType fsId) { + ReadLockGuard rlock(clusterMutex_); + return clusterInfo_.GetFsNextInodeId(fsId); +} + std::vector TopologyImpl::ListCopysetInfo() const { std::vector ret; + ReadLockGuard rlockCopySet(copySetMutex_); for (auto const &i : copySetMap_) { ret.emplace_back(i.second); } diff --git a/curvefs/src/mds/topology/topology.h b/curvefs/src/mds/topology/topology.h index 76402229bf..6078564507 100644 --- a/curvefs/src/mds/topology/topology.h +++ b/curvefs/src/mds/topology/topology.h @@ -23,6 +23,7 @@ #define CURVEFS_SRC_MDS_TOPOLOGY_TOPOLOGY_H_ #include +#include #include #include #include @@ -280,6 +281,8 @@ class Topology { virtual uint32_t GetPartitionIndexOfFS(FsIdType fsId) = 0; + virtual uint64_t GetFsNextInodeId(FsIdType fsId) = 0; + virtual std::vector ListCopysetInfo() const = 0; virtual void GetMetaServersSpace( @@ -525,7 +528,9 @@ class TopologyImpl : public Topology { TopoStatusCode GenCopysetAddrBatchForPool(PoolIdType poolId, uint16_t replicaNum, std::list* copysetList) override; - uint32_t GetPartitionIndexOfFS(FsIdType fsId); + uint32_t GetPartitionIndexOfFS(FsIdType fsId) override; + + uint64_t GetFsNextInodeId(FsIdType fsId) override; TopoStatusCode 
GetPoolIdByMetaserverId(MetaServerIdType id, PoolIdType *poolIdOut); diff --git a/curvefs/src/mds/topology/topology_item.h b/curvefs/src/mds/topology/topology_item.h index 4d6b547c49..80099511d5 100644 --- a/curvefs/src/mds/topology/topology_item.h +++ b/curvefs/src/mds/topology/topology_item.h @@ -55,6 +55,8 @@ struct ClusterInformation { std::string clusterId; // std::map partitionIndexs; + // + std::unordered_map fsNextInodeId; ClusterInformation() = default; explicit ClusterInformation(const std::string &clusterId) @@ -76,6 +78,14 @@ struct ClusterInformation { partitionIndexs[fsId]++; } + void UpdateFsNextInodeId(uint32_t fsId, uint64_t inodeId) { + fsNextInodeId[fsId] = std::max(fsNextInodeId[fsId], inodeId); + } + + uint64_t GetFsNextInodeId(uint32_t fsId) { + return fsNextInodeId[fsId] == 0 ? 0 : fsNextInodeId[fsId] + 1; + } + bool SerializeToString(std::string *value) const; bool ParseFromString(const std::string &value); diff --git a/curvefs/src/mds/topology/topology_manager.cpp b/curvefs/src/mds/topology/topology_manager.cpp index c546857bbc..0b50a15c2d 100644 --- a/curvefs/src/mds/topology/topology_manager.cpp +++ b/curvefs/src/mds/topology/topology_manager.cpp @@ -649,9 +649,8 @@ TopoStatusCode TopologyManager::CreatePartitionOnCopyset( } // calculate inodeId start and end of partition - uint32_t index = topology_->GetPartitionIndexOfFS(fsId); - uint64_t idStart = index * option_.idNumberInPartition; - uint64_t idEnd = (index + 1) * option_.idNumberInPartition - 1; + uint64_t idStart = topology_->GetFsNextInodeId(fsId); + uint64_t idEnd = idStart + option_.idNumberInPartition - 1; PartitionIdType partitionId = topology_->AllocatePartitionId(); if (partitionId == static_cast(UNINITIALIZE_ID)) { diff --git a/curvefs/src/metaserver/copyset/copyset_node.cpp b/curvefs/src/metaserver/copyset/copyset_node.cpp index 4b11c268b9..449886204f 100644 --- a/curvefs/src/metaserver/copyset/copyset_node.cpp +++ b/curvefs/src/metaserver/copyset/copyset_node.cpp @@ -92,7 
+92,9 @@ CopysetNode::CopysetNode(PoolId poolId, CopysetId copysetId, confChangeMtx_(), ongoingConfChange_(), metric_(absl::make_unique(poolId_, copysetId_)), - isLoading_(false) {} + isLoading_(false), + snapshotLock_(), + snapshotTask_() {} CopysetNode::~CopysetNode() { Stop(); @@ -188,6 +190,8 @@ void CopysetNode::Stop() { LOG_IF(ERROR, metaStore_->Destroy() != true) << "Failed to clear metastore, copyset: " << name_; } + // wait for snapshot + WaitSnapshotDone(); } int CopysetNode::LoadConfEpoch(const std::string& file) { @@ -305,7 +309,7 @@ void CopysetNode::on_apply(braft::Iterator& iter) { auto type = metaOperator->GetOperatorType(); auto task = std::bind(&MetaOperator::OnApplyFromLog, metaOperator.release(), - TimeUtility::GetTimeofDayUs()); + iter.index(), TimeUtility::GetTimeofDayUs()); applyQueue_->Push(hashcode, type, std::move(task)); timer.stop(); g_concurrent_apply_from_log_wait_latency << timer.u_elapsed(); @@ -361,6 +365,42 @@ class OnSnapshotSaveDoneClosureImpl : public OnSnapshotSaveDoneClosure { } // namespace +void CopysetNode::DoSnapshot(OnSnapshotSaveDoneClosure* done) { + // NOTE: save metadata cannot be asynchronous + // we need maintain the consistency with + // raft snapshot metadata + std::vector files; + brpc::ClosureGuard doneGuard(done); + auto* writer = done->GetSnapshotWriter(); + if (!metaStore_->SaveMeta(writer->get_path(), &files)) { + done->SetError(MetaStatusCode::SAVE_META_FAIL); + LOG(ERROR) << "Save meta store metadata failed"; + return; + } + // asynchronous save data + { + std::lock_guard lock(snapshotLock_); + snapshotTask_ = + std::async(std::launch::async, [files, done, this]() mutable { + brpc::ClosureGuard doneGuard(done); + auto* writer = done->GetSnapshotWriter(); + // save data files + if (!metaStore_->SaveData(writer->get_path(), &files)) { + done->SetError(MetaStatusCode::SAVE_META_FAIL); + LOG(ERROR) << "Save meta store data failed"; + return; + } + // add files to snapshot writer + // file is a relative path 
under the given directory + for (const auto& f : files) { + writer->add_file(f); + } + done->SetSuccess(); + }); + } + doneGuard.release(); +} + void CopysetNode::on_snapshot_save(braft::SnapshotWriter* writer, braft::Closure* done) { LOG(INFO) << "Copyset " << name_ << " saving snapshot to '" @@ -389,19 +429,21 @@ void CopysetNode::on_snapshot_save(braft::SnapshotWriter* writer, writer->add_file(kConfEpochFilename); - // TODO(wuhanqing): MetaStore::Save will start a thread and do task - // asynchronously, after task completed it will call - // OnSnapshotSaveDoneImpl::Run - // BUT, this manner is not so clear, maybe it better to make thing - // asynchronous directly in here - metaStore_->Save(writer->get_path(), new OnSnapshotSaveDoneClosureImpl( - this, writer, done, metricCtx)); + DoSnapshot( + new OnSnapshotSaveDoneClosureImpl(this, writer, done, metricCtx)); doneGuard.release(); // `Cancel` only available for rvalue std::move(cleanMetricIfFailed).Cancel(); } +void CopysetNode::WaitSnapshotDone() { + std::lock_guard lock(snapshotLock_); + if (snapshotTask_.valid()) { + snapshotTask_.wait(); + } +} + namespace { class CopysetLoadingGuard { @@ -462,7 +504,7 @@ int CopysetNode::on_snapshot_load(braft::SnapshotReader* reader) { void CopysetNode::on_leader_start(int64_t term) { /* - * Invoke order in on_leader_start: + * Invoke order in on_leader_start: * 1. flush concurrent apply queue. * 2. set term in states machine. 
* diff --git a/curvefs/src/metaserver/copyset/copyset_node.h b/curvefs/src/metaserver/copyset/copyset_node.h index ef6c09b655..b1a6f14760 100644 --- a/curvefs/src/metaserver/copyset/copyset_node.h +++ b/curvefs/src/metaserver/copyset/copyset_node.h @@ -26,12 +26,14 @@ #include #include +#include #include +#include #include #include #include -#include +#include "curvefs/proto/heartbeat.pb.h" #include "curvefs/src/metaserver/common/types.h" #include "curvefs/src/metaserver/copyset/concurrent_apply_queue.h" #include "curvefs/src/metaserver/copyset/conf_epoch_file.h" @@ -40,7 +42,6 @@ #include "curvefs/src/metaserver/copyset/metric.h" #include "curvefs/src/metaserver/copyset/raft_node.h" #include "curvefs/src/metaserver/metastore.h" -#include "curvefs/proto/heartbeat.pb.h" namespace curvefs { namespace metaserver { @@ -220,6 +221,13 @@ class CopysetNode : public braft::StateMachine { FRIEND_TEST(CopysetNodeBlockGroupTest, Test_AggregateBlockStatInfo); + private: + // for snapshot + + void WaitSnapshotDone(); + + void DoSnapshot(OnSnapshotSaveDoneClosure* done); + private: const PoolId poolId_; const CopysetId copysetId_; @@ -267,6 +275,10 @@ class CopysetNode : public braft::StateMachine { std::unique_ptr metric_; std::atomic isLoading_; + + mutable std::mutex snapshotLock_; + + std::future snapshotTask_; }; inline void CopysetNode::Propose(const braft::Task& task) { diff --git a/curvefs/src/metaserver/copyset/meta_operator.cpp b/curvefs/src/metaserver/copyset/meta_operator.cpp index 82fdf802bc..456fd9c361 100644 --- a/curvefs/src/metaserver/copyset/meta_operator.cpp +++ b/curvefs/src/metaserver/copyset/meta_operator.cpp @@ -123,16 +123,14 @@ void MetaOperator::FastApplyTask() { auto task = std::bind(&MetaOperator::OnApply, this, node_->GetAppliedIndex(), new MetaOperatorClosure(this), TimeUtility::GetTimeofDayUs()); - node_->GetApplyQueue()->Push(HashCode(), - GetOperatorType(), std::move(task)); + node_->GetApplyQueue()->Push(HashCode(), GetOperatorType(), + 
std::move(task)); timer.stop(); g_concurrent_fast_apply_wait_latency << timer.u_elapsed(); } -#define OPERATOR_CAN_BY_PASS_PROPOSE(TYPE) \ - bool TYPE##Operator::CanBypassPropose() const { \ - return true; \ - } \ +#define OPERATOR_CAN_BY_PASS_PROPOSE(TYPE) \ + bool TYPE##Operator::CanBypassPropose() const { return true; } // below operator are readonly, so can enable lease read OPERATOR_CAN_BY_PASS_PROPOSE(GetDentry); @@ -144,31 +142,31 @@ OPERATOR_CAN_BY_PASS_PROPOSE(GetVolumeExtent); #undef OPERATOR_CAN_BY_PASS_PROPOSE -#define OPERATOR_ON_APPLY(TYPE) \ - void TYPE##Operator::OnApply(int64_t index, \ - google::protobuf::Closure *done, \ - uint64_t startTimeUs) { \ - brpc::ClosureGuard doneGuard(done); \ - uint64_t timeUs = TimeUtility::GetTimeofDayUs(); \ - node_->GetMetric()->WaitInQueueLatency(OperatorType::TYPE, \ - timeUs - startTimeUs); \ - auto status = node_->GetMetaStore()->TYPE( \ - static_cast(request_), \ - static_cast(response_)); \ - uint64_t executeTime = TimeUtility::GetTimeofDayUs() - timeUs; \ - node_->GetMetric()->ExecuteLatency(OperatorType::TYPE, executeTime); \ - if (status == MetaStatusCode::OK) { \ - node_->UpdateAppliedIndex(index); \ - static_cast(response_)->set_appliedindex( \ - std::max(index, node_->GetAppliedIndex())); \ - node_->GetMetric()->OnOperatorComplete( \ - OperatorType::TYPE, \ - TimeUtility::GetTimeofDayUs() - startTimeUs, true); \ - } else { \ - node_->GetMetric()->OnOperatorComplete( \ - OperatorType::TYPE, \ - TimeUtility::GetTimeofDayUs() - startTimeUs, false); \ - } \ +#define OPERATOR_ON_APPLY(TYPE) \ + void TYPE##Operator::OnApply(int64_t index, \ + google::protobuf::Closure* done, \ + uint64_t startTimeUs) { \ + brpc::ClosureGuard doneGuard(done); \ + uint64_t timeUs = TimeUtility::GetTimeofDayUs(); \ + node_->GetMetric()->WaitInQueueLatency(OperatorType::TYPE, \ + timeUs - startTimeUs); \ + auto status = node_->GetMetaStore()->TYPE( \ + static_cast(request_), \ + static_cast(response_), index); \ + uint64_t 
executeTime = TimeUtility::GetTimeofDayUs() - timeUs; \ + node_->GetMetric()->ExecuteLatency(OperatorType::TYPE, executeTime); \ + if (status == MetaStatusCode::OK) { \ + node_->UpdateAppliedIndex(index); \ + static_cast(response_)->set_appliedindex( \ + std::max(index, node_->GetAppliedIndex())); \ + node_->GetMetric()->OnOperatorComplete( \ + OperatorType::TYPE, \ + TimeUtility::GetTimeofDayUs() - startTimeUs, true); \ + } else { \ + node_->GetMetric()->OnOperatorComplete( \ + OperatorType::TYPE, \ + TimeUtility::GetTimeofDayUs() - startTimeUs, false); \ + } \ } OPERATOR_ON_APPLY(GetDentry); @@ -208,7 +206,8 @@ void GetOrModifyS3ChunkInfoOperator::OnApply(int64_t index, { brpc::ClosureGuard doneGuard(done); - rc = metastore->GetOrModifyS3ChunkInfo(request, response, &iterator); + rc = metastore->GetOrModifyS3ChunkInfo(request, response, &iterator, + index); if (rc == MetaStatusCode::OK) { node_->UpdateAppliedIndex(index); response->set_appliedindex( @@ -251,7 +250,7 @@ void GetVolumeExtentOperator::OnApply(int64_t index, auto *response = static_cast(response_); auto *metaStore = node_->GetMetaStore(); - auto st = metaStore->GetVolumeExtent(request, response); + auto st = metaStore->GetVolumeExtent(request, response, index); node_->GetMetric()->OnOperatorComplete( OperatorType::GetVolumeExtent, TimeUtility::GetTimeofDayUs() - startTimeUs, st == MetaStatusCode::OK); @@ -292,11 +291,11 @@ void GetVolumeExtentOperator::OnApply(int64_t index, } #define OPERATOR_ON_APPLY_FROM_LOG(TYPE) \ - void TYPE##Operator::OnApplyFromLog(uint64_t startTimeUs) { \ + void TYPE##Operator::OnApplyFromLog(int64_t index, uint64_t startTimeUs) { \ std::unique_ptr selfGuard(this); \ TYPE##Response response; \ auto status = node_->GetMetaStore()->TYPE( \ - static_cast(request_), &response); \ + static_cast(request_), &response, index); \ node_->GetMetric()->OnOperatorCompleteFromLog( \ OperatorType::TYPE, TimeUtility::GetTimeofDayUs() - startTimeUs, \ status == MetaStatusCode::OK); \ @@ 
-317,7 +316,8 @@ OPERATOR_ON_APPLY_FROM_LOG(UpdateDeallocatableBlockGroup); #undef OPERATOR_ON_APPLY_FROM_LOG -void GetOrModifyS3ChunkInfoOperator::OnApplyFromLog(uint64_t startTimeUs) { +void GetOrModifyS3ChunkInfoOperator::OnApplyFromLog(int64_t index, + uint64_t startTimeUs) { std::unique_ptr selfGuard(this); GetOrModifyS3ChunkInfoRequest request; GetOrModifyS3ChunkInfoResponse response; @@ -325,7 +325,7 @@ void GetOrModifyS3ChunkInfoOperator::OnApplyFromLog(uint64_t startTimeUs) { request = *static_cast(request_); request.set_returns3chunkinfomap(false); auto status = node_->GetMetaStore()->GetOrModifyS3ChunkInfo( - &request, &response, &iterator); + &request, &response, &iterator, index); node_->GetMetric()->OnOperatorCompleteFromLog( OperatorType::GetOrModifyS3ChunkInfo, TimeUtility::GetTimeofDayUs() - startTimeUs, @@ -333,8 +333,9 @@ void GetOrModifyS3ChunkInfoOperator::OnApplyFromLog(uint64_t startTimeUs) { } #define READONLY_OPERATOR_ON_APPLY_FROM_LOG(TYPE) \ - void TYPE##Operator::OnApplyFromLog(uint64_t startTimeUs) { \ + void TYPE##Operator::OnApplyFromLog(int64_t index, uint64_t startTimeUs) { \ (void)startTimeUs; \ + (void)index; \ std::unique_ptr selfGuard(this); \ } diff --git a/curvefs/src/metaserver/copyset/meta_operator.h b/curvefs/src/metaserver/copyset/meta_operator.h index dca819f9e8..b78aa3e78f 100644 --- a/curvefs/src/metaserver/copyset/meta_operator.h +++ b/curvefs/src/metaserver/copyset/meta_operator.h @@ -71,16 +71,14 @@ class MetaOperator { /** * @brief Return internal closure */ - google::protobuf::Closure* Closure() const { - return done_; - } + google::protobuf::Closure* Closure() const { return done_; } void RedirectRequest(); virtual void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) = 0; - virtual void OnApplyFromLog(uint64_t startTimeUs) = 0; + virtual void OnApplyFromLog(int64_t index, uint64_t startTimeUs) = 0; // Get hash code of current operator which is used to push current operator // task to 
apply queue, and apply queue will guarantee that operators with @@ -109,9 +107,7 @@ class MetaOperator { /** * @brief Check whether current copyset node is leader */ - bool IsLeaderTerm() const { - return node_->IsLeaderTerm(); - } + bool IsLeaderTerm() const { return node_->IsLeaderTerm(); } /** * @brief Propose current operator to braft::Task @@ -136,9 +132,7 @@ class MetaOperator { * return true if operator is readonly and request carry with * an valid appliedindex */ - virtual bool CanBypassPropose() const { - return false; - } + virtual bool CanBypassPropose() const { return false; } protected: CopysetNode* node_; @@ -169,7 +163,7 @@ class GetDentryOperator : public MetaOperator { void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; @@ -190,7 +184,7 @@ class ListDentryOperator : public MetaOperator { void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; @@ -211,7 +205,7 @@ class CreateDentryOperator : public MetaOperator { void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; @@ -230,7 +224,7 @@ class DeleteDentryOperator : public MetaOperator { void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; @@ -249,7 +243,7 @@ class GetInodeOperator : public MetaOperator { void 
OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; @@ -270,7 +264,7 @@ class BatchGetInodeAttrOperator : public MetaOperator { void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; @@ -291,7 +285,7 @@ class BatchGetXAttrOperator : public MetaOperator { void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; @@ -312,7 +306,7 @@ class CreateInodeOperator : public MetaOperator { void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; @@ -331,7 +325,7 @@ class UpdateInodeOperator : public MetaOperator { void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; @@ -345,12 +339,12 @@ class UpdateInodeOperator : public MetaOperator { class GetOrModifyS3ChunkInfoOperator : public MetaOperator { public: - using MetaOperator::MetaOperator; + using MetaOperator::MetaOperator; void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const 
override; @@ -369,7 +363,7 @@ class DeleteInodeOperator : public MetaOperator { void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; @@ -388,7 +382,7 @@ class CreateRootInodeOperator : public MetaOperator { void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; @@ -407,7 +401,7 @@ class CreateManageInodeOperator : public MetaOperator { void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; @@ -426,7 +420,7 @@ class UpdateInodeS3VersionOperator : public MetaOperator { void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; @@ -445,7 +439,7 @@ class CreatePartitionOperator : public MetaOperator { void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; @@ -464,7 +458,7 @@ class DeletePartitionOperator : public MetaOperator { void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; @@ -483,7 +477,7 @@ class 
PrepareRenameTxOperator : public MetaOperator { void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; @@ -499,11 +493,10 @@ class GetVolumeExtentOperator : public MetaOperator { public: using MetaOperator::MetaOperator; - void OnApply(int64_t index, - google::protobuf::Closure* done, + void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; @@ -521,11 +514,10 @@ class UpdateVolumeExtentOperator : public MetaOperator { public: using MetaOperator::MetaOperator; - void OnApply(int64_t index, - google::protobuf::Closure* done, + void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; @@ -541,10 +533,10 @@ class UpdateDeallocatableBlockGroupOperator : public MetaOperator { public: using MetaOperator::MetaOperator; - void OnApply(int64_t index, google::protobuf::Closure *done, + void OnApply(int64_t index, google::protobuf::Closure* done, uint64_t startTimeUs) override; - void OnApplyFromLog(uint64_t startTimeUs) override; + void OnApplyFromLog(int64_t index, uint64_t startTimeUs) override; uint64_t HashCode() const override; diff --git a/curvefs/src/metaserver/dentry_manager.cpp b/curvefs/src/metaserver/dentry_manager.cpp index 5a36887665..46b0102369 100644 --- a/curvefs/src/metaserver/dentry_manager.cpp +++ b/curvefs/src/metaserver/dentry_manager.cpp @@ -26,25 +26,44 @@ #include "curvefs/src/metaserver/dentry_manager.h" +#define CHECK_APPLIED() \ + do { \ + if (logIndex <= appliedIndex_) { 
\ + VLOG(3) << __func__ \ + << " Log entry already be applied, index = " << logIndex \ + << ", applied index = " << appliedIndex_; \ + return MetaStatusCode::IDEMPOTENCE_OK; \ + } \ + } while (false) + namespace curvefs { namespace metaserver { DentryManager::DentryManager(std::shared_ptr dentryStorage, std::shared_ptr txManager) - : dentryStorage_(dentryStorage), - txManager_(txManager) {} + : dentryStorage_(dentryStorage), txManager_(txManager), appliedIndex_(-1) { + // for compatibility, we initialize applied index to -1 +} + +bool DentryManager::Init() { + if (dentryStorage_->Init()) { + auto s = dentryStorage_->GetAppliedIndex(&appliedIndex_); + return s == MetaStatusCode::OK || s == MetaStatusCode::NOT_FOUND; + } + return false; +} void DentryManager::Log4Dentry(const std::string& request, const Dentry& dentry) { - VLOG(9) << "Receive " << request << " request, dentry = (" + VLOG(9) << "Receive " << request << " request, dentry = (" << dentry.ShortDebugString() << ")"; } void DentryManager::Log4Code(const std::string& request, MetaStatusCode rc) { - auto succ = (rc == MetaStatusCode::OK || - rc == MetaStatusCode::IDEMPOTENCE_OK || - (rc == MetaStatusCode::NOT_FOUND && - (request == "ListDentry" || request == "GetDentry"))); + auto succ = + (rc == MetaStatusCode::OK || rc == MetaStatusCode::IDEMPOTENCE_OK || + (rc == MetaStatusCode::NOT_FOUND && + (request == "ListDentry" || request == "GetDentry"))); std::ostringstream message; message << request << " " << (succ ? 
"success" : "fail") << ", retCode = " << MetaStatusCode_Name(rc); @@ -56,25 +75,30 @@ void DentryManager::Log4Code(const std::string& request, MetaStatusCode rc) { } } -MetaStatusCode DentryManager::CreateDentry(const Dentry& dentry) { +MetaStatusCode DentryManager::CreateDentry(const Dentry& dentry, + int64_t logIndex) { + CHECK_APPLIED(); Log4Dentry("CreateDentry", dentry); - MetaStatusCode rc = dentryStorage_->Insert(dentry); + MetaStatusCode rc = dentryStorage_->Insert(dentry, logIndex); Log4Code("CreateDentry", rc); return rc; } -MetaStatusCode DentryManager::CreateDentry(const DentryVec& vec, - bool merge) { +MetaStatusCode DentryManager::CreateDentry(const DentryVec& vec, bool merge, + int64_t logIndex) { + CHECK_APPLIED(); VLOG(9) << "Receive CreateDentryVec request, dentryVec = (" << vec.ShortDebugString() << ")"; - MetaStatusCode rc = dentryStorage_->Insert(vec, merge); + MetaStatusCode rc = dentryStorage_->Insert(vec, merge, logIndex); Log4Code("CreateDentryVec", rc); return rc; } -MetaStatusCode DentryManager::DeleteDentry(const Dentry& dentry) { +MetaStatusCode DentryManager::DeleteDentry(const Dentry& dentry, + int64_t logIndex) { + CHECK_APPLIED(); Log4Dentry("DeleteDentry", dentry); - MetaStatusCode rc = dentryStorage_->Delete(dentry); + MetaStatusCode rc = dentryStorage_->Delete(dentry, logIndex); Log4Code("DeleteDentry", rc); return rc; } @@ -88,8 +112,7 @@ MetaStatusCode DentryManager::GetDentry(Dentry* dentry) { MetaStatusCode DentryManager::ListDentry(const Dentry& dentry, std::vector* dentrys, - uint32_t limit, - bool onlyDir) { + uint32_t limit, bool onlyDir) { Log4Dentry("ListDentry", dentry); MetaStatusCode rc = dentryStorage_->List(dentry, dentrys, limit, onlyDir); Log4Code("ListDentry", rc); @@ -101,12 +124,12 @@ void DentryManager::ClearDentry() { LOG(INFO) << "ClearDentry ok"; } -MetaStatusCode DentryManager::HandleRenameTx( - const std::vector& dentrys) { +MetaStatusCode DentryManager::HandleRenameTx(const std::vector& dentrys, + 
int64_t logIndex) { for (const auto& dentry : dentrys) { Log4Dentry("HandleRenameTx", dentry); } - auto rc = txManager_->HandleRenameTx(dentrys); + auto rc = txManager_->HandleRenameTx(dentrys, logIndex); Log4Code("HandleRenameTx", rc); return rc; } diff --git a/curvefs/src/metaserver/dentry_manager.h b/curvefs/src/metaserver/dentry_manager.h index 670bb59db0..ef5ca8305c 100644 --- a/curvefs/src/metaserver/dentry_manager.h +++ b/curvefs/src/metaserver/dentry_manager.h @@ -40,23 +40,26 @@ class DentryManager { DentryManager(std::shared_ptr dentryStorage, std::shared_ptr txManger); - MetaStatusCode CreateDentry(const Dentry& dentry); + bool Init(); + + MetaStatusCode CreateDentry(const Dentry& dentry, int64_t logIndex); // only invoked from snapshot loadding - MetaStatusCode CreateDentry(const DentryVec& vec, bool merge); + MetaStatusCode CreateDentry(const DentryVec& vec, bool merge, + int64_t logIndex); - MetaStatusCode DeleteDentry(const Dentry& dentry); + MetaStatusCode DeleteDentry(const Dentry& dentry, int64_t logIndex); MetaStatusCode GetDentry(Dentry* dentry); MetaStatusCode ListDentry(const Dentry& dentry, - std::vector* dentrys, - uint32_t limit, + std::vector* dentrys, uint32_t limit, bool onlyDir = false); void ClearDentry(); - MetaStatusCode HandleRenameTx(const std::vector& dentrys); + MetaStatusCode HandleRenameTx(const std::vector& dentrys, + int64_t logIndex); private: void Log4Dentry(const std::string& request, const Dentry& dentry); @@ -65,6 +68,7 @@ class DentryManager { private: std::shared_ptr dentryStorage_; std::shared_ptr txManager_; + int64_t appliedIndex_; }; } // namespace metaserver diff --git a/curvefs/src/metaserver/dentry_storage.cpp b/curvefs/src/metaserver/dentry_storage.cpp index 5f4cfb2255..57fc70c1cf 100644 --- a/curvefs/src/metaserver/dentry_storage.cpp +++ b/curvefs/src/metaserver/dentry_storage.cpp @@ -20,41 +20,45 @@ * Author: chenwei */ +#include "curvefs/src/metaserver/dentry_storage.h" + #include + +#include #include 
-#include #include -#include +#include +#include +#include "curvefs/src/metaserver/storage/status.h" +#include "curvefs/src/metaserver/storage/storage.h" +#include "curvefs/src/metaserver/transaction.h" #include "src/common/string_util.h" -#include "curvefs/src/metaserver/dentry_storage.h" namespace curvefs { namespace metaserver { using ::curve::common::ReadLockGuard; -using ::curve::common::WriteLockGuard; using ::curve::common::StringStartWith; -using ::curvefs::metaserver::storage::Status; +using ::curve::common::WriteLockGuard; using ::curvefs::metaserver::storage::Key4Dentry; -using ::curvefs::metaserver::storage::Prefix4SameParentDentry; using ::curvefs::metaserver::storage::Prefix4AllDentry; +using ::curvefs::metaserver::storage::Prefix4SameParentDentry; +using ::curvefs::metaserver::storage::Status; bool operator==(const Dentry& lhs, const Dentry& rhs) { - return EQUAL(fsid) && EQUAL(parentinodeid) && EQUAL(name) && - EQUAL(txid) && EQUAL(inodeid) && EQUAL(flag); + return EQUAL(fsid) && EQUAL(parentinodeid) && EQUAL(name) && EQUAL(txid) && + EQUAL(inodeid) && EQUAL(flag); } bool operator<(const Dentry& lhs, const Dentry& rhs) { - return LESS(fsid) || - LESS2(fsid, parentinodeid) || + return LESS(fsid) || LESS2(fsid, parentinodeid) || LESS3(fsid, parentinodeid, name) || LESS4(fsid, parentinodeid, name, txid); } static bool BelongSomeOne(const Dentry& lhs, const Dentry& rhs) { - return EQUAL(fsid) && EQUAL(parentinodeid) && EQUAL(name) && - EQUAL(inodeid); + return EQUAL(fsid) && EQUAL(parentinodeid) && EQUAL(name) && EQUAL(inodeid); } static bool HasDeleteMarkFlag(const Dentry& dentry) { @@ -62,9 +66,7 @@ static bool HasDeleteMarkFlag(const Dentry& dentry) { } DentryVector::DentryVector(DentryVec* vec) - : vec_(vec), - nPendingAdd_(0), - nPendingDel_(0) {} + : vec_(vec), nPendingAdd_(0), nPendingDel_(0) {} void DentryVector::Insert(const Dentry& dentry) { for (const Dentry& item : vec_->dentrys()) { @@ -112,10 +114,8 @@ void DentryVector::Confirm(uint64_t* 
count) { *count = *count + nPendingAdd_ - nPendingDel_; } -DentryList::DentryList(std::vector* list, - uint32_t limit, - const std::string& exclude, - uint64_t maxTxId, +DentryList::DentryList(std::vector* list, uint32_t limit, + const std::string& exclude, uint64_t maxTxId, bool onlyDir) : list_(list), size_(0), @@ -152,12 +152,22 @@ void DentryList::PushBack(DentryVec* vec) { VLOG(9) << "Push dentry, dentry = (" << last->ShortDebugString() << ")"; } -uint32_t DentryList::Size() { - return size_; -} +uint32_t DentryList::Size() { return size_; } + +bool DentryList::IsFull() { return limit_ != 0 && size_ >= limit_; } -bool DentryList::IsFull() { - return limit_ != 0 && size_ >= limit_; +const char* DentryStorage::kDentryAppliedKey("dentry"); +const char* DentryStorage::kDentryCountKey("count"); +const char* DentryStorage::kHandleTxKey("handleTx"); +const char* DentryStorage::kPendingTxKey("pendingTx"); + +bool DentryStorage::Init() { + auto s = GetDentryCount(&nDentry_); + if (s.ok() || s.IsNotFound()) { + s = GetHandleTxIndex(&handleTxIndex_); + return s.ok() || s.IsNotFound(); + } + return false; } DentryStorage::DentryStorage(std::shared_ptr kvStorage, @@ -165,15 +175,26 @@ DentryStorage::DentryStorage(std::shared_ptr kvStorage, uint64_t nDentry) : kvStorage_(kvStorage), table4Dentry_(nameGenerator->GetDentryTableName()), + table4AppliedIndex_(nameGenerator->GetAppliedIndexTableName()), + table4Transaction_(nameGenerator->GetTransactionTableName()), + table4DentryCount_(nameGenerator->GetDentryCountTableName()), + handleTxIndex_(-1), nDentry_(nDentry), - conv_() {} + conv_() { + // NOTE: for compatibility with older versions + // we cannot ignore `nDentry` argument + // try get dentry count for rocksdb + // if we got it, replace old value +} std::string DentryStorage::DentryKey(const Dentry& dentry) { Key4Dentry key(dentry.fsid(), dentry.parentinodeid(), dentry.name()); return conv_.SerializeToString(key); } -bool DentryStorage::CompressDentry(DentryVec* vec, 
BTree* dentrys) { +bool DentryStorage::CompressDentry(storage::StorageTransaction* txn, + DentryVec* vec, BTree* dentrys, + uint64_t* outCount) { DentryVector vector(vec); std::vector deleted; if (dentrys->size() == 2) { @@ -185,28 +206,36 @@ bool DentryStorage::CompressDentry(DentryVec* vec, BTree* dentrys) { for (const auto& dentry : deleted) { vector.Delete(dentry); } - + const char* step = "Compress dentry from transaction"; Status s; std::string skey = DentryKey(*dentrys->begin()); - if (vec->dentrys_size() == 0) { // delete directly - s = kvStorage_->SDel(table4Dentry_, skey); - } else { - s = kvStorage_->SSet(table4Dentry_, skey, *vec); - } - - if (s.ok()) { - vector.Confirm(&nDentry_); + do { + if (vec->dentrys_size() == 0) { // delete directly + s = txn->SDel(table4Dentry_, skey); + } else { + s = txn->SSet(table4Dentry_, skey, *vec); + } + if (!s.ok()) { + break; + } + uint64_t countCopy = *outCount; + vector.Confirm(&countCopy); + s = SetDentryCount(txn, countCopy); + if (!s.ok()) { + step = "Insert dentry count to transaction"; + break; + } + *outCount = countCopy; return true; - } + } while (false); + LOG(ERROR) << step << " failed, status = " << s.ToString(); return false; } // NOTE: Find() return the dentry which has the latest txid, // and it will clean the old txid's dentry if you specify compress to true -MetaStatusCode DentryStorage::Find(const Dentry& in, - Dentry* out, - DentryVec* vec, - bool compress) { +MetaStatusCode DentryStorage::Find(const Dentry& in, Dentry* out, + DentryVec* vec) { std::string skey = DentryKey(in); Status s = kvStorage_->SGet(table4Dentry_, skey, vec); if (s.IsNotFound()) { @@ -235,92 +264,344 @@ MetaStatusCode DentryStorage::Find(const Dentry& in, rc = MetaStatusCode::OK; *out = *dentrys.rbegin(); } + return rc; +} - if (compress && !CompressDentry(vec, &dentrys)) { +// NOTE: Find() return the dentry which has the latest txid, +// and it will clean the old txid's dentry if you specify compressOutCount to +// 
non-nullptr compressOutCount must point to a variable that value is equal +// with `nDentry_` +MetaStatusCode DentryStorage::Find(storage::StorageTransaction* txn, + const Dentry& in, Dentry* out, + DentryVec* vec, uint64_t* compressOutCount) { + std::string skey = DentryKey(in); + Status s = txn->SGet(table4Dentry_, skey, vec); + if (s.IsNotFound()) { + return MetaStatusCode::NOT_FOUND; + } else if (!s.ok()) { + return MetaStatusCode::STORAGE_INTERNAL_ERROR; + } + // status = OK + BTree dentrys; + DentryVector vector(vec); + vector.Filter(in.txid(), &dentrys); + size_t size = dentrys.size(); + if (size > 2) { + LOG(ERROR) << "There are more than 2 dentrys"; + return MetaStatusCode::NOT_FOUND; + } else if (size == 0) { + return MetaStatusCode::NOT_FOUND; + } + + // size == 1 || size == 2 + MetaStatusCode rc; + if (HasDeleteMarkFlag(*dentrys.rbegin())) { + rc = MetaStatusCode::NOT_FOUND; + } else { + rc = MetaStatusCode::OK; + *out = *dentrys.rbegin(); + } + + if (compressOutCount != nullptr && + !CompressDentry(txn, vec, &dentrys, compressOutCount)) { rc = MetaStatusCode::STORAGE_INTERNAL_ERROR; } return rc; } -MetaStatusCode DentryStorage::Insert(const Dentry& dentry) { +MetaStatusCode DentryStorage::GetAppliedIndex(int64_t* index) { + common::AppliedIndex val; + auto s = kvStorage_->SGet(table4AppliedIndex_, kDentryAppliedKey, &val); + if (s.ok()) { + *index = val.index(); + return MetaStatusCode::OK; + } + if (s.IsNotFound()) { + return MetaStatusCode::NOT_FOUND; + } + return MetaStatusCode::STORAGE_INTERNAL_ERROR; +} + +storage::Status DentryStorage::SetAppliedIndex( + storage::StorageTransaction* transaction, int64_t index) { + common::AppliedIndex val; + val.set_index(index); + return transaction->SSet(table4AppliedIndex_, kDentryAppliedKey, val); +} + +storage::Status DentryStorage::DelAppliedIndex( + storage::StorageTransaction* transaction) { + return transaction->SDel(table4AppliedIndex_, kDentryAppliedKey); +} + +storage::Status 
DentryStorage::SetHandleTxIndex( + storage::StorageTransaction* transaction, int64_t index) { + common::AppliedIndex val; + val.set_index(index); + return transaction->SSet(table4AppliedIndex_, kHandleTxKey, val); +} + +storage::Status DentryStorage::DelHandleTxIndex( + storage::StorageTransaction* transaction) { + return transaction->SDel(table4AppliedIndex_, kHandleTxKey); +} + +MetaStatusCode DentryStorage::GetPendingTx( + metaserver::TransactionRequest* request) { + auto s = kvStorage_->SGet(table4Transaction_, kPendingTxKey, request); + if (s.ok()) { + return MetaStatusCode::OK; + } + if (s.IsNotFound()) { + return MetaStatusCode::NOT_FOUND; + } + return MetaStatusCode::STORAGE_INTERNAL_ERROR; +} + +storage::Status DentryStorage::SetPendingTx( + storage::StorageTransaction* txn, + const metaserver::TransactionRequest& request) { + return txn->SSet(table4Transaction_, kPendingTxKey, request); +} + +storage::Status DentryStorage::DelPendingTx(storage::StorageTransaction* txn) { + return txn->SDel(table4Transaction_, kPendingTxKey); +} + +storage::Status DentryStorage::ClearPendingTx( + storage::StorageTransaction* txn) { + metaserver::TransactionRequest request; + request.set_type(metaserver::TransactionRequest::None); + request.set_rawpayload(""); + return txn->SSet(table4Transaction_, kPendingTxKey, request); +} + +storage::Status DentryStorage::SetDentryCount(storage::StorageTransaction* txn, + uint64_t count) { + common::ItemCount val; + val.set_count(count); + return txn->SSet(table4DentryCount_, kDentryCountKey, val); +} + +storage::Status DentryStorage::DelDentryCount( + storage::StorageTransaction* txn) { + return txn->SDel(table4DentryCount_, kDentryCountKey); +} + +storage::Status DentryStorage::GetDentryCount(uint64_t* count) { + common::ItemCount val; + auto s = kvStorage_->SGet(table4DentryCount_, kDentryCountKey, &val); + if (s.ok()) { + *count = val.count(); + } + return s; +} + +storage::Status DentryStorage::GetHandleTxIndex(int64_t* index) { + 
common::AppliedIndex val; + auto s = kvStorage_->SGet(table4AppliedIndex_, kHandleTxKey, &val); + if (s.ok()) { + *index = val.index(); + } + return s; +} + +MetaStatusCode DentryStorage::Insert(const Dentry& dentry, int64_t logIndex) { WriteLockGuard lg(rwLock_); Dentry out; DentryVec vec; - MetaStatusCode rc = Find(dentry, &out, &vec, true); - if (rc == MetaStatusCode::OK) { - if (BelongSomeOne(out, dentry)) { - return MetaStatusCode::IDEMPOTENCE_OK; + std::shared_ptr txn; + storage::Status s; + const char* step = "Begin transaction"; + do { + txn = kvStorage_->BeginTransaction(); + if (txn == nullptr) { + break; } - return MetaStatusCode::DENTRY_EXIST; - } else if (rc != MetaStatusCode::NOT_FOUND) { - return MetaStatusCode::STORAGE_INTERNAL_ERROR; - } - - // rc == MetaStatusCode::NOT_FOUND - DentryVector vector(&vec); - vector.Insert(dentry); - std::string skey = DentryKey(dentry); - Status s = kvStorage_->SSet(table4Dentry_, skey, vec); - if (!s.ok()) { - LOG(ERROR) << "Insert dentry failed, status = " << s.ToString(); - return MetaStatusCode::STORAGE_INTERNAL_ERROR; + uint64_t count = nDentry_; + s = SetAppliedIndex(txn.get(), logIndex); + if (!s.ok()) { + step = "Insert applied index to transaction"; + break; + } + MetaStatusCode rc = Find(txn.get(), dentry, &out, &vec, &count); + if (rc == MetaStatusCode::OK) { + auto s = txn->Commit(); + if (!s.ok()) { + step = "Commit compress dentry transaction"; + break; + } + // if compress is success + // we use output dentry count to replace old one + nDentry_ = count; + if (BelongSomeOne(out, dentry)) { + return MetaStatusCode::IDEMPOTENCE_OK; + } + return MetaStatusCode::DENTRY_EXIST; + } else if (rc != MetaStatusCode::NOT_FOUND) { + step = "Find dentry failed"; + break; + } + // rc == MetaStatusCode::NOT_FOUND + + // NOTE: `count` maybe already written by `Find()` in here + // so we continue use `count` in follow operations + DentryVector vector(&vec); + vector.Insert(dentry); + std::string skey = 
DentryKey(dentry); + s = txn->SSet(table4Dentry_, skey, vec); + if (!s.ok()) { + step = "Insert dentry to transaction"; + break; + } + vector.Confirm(&count); + s = SetDentryCount(txn.get(), count); + if (!s.ok()) { + step = "Insert dentry count to transaction"; + break; + } + s = txn->Commit(); + if (!s.ok()) { + step = "Insert dentry"; + break; + } + nDentry_ = count; + return MetaStatusCode::OK; + } while (false); + LOG(ERROR) << step << " failed, status = " << s.ToString(); + if (txn != nullptr && !txn->Rollback().ok()) { + LOG(ERROR) << "Rollback insert dentry transaction failed, status = " + << s.ToString(); } - vector.Confirm(&nDentry_); - return MetaStatusCode::OK; + return MetaStatusCode::STORAGE_INTERNAL_ERROR; } -MetaStatusCode DentryStorage::Insert(const DentryVec& vec, bool merge) { +MetaStatusCode DentryStorage::Insert(const DentryVec& vec, bool merge, + int64_t logIndex) { WriteLockGuard lg(rwLock_); Status s; DentryVec oldVec; std::string skey = DentryKey(vec.dentrys(0)); - if (merge) { // for old version dumpfile (v1) - s = kvStorage_->SGet(table4Dentry_, skey, &oldVec); - if (s.IsNotFound()) { - // do nothing - } else if (!s.ok()) { - return MetaStatusCode::STORAGE_INTERNAL_ERROR; + std::shared_ptr txn; + const char* step = "Begin transaction"; + do { + txn = kvStorage_->BeginTransaction(); + if (txn == nullptr) { + break; } + if (merge) { // for old version dumpfile (v1) + s = txn->SGet(table4Dentry_, skey, &oldVec); + if (s.IsNotFound()) { + // do nothing + } else if (!s.ok()) { + step = "Find old version from transaction"; + break; + } + } + DentryVector vector(&oldVec); + vector.Merge(vec); + s = txn->SSet(table4Dentry_, skey, oldVec); + if (!s.ok()) { + step = "Insert dentry vector to transaction"; + break; + } + s = SetAppliedIndex(txn.get(), logIndex); + if (!s.ok()) { + step = "Insert applied index to transaction"; + break; + } + uint64_t count = nDentry_; + vector.Confirm(&count); + s = SetDentryCount(txn.get(), count); + if (!s.ok()) { + 
step = "Insert dentry count to transaction"; + break; + } + s = txn->Commit(); + if (!s.ok()) { + step = "Insert dentry vector"; + break; + } + nDentry_ = count; + return MetaStatusCode::OK; + } while (false); + LOG(ERROR) << step << " failed, status = " << s.ToString(); + if (txn != nullptr && !txn->Rollback().ok()) { + LOG(ERROR) << "Rollback insert dentry transaction failed, status = " + << s.ToString(); } - - DentryVector vector(&oldVec); - vector.Merge(vec); - s = kvStorage_->SSet(table4Dentry_, skey, oldVec); - if (!s.ok()) { - LOG(ERROR) << "Insert dentry vector failed, status = " << s.ToString(); - return MetaStatusCode::STORAGE_INTERNAL_ERROR; - } - vector.Confirm(&nDentry_); - return MetaStatusCode::OK; + return MetaStatusCode::STORAGE_INTERNAL_ERROR; } -MetaStatusCode DentryStorage::Delete(const Dentry& dentry) { +MetaStatusCode DentryStorage::Delete(const Dentry& dentry, int64_t logIndex) { WriteLockGuard lg(rwLock_); Dentry out; DentryVec vec; - MetaStatusCode rc = Find(dentry, &out, &vec, true); - if (rc == MetaStatusCode::NOT_FOUND) { - return MetaStatusCode::NOT_FOUND; - } else if (rc != MetaStatusCode::OK) { - return MetaStatusCode::STORAGE_INTERNAL_ERROR; - } - - Status s; - DentryVector vector(&vec); - vector.Delete(out); - std::string skey = DentryKey(dentry); - if (vec.dentrys_size() == 0) { - s = kvStorage_->SDel(table4Dentry_, skey); - } else { - s = kvStorage_->SSet(table4Dentry_, skey, vec); - } - - if (s.ok()) { - vector.Confirm(&nDentry_); + const char* step = "Begin transaction"; + std::shared_ptr txn; + storage::Status s; + do { + txn = kvStorage_->BeginTransaction(); + if (txn == nullptr) { + break; + } + uint64_t count = nDentry_; + s = SetAppliedIndex(txn.get(), logIndex); + if (!s.ok()) { + step = "Insert applied index to transaction"; + break; + } + MetaStatusCode rc = Find(txn.get(), dentry, &out, &vec, &count); + if (rc == MetaStatusCode::NOT_FOUND) { + // NOTE: we should commit transaction + // even if rc is NOT_FOUND + // 
because Find() maybe write dentry count to rocksdb + s = txn->Commit(); + if (!s.ok()) { + step = "Commit transaction"; + break; + } + nDentry_ = count; + return MetaStatusCode::NOT_FOUND; + } else if (rc != MetaStatusCode::OK) { + step = "Find dentry"; + break; + } + DentryVector vector(&vec); + vector.Delete(out); + std::string skey = DentryKey(dentry); + if (vec.dentrys_size() == 0) { + s = txn->SDel(table4Dentry_, skey); + } else { + s = txn->SSet(table4Dentry_, skey, vec); + } + if (!s.ok()) { + step = "Delete dentry vector from transaction"; + break; + } + // NOTE: we should use count variable instead of nDentry_ + // (it means that we should not reset count to nDentry_) + // count is newest version of dentry count + vector.Confirm(&count); + s = SetDentryCount(txn.get(), count); + if (!s.ok()) { + step = "Insert dentry count to transaction"; + break; + } + s = txn->Commit(); + if (!s.ok()) { + step = "Delete dentry vector"; + break; + } + nDentry_ = count; return MetaStatusCode::OK; + } while (false); + LOG(ERROR) << step << " failed, status = " << s.ToString(); + if (txn != nullptr && !txn->Rollback().ok()) { + LOG(ERROR) << "Rollback transaction failed"; } return MetaStatusCode::STORAGE_INTERNAL_ERROR; } @@ -330,7 +611,7 @@ MetaStatusCode DentryStorage::Get(Dentry* dentry) { Dentry out; DentryVec vec; - MetaStatusCode rc = Find(*dentry, &out, &vec, false); + MetaStatusCode rc = Find(*dentry, &out, &vec); if (rc == MetaStatusCode::NOT_FOUND) { return MetaStatusCode::NOT_FOUND; } else if (rc != MetaStatusCode::OK) { @@ -343,8 +624,7 @@ MetaStatusCode DentryStorage::Get(Dentry* dentry) { } MetaStatusCode DentryStorage::List(const Dentry& dentry, - std::vector* dentrys, - uint32_t limit, + std::vector* dentrys, uint32_t limit, bool onlyDir) { // TODO(all): consider store dir dentry and file dentry separately ReadLockGuard lg(rwLock_); @@ -393,78 +673,222 @@ MetaStatusCode DentryStorage::List(const Dentry& dentry, } } time.stop(); - VLOG(1) << "ListDentry 
request: dentry = (" - << dentry.ShortDebugString() << ")" - << ", onlyDir = " << onlyDir - << ", limit = " << limit - << ", lower key = " << lower - << ", seekTimes = " << seekTimes + VLOG(1) << "ListDentry request: dentry = (" << dentry.ShortDebugString() + << ")" + << ", onlyDir = " << onlyDir << ", limit = " << limit + << ", lower key = " << lower << ", seekTimes = " << seekTimes << ", dentrySize = " << dentrys->size() << ", costUs = " << time.u_elapsed(); return MetaStatusCode::OK; } -MetaStatusCode DentryStorage::HandleTx(TX_OP_TYPE type, const Dentry& dentry) { +MetaStatusCode DentryStorage::PrepareTx( + const std::vector& dentrys, + const metaserver::TransactionRequest& txRequest, int64_t logIndex) { WriteLockGuard lg(rwLock_); - + uint64_t count = nDentry_; Status s; - Dentry out; - DentryVec vec; - DentryVector vector(&vec); - std::string skey = DentryKey(dentry); - MetaStatusCode rc = MetaStatusCode::OK; - switch (type) { - case TX_OP_TYPE::PREPARE: - s = kvStorage_->SGet(table4Dentry_, skey, &vec); + const char* step = "Begin transaction"; + std::shared_ptr txn; + do { + txn = kvStorage_->BeginTransaction(); + if (txn == nullptr) { + break; + } + bool quit = false; + for (const auto& dentry : dentrys) { + DentryVec vec; + DentryVector vector(&vec); + std::string skey = DentryKey(dentry); + s = txn->SGet(table4Dentry_, skey, &vec); if (!s.ok() && !s.IsNotFound()) { - rc = MetaStatusCode::STORAGE_INTERNAL_ERROR; + step = "Get dentry from transaction"; + quit = true; break; } - // OK || NOT_FOUND vector.Insert(dentry); - s = kvStorage_->SSet(table4Dentry_, skey, vec); + s = txn->SSet(table4Dentry_, skey, vec); if (!s.ok()) { - rc = MetaStatusCode::STORAGE_INTERNAL_ERROR; - } else { - vector.Confirm(&nDentry_); + step = "Insert dentry to transaction"; + quit = true; + break; } + vector.Confirm(&count); + } + if (quit) { + break; + } + s = SetAppliedIndex(txn.get(), logIndex); + if (!s.ok()) { + step = "Insert applied index to transaction"; + break; + } + s 
= SetPendingTx(txn.get(), txRequest); + if (!s.ok()) { + step = "Insert tx request to transaction"; + break; + } + s = txn->Commit(); + if (!s.ok()) { + step = "Commit transaction"; break; + } + nDentry_ = count; + return MetaStatusCode::OK; + } while (false); + LOG(ERROR) << step << " failed, status = " << s.ToString(); + if (txn != nullptr && !txn->Rollback().ok()) { + LOG(ERROR) << "Rollback transaction fail"; + } + return MetaStatusCode::STORAGE_INTERNAL_ERROR; +} - case TX_OP_TYPE::COMMIT: - rc = Find(dentry, &out, &vec, true); - if (rc == MetaStatusCode::OK || - rc == MetaStatusCode::NOT_FOUND) { - rc = MetaStatusCode::OK; +MetaStatusCode DentryStorage::CommitTx(const std::vector& dentrys, + int64_t logIndex) { + if (logIndex <= handleTxIndex_) { + // NOTE: if we enter here + // means that this log entry is "half apply" + // there are two parts in HandleRenameTx: + // * Commit last one (1) + // * Prepare this one (2) + // if (1) already write to rocksdb, but (2) doesn't + // we enter here + LOG(INFO) << "Log entry already be applied, index = " << logIndex + << " handle tx index = " << handleTxIndex_; + return MetaStatusCode::IDEMPOTENCE_OK; + } + WriteLockGuard lg(rwLock_); + Status s; + const char* step = "Begin transaction"; + std::shared_ptr txn; + do { + txn = kvStorage_->BeginTransaction(); + if (txn == nullptr) { + break; + } + uint64_t count = nDentry_; + bool quit = false; + for (const auto& dentry : dentrys) { + Dentry out; + DentryVec vec; + std::string skey = DentryKey(dentry); + MetaStatusCode rc = MetaStatusCode::OK; + rc = Find(txn.get(), dentry, &out, &vec, &count); + if (rc != MetaStatusCode::OK && rc != MetaStatusCode::NOT_FOUND) { + step = "Find dentry from transaction"; + quit = true; + break; } + } + if (quit) { + break; + } + s = SetHandleTxIndex(txn.get(), logIndex); + if (!s.ok()) { + step = "Insert handle tx index to transaction"; + break; + } + s = ClearPendingTx(txn.get()); + if (!s.ok()) { + step = "Delete pending tx from 
transaction"; break; + } + s = txn->Commit(); + if (!s.ok()) { + step = "Commit transaction"; + break; + } + nDentry_ = count; + return MetaStatusCode::OK; + } while (false); + LOG(ERROR) << step << " failed, status = " << s.ToString(); + if (txn != nullptr && !txn->Rollback().ok()) { + LOG(ERROR) << "Rollback transaction failed"; + } + return MetaStatusCode::STORAGE_INTERNAL_ERROR; +} - case TX_OP_TYPE::ROLLBACK: - s = kvStorage_->SGet(table4Dentry_, skey, &vec); +MetaStatusCode DentryStorage::RollbackTx(const std::vector& dentrys, + int64_t logIndex) { + if (logIndex <= handleTxIndex_) { + // NOTE: if we enter here + // means that this log entry is "half apply" + // there are two parts in HandleRenameTx: + // * Commit last one (1) + // * Prepare this one (2) + // if (1) already write to rocksdb, but (2) doesn't + // we enter here + LOG(INFO) << "Log entry already be applied, index = " << logIndex + << " handle tx index = " << handleTxIndex_; + return MetaStatusCode::IDEMPOTENCE_OK; + } + WriteLockGuard lg(rwLock_); + Status s; + const char* step = "Begin transaction"; + std::shared_ptr txn; + do { + txn = kvStorage_->BeginTransaction(); + if (txn == nullptr) { + break; + } + uint64_t count = nDentry_; + bool quit = false; + for (const auto& dentry : dentrys) { + DentryVec vec; + DentryVector vector(&vec); + std::string skey = DentryKey(dentry); + s = txn->SGet(table4Dentry_, skey, &vec); if (!s.ok() && !s.IsNotFound()) { - rc = MetaStatusCode::STORAGE_INTERNAL_ERROR; + step = "Find dentry"; + quit = true; break; } - // OK || NOT_FOUND vector.Delete(dentry); if (vec.dentrys_size() == 0) { // delete directly - s = kvStorage_->SDel(table4Dentry_, skey); + s = txn->SDel(table4Dentry_, skey); } else { - s = kvStorage_->SSet(table4Dentry_, skey, vec); + s = txn->SSet(table4Dentry_, skey, vec); } if (!s.ok()) { - rc = MetaStatusCode::STORAGE_INTERNAL_ERROR; - } else { - vector.Confirm(&nDentry_); + step = "Delete dentry from transaction"; + quit = true; + break; } + 
vector.Confirm(&count); + } + if (quit) { break; - - default: - rc = MetaStatusCode::PARAM_ERROR; + } + s = SetDentryCount(txn.get(), count); + if (!s.ok()) { + step = "Insert dentry count to transaction"; + break; + } + s = SetHandleTxIndex(txn.get(), logIndex); + if (!s.ok()) { + step = "Insert handle tx index to transaction"; + break; + } + s = ClearPendingTx(txn.get()); + if (!s.ok()) { + step = "Delete pending tx from transaction"; + break; + } + s = txn->Commit(); + if (!s.ok()) { + step = "Commit transaction"; + break; + } + nDentry_ = count; + return MetaStatusCode::OK; + } while (false); + LOG(ERROR) << step << " failed, status = " << s.ToString(); + if (txn != nullptr && !txn->Rollback().ok()) { + LOG(ERROR) << "Rollback transaction failed"; } - - return rc; + return MetaStatusCode::STORAGE_INTERNAL_ERROR; } std::shared_ptr DentryStorage::GetAll() { @@ -493,15 +917,61 @@ bool DentryStorage::Empty() { return true; } +// NOTE: we will clear all apply metadata in `Clear()` +// when follower replay logs on this snapshot, it may cause +// repeat apply log entries, and raise some errors +// but we know this partition will be clear at the end of logs MetaStatusCode DentryStorage::Clear() { + // FIXME: non-atomic clear operations + // NOTE: clear operations non-atomic is acceptable + // because if we fail stop, we will replay + // raft logs and clear it again WriteLockGuard lg(rwLock_); Status s = kvStorage_->SClear(table4Dentry_); if (!s.ok()) { - LOG(ERROR) << "failed to clear dentry table, status = " << s.ToString(); + LOG(ERROR) << "Clear dentry table failed, status = " << s.ToString(); return MetaStatusCode::STORAGE_INTERNAL_ERROR; } - nDentry_ = 0; - return MetaStatusCode::OK; + std::shared_ptr txn; + const char* step = "Begin transaction"; + do { + txn = kvStorage_->BeginTransaction(); + if (txn == nullptr) { + break; + } + s = DelDentryCount(txn.get()); + if (!s.ok()) { + step = "Delete dentry count"; + break; + } + s = DelPendingTx(txn.get()); + if 
(!s.ok()) { + step = "Delete pending tx"; + break; + } + s = DelAppliedIndex(txn.get()); + if (!s.ok()) { + step = "Delete applied index"; + break; + } + s = DelHandleTxIndex(txn.get()); + if (!s.ok()) { + step = "Delete handle tx index"; + break; + } + s = txn->Commit(); + if (!s.ok()) { + step = "Commit clear dentry table transaction"; + break; + } + nDentry_ = 0; + return MetaStatusCode::OK; + } while (false); + if (txn != nullptr && !txn->Rollback().ok()) { + LOG(ERROR) << "Rollback transaction failed"; + } + LOG(ERROR) << step << " failed, status = " << s.ToString(); + return MetaStatusCode::STORAGE_INTERNAL_ERROR; } } // namespace metaserver diff --git a/curvefs/src/metaserver/dentry_storage.h b/curvefs/src/metaserver/dentry_storage.h index 4a159ff740..102df6737a 100644 --- a/curvefs/src/metaserver/dentry_storage.h +++ b/curvefs/src/metaserver/dentry_storage.h @@ -23,25 +23,26 @@ #ifndef CURVEFS_SRC_METASERVER_DENTRY_STORAGE_H_ #define CURVEFS_SRC_METASERVER_DENTRY_STORAGE_H_ +#include #include +#include #include #include -#include -#include #include "absl/container/btree_set.h" -#include "src/common/concurrent/rw_lock.h" #include "curvefs/proto/metaserver.pb.h" -#include "curvefs/src/metaserver/storage/storage.h" #include "curvefs/src/metaserver/storage/converter.h" +#include "curvefs/src/metaserver/storage/status.h" +#include "curvefs/src/metaserver/storage/storage.h" +#include "src/common/concurrent/rw_lock.h" namespace curvefs { namespace metaserver { using ::curve::common::RWLock; +using ::curvefs::metaserver::storage::Converter; using ::curvefs::metaserver::storage::Iterator; using ::curvefs::metaserver::storage::NameGenerator; -using ::curvefs::metaserver::storage::Converter; using KVStorage = ::curvefs::metaserver::storage::KVStorage; using BTree = absl::btree_set; @@ -77,11 +78,8 @@ class DentryVector { class DentryList { public: - DentryList(std::vector* list, - uint32_t limit, - const std::string& exclude, - uint64_t maxTxId, - bool onlyDir); + 
DentryList(std::vector* list, uint32_t limit, + const std::string& exclude, uint64_t maxTxId, bool onlyDir); void PushBack(DentryVec* vec); @@ -99,33 +97,34 @@ class DentryList { }; class DentryStorage { - public: - enum class TX_OP_TYPE { - PREPARE, - COMMIT, - ROLLBACK, - }; - public: DentryStorage(std::shared_ptr kvStorage, std::shared_ptr nameGenerator, uint64_t nDentry); - MetaStatusCode Insert(const Dentry& dentry); + bool Init(); + + MetaStatusCode Insert(const Dentry& dentry, int64_t logIndex); // only for loadding from snapshot - MetaStatusCode Insert(const DentryVec& vec, bool merge); + MetaStatusCode Insert(const DentryVec& vec, bool merge, int64_t logIndex); - MetaStatusCode Delete(const Dentry& dentry); + MetaStatusCode Delete(const Dentry& dentry, int64_t logIndex); MetaStatusCode Get(Dentry* dentry); - MetaStatusCode List(const Dentry& dentry, - std::vector* dentrys, - uint32_t limit, - bool onlyDir = false); + MetaStatusCode List(const Dentry& dentry, std::vector* dentrys, + uint32_t limit, bool onlyDir = false); + + MetaStatusCode PrepareTx(const std::vector& dentrys, + const metaserver::TransactionRequest& txRequest, + int64_t logIndex); + + MetaStatusCode CommitTx(const std::vector& dentrys, + int64_t logIndex); - MetaStatusCode HandleTx(TX_OP_TYPE type, const Dentry& dentry); + MetaStatusCode RollbackTx(const std::vector& dentrys, + int64_t logIndex); std::shared_ptr GetAll(); @@ -135,22 +134,63 @@ class DentryStorage { MetaStatusCode Clear(); + MetaStatusCode GetPendingTx(metaserver::TransactionRequest* request); + + MetaStatusCode GetAppliedIndex(int64_t* index); + private: std::string DentryKey(const Dentry& entry); - bool CompressDentry(DentryVec* vec, BTree* dentrys); + bool CompressDentry(storage::StorageTransaction* txn, DentryVec* vec, + BTree* dentrys, uint64_t* outCount); + + MetaStatusCode Find(const Dentry& in, Dentry* out, DentryVec* vec); + + MetaStatusCode Find(storage::StorageTransaction* txn, const Dentry& in, + Dentry* out, 
DentryVec* vec, + uint64_t* compressOutCount); + + storage::Status SetAppliedIndex(storage::StorageTransaction* transaction, + int64_t index); + + storage::Status DelAppliedIndex(storage::StorageTransaction* transaction); + + storage::Status SetHandleTxIndex(storage::StorageTransaction* transaction, + int64_t index); + + storage::Status DelHandleTxIndex(storage::StorageTransaction* transaction); - MetaStatusCode Find(const Dentry& in, - Dentry* out, - DentryVec* vec, - bool compress); + storage::Status SetPendingTx(storage::StorageTransaction* transaction, + const metaserver::TransactionRequest& request); + + storage::Status DelPendingTx(storage::StorageTransaction* transaction); + + storage::Status ClearPendingTx(storage::StorageTransaction* transaction); + + storage::Status SetDentryCount(storage::StorageTransaction* transaction, + uint64_t count); + + storage::Status DelDentryCount(storage::StorageTransaction* transaction); + + storage::Status GetDentryCount(uint64_t* count); + + storage::Status GetHandleTxIndex(int64_t* count); private: RWLock rwLock_; std::shared_ptr kvStorage_; std::string table4Dentry_; + std::string table4AppliedIndex_; + std::string table4Transaction_; + std::string table4DentryCount_; + int64_t handleTxIndex_; uint64_t nDentry_; Converter conv_; + + static const char* kDentryCountKey; + static const char* kDentryAppliedKey; + static const char* kHandleTxKey; + static const char* kPendingTxKey; }; } // namespace metaserver diff --git a/curvefs/src/metaserver/inode_manager.cpp b/curvefs/src/metaserver/inode_manager.cpp index f0e1146445..d5e4fce007 100644 --- a/curvefs/src/metaserver/inode_manager.cpp +++ b/curvefs/src/metaserver/inode_manager.cpp @@ -24,32 +24,53 @@ #include #include + +#include #include +#include #include #include -#include #include "curvefs/proto/metaserver.pb.h" #include "curvefs/src/common/define.h" +#include "curvefs/src/metaserver/storage/storage.h" #include "src/common/concurrent/name_lock.h" #include 
"src/common/timeutility.h" -using ::curve::common::TimeUtility; using ::curve::common::NameLockGuard; +using ::curve::common::TimeUtility; using ::google::protobuf::util::MessageDifferencer; +#define CHECK_APPLIED() \ + do { \ + if (logIndex <= appliedIndex_) { \ + VLOG(3) << __func__ \ + << "Log entry already be applied, index = " << logIndex \ + << "applied index = " << appliedIndex_; \ + return MetaStatusCode::IDEMPOTENCE_OK; \ + } \ + } while (false) + namespace curvefs { namespace metaserver { + +bool InodeManager::Init() { + if (inodeStorage_->Init()) { + auto s = inodeStorage_->GetAppliedIndex(&appliedIndex_); + return s == MetaStatusCode::OK || s == MetaStatusCode::NOT_FOUND; + } + return false; +} + MetaStatusCode InodeManager::CreateInode(uint64_t inodeId, - const InodeParam ¶m, - Inode *newInode) { + const InodeParam& param, + Inode* newInode, int64_t logIndex) { + CHECK_APPLIED(); VLOG(6) << "CreateInode, fsId = " << param.fsId - << ", length = " << param.length - << ", uid = " << param.uid << ", gid = " << param.gid - << ", mode = " << param.mode + << ", length = " << param.length << ", uid = " << param.uid + << ", gid = " << param.gid << ", mode = " << param.mode << ", type =" << FsFileType_Name(param.type) - << ", symlink = " << param.symlink - << ", rdev = " << param.rdev + << ", symlink = " << param.symlink << ", rdev = " << param.rdev << ", parent = " << param.parent; if (param.type == FsFileType::TYPE_SYM_LINK && param.symlink.empty()) { return MetaStatusCode::SYM_LINK_EMPTY; @@ -63,7 +84,7 @@ MetaStatusCode InodeManager::CreateInode(uint64_t inodeId, } // 2. 
insert inode - MetaStatusCode ret = inodeStorage_->Insert(inode); + MetaStatusCode ret = inodeStorage_->Insert(inode, logIndex); if (ret != MetaStatusCode::OK) { LOG(ERROR) << "ret = " << MetaStatusCode_Name(ret) << "inode = " << inode.DebugString(); @@ -76,10 +97,11 @@ MetaStatusCode InodeManager::CreateInode(uint64_t inodeId, return MetaStatusCode::OK; } -MetaStatusCode InodeManager::CreateRootInode(const InodeParam ¶m) { +MetaStatusCode InodeManager::CreateRootInode(const InodeParam& param, + int64_t logIndex) { + CHECK_APPLIED(); LOG(INFO) << "CreateRootInode, fsId = " << param.fsId - << ", uid = " << param.uid - << ", gid = " << param.gid + << ", uid = " << param.uid << ", gid = " << param.gid << ", mode = " << param.mode; // 1. generate root inode @@ -87,7 +109,7 @@ MetaStatusCode InodeManager::CreateRootInode(const InodeParam ¶m) { GenerateInodeInternal(ROOTINODEID, param, &inode); // 2. insert root inode - MetaStatusCode ret = inodeStorage_->Insert(inode); + MetaStatusCode ret = inodeStorage_->Insert(inode, logIndex); if (ret != MetaStatusCode::OK) { LOG(ERROR) << "CreateRootInode fail, fsId = " << param.fsId << ", uid = " << param.uid << ", gid = " << param.gid @@ -100,12 +122,13 @@ MetaStatusCode InodeManager::CreateRootInode(const InodeParam ¶m) { return MetaStatusCode::OK; } -MetaStatusCode InodeManager::CreateManageInode(const InodeParam ¶m, +MetaStatusCode InodeManager::CreateManageInode(const InodeParam& param, ManageInodeType manageType, - Inode *newInode) { + Inode* newInode, + int64_t logIndex) { + CHECK_APPLIED(); LOG(INFO) << "CreateManageInode, fsId = " << param.fsId - << ", uid = " << param.uid - << ", gid = " << param.gid + << ", uid = " << param.uid << ", gid = " << param.gid << ", mode = " << param.mode; // 1. get inode id @@ -122,7 +145,7 @@ MetaStatusCode InodeManager::CreateManageInode(const InodeParam ¶m, GenerateInodeInternal(inodeId, param, &inode); // 3. 
insert manage inode - MetaStatusCode ret = inodeStorage_->Insert(inode); + MetaStatusCode ret = inodeStorage_->Insert(inode, logIndex); if (ret != MetaStatusCode::OK) { LOG(ERROR) << "CreateManageInode fail, fsId = " << param.fsId << ", uid = " << param.uid << ", gid = " << param.gid @@ -180,13 +203,10 @@ void InodeManager::GenerateInodeInternal(uint64_t inodeId, } else { inode->set_nlink(1); } - return; } -MetaStatusCode InodeManager::GetInode(uint32_t fsId, - uint64_t inodeId, - Inode *inode, - bool paddingS3ChunkInfo) { +MetaStatusCode InodeManager::GetInode(uint32_t fsId, uint64_t inodeId, + Inode* inode, bool paddingS3ChunkInfo) { VLOG(6) << "GetInode, fsId = " << fsId << ", inodeId = " << inodeId; NameLockGuard lg(inodeLock_, GetInodeLockName(fsId, inodeId)); MetaStatusCode rc = inodeStorage_->Get(Key4Inode(fsId, inodeId), inode); @@ -226,8 +246,7 @@ MetaStatusCode InodeManager::GetInodeAttr(uint32_t fsId, uint64_t inodeId, } VLOG(9) << "GetInodeAttr success, fsId = " << fsId - << ", inodeId = " << inodeId - << ", " << attr->ShortDebugString(); + << ", inodeId = " << inodeId << ", " << attr->ShortDebugString(); return MetaStatusCode::OK; } @@ -253,7 +272,9 @@ MetaStatusCode InodeManager::GetXAttr(uint32_t fsId, uint64_t inodeId, return MetaStatusCode::OK; } -MetaStatusCode InodeManager::DeleteInode(uint32_t fsId, uint64_t inodeId) { +MetaStatusCode InodeManager::DeleteInode(uint32_t fsId, uint64_t inodeId, + int64_t logIndex) { + CHECK_APPLIED(); VLOG(6) << "DeleteInode, fsId = " << fsId << ", inodeId = " << inodeId; NameLockGuard lg(inodeLock_, GetInodeLockName(fsId, inodeId)); InodeAttr attr; @@ -261,11 +282,12 @@ MetaStatusCode InodeManager::DeleteInode(uint32_t fsId, uint64_t inodeId) { inodeStorage_->GetAttr(Key4Inode(fsId, inodeId), &attr); if (retGetAttr != MetaStatusCode::OK) { VLOG(9) << "GetInodeAttr fail, fsId = " << fsId - << ", inodeId = " << inodeId - << ", ret = " << MetaStatusCode_Name(retGetAttr); + << ", inodeId = " << inodeId + << ", ret = 
" << MetaStatusCode_Name(retGetAttr); } - MetaStatusCode ret = inodeStorage_->Delete(Key4Inode(fsId, inodeId)); + MetaStatusCode ret = + inodeStorage_->Delete(Key4Inode(fsId, inodeId), logIndex); if (ret != MetaStatusCode::OK) { LOG(ERROR) << "DeleteInode fail, fsId = " << fsId << ", inodeId = " << inodeId @@ -282,11 +304,13 @@ MetaStatusCode InodeManager::DeleteInode(uint32_t fsId, uint64_t inodeId) { return MetaStatusCode::OK; } -MetaStatusCode InodeManager::UpdateInode(const UpdateInodeRequest& request) { +MetaStatusCode InodeManager::UpdateInode(const UpdateInodeRequest& request, + int64_t logIndex) { + CHECK_APPLIED(); VLOG(9) << "update inode, fsid: " << request.fsid() << ", inodeid: " << request.inodeid(); - NameLockGuard lg(inodeLock_, GetInodeLockName( - request.fsid(), request.inodeid())); + NameLockGuard lg(inodeLock_, + GetInodeLockName(request.fsid(), request.inodeid())); Inode old; MetaStatusCode ret = @@ -300,10 +324,10 @@ MetaStatusCode InodeManager::UpdateInode(const UpdateInodeRequest& request) { bool needUpdate = false; bool needAddTrash = false; -#define UPDATE_INODE(param) \ - if (request.has_##param()) { \ +#define UPDATE_INODE(param) \ + if (request.has_##param()) { \ old.set_##param(request.param()); \ - needUpdate = true; \ + needUpdate = true; \ } UPDATE_INODE(length) @@ -346,11 +370,15 @@ MetaStatusCode InodeManager::UpdateInode(const UpdateInodeRequest& request) { (needAddTrash && (FsFileType::TYPE_FILE == old.type())); bool s3NeedTrash = (needAddTrash && (FsFileType::TYPE_S3 == old.type())); + std::shared_ptr txn; if (needUpdate) { - ret = inodeStorage_->Update(old, fileNeedDeallocate); + ret = inodeStorage_->Update(&txn, old, logIndex, fileNeedDeallocate); if (ret != MetaStatusCode::OK) { LOG(ERROR) << "UpdateInode fail, " << request.ShortDebugString() << ", ret: " << MetaStatusCode_Name(ret); + if (txn != nullptr && !txn->Rollback().ok()) { + LOG(ERROR) << "Rollback transaction failed"; + } return ret; } } @@ -368,11 +396,15 @@ 
MetaStatusCode InodeManager::UpdateInode(const UpdateInodeRequest& request) { uint64_t chunkIndex = item.first; list2add = &item.second; MetaStatusCode rc = inodeStorage_->ModifyInodeS3ChunkInfoList( - old.fsid(), old.inodeid(), chunkIndex, list2add, nullptr); + &txn, old.fsid(), old.inodeid(), chunkIndex, list2add, nullptr, + logIndex); if (rc != MetaStatusCode::OK) { LOG(ERROR) << "Modify inode s3chunkinfo list failed, fsId=" << old.fsid() << ", inodeId=" << old.inodeid() << ", retCode=" << rc; + if (txn != nullptr && !txn->Rollback().ok()) { + LOG(ERROR) << "Rollback transaction failed"; + } return rc; } } @@ -382,34 +414,49 @@ MetaStatusCode InodeManager::UpdateInode(const UpdateInodeRequest& request) { const auto fsId = old.fsid(); const auto inodeId = old.inodeid(); for (const auto &slice : request.volumeextents().slices()) { - auto rc = UpdateVolumeExtentSliceLocked(fsId, inodeId, slice); + auto rc = UpdateVolumeExtentSliceLocked(&txn, fsId, inodeId, slice, + logIndex); if (rc != MetaStatusCode::OK) { LOG(ERROR) << "UpdateVolumeExtent failed, err: " << MetaStatusCode_Name(rc) << ", fsId: " << fsId << ", inodeId: " << inodeId; + if (txn != nullptr && !txn->Rollback().ok()) { + LOG(ERROR) << "Rollback transaction failed"; + } return rc; } } } - + if (txn != nullptr) { + auto s = txn->Commit(); + if (!s.ok()) { + LOG(ERROR) << "Commit transaction failed, status = " + << s.ToString(); + if (!txn->Rollback().ok()) { + LOG(ERROR) << "Rollback transaction failed"; + } + return MetaStatusCode::STORAGE_INTERNAL_ERROR; + } + } VLOG(9) << "UpdateInode success, " << request.ShortDebugString(); return MetaStatusCode::OK; } MetaStatusCode InodeManager::GetOrModifyS3ChunkInfo( - uint32_t fsId, uint64_t inodeId, - const S3ChunkInfoMap& map2add, - const S3ChunkInfoMap& map2del, - bool returnS3ChunkInfoMap, - std::shared_ptr* iterator4InodeS3Meta) { + uint32_t fsId, uint64_t inodeId, const S3ChunkInfoMap& map2add, + const S3ChunkInfoMap& map2del, bool returnS3ChunkInfoMap, + 
std::shared_ptr* iterator4InodeS3Meta, int64_t logIndex) { VLOG(6) << "GetOrModifyS3ChunkInfo, fsId: " << fsId << ", inodeId: " << inodeId; NameLockGuard lg(inodeLock_, GetInodeLockName(fsId, inodeId)); + CHECK_APPLIED(); + const S3ChunkInfoList* list2add; const S3ChunkInfoList* list2del; std::unordered_set deleted; + std::shared_ptr txn; for (const auto& item : map2add) { uint64_t chunkIndex = item.first; list2add = &item.second; @@ -420,11 +467,14 @@ MetaStatusCode InodeManager::GetOrModifyS3ChunkInfo( list2del = nullptr; } - MetaStatusCode rc = inodeStorage_->ModifyInodeS3ChunkInfoList( - fsId, inodeId, chunkIndex, list2add, list2del); + auto rc = inodeStorage_->ModifyInodeS3ChunkInfoList( + &txn, fsId, inodeId, chunkIndex, list2add, list2del, logIndex); if (rc != MetaStatusCode::OK) { LOG(ERROR) << "Modify inode s3chunkinfo list failed, fsId=" << fsId << ", inodeId=" << inodeId << ", retCode=" << rc; + if (txn != nullptr && !txn->Rollback().ok()) { + LOG(ERROR) << "Rollback transaction failed"; + } return rc; } deleted.insert(chunkIndex); @@ -438,19 +488,34 @@ MetaStatusCode InodeManager::GetOrModifyS3ChunkInfo( list2add = nullptr; list2del = &item.second; - MetaStatusCode rc = inodeStorage_->ModifyInodeS3ChunkInfoList( - fsId, inodeId, chunkIndex, list2add, list2del); + auto rc = inodeStorage_->ModifyInodeS3ChunkInfoList( + &txn, fsId, inodeId, chunkIndex, list2add, list2del, logIndex); if (rc != MetaStatusCode::OK) { LOG(ERROR) << "Modify inode s3chunkinfo list failed, fsId=" << fsId << ", inodeId=" << inodeId << ", retCode=" << rc; + if (txn != nullptr && !txn->Rollback().ok()) { + LOG(ERROR) << "Rollback transaction failed"; + } return rc; } } + if (txn != nullptr) { + auto s = txn->Commit(); + if (!s.ok()) { + LOG(ERROR) << "Commit transaction failed, status = " + << s.ToString(); + if (!txn->Rollback().ok()) { + LOG(ERROR) << "Rollback transaction failed"; + } + return MetaStatusCode::STORAGE_INTERNAL_ERROR; + } + } + // return if needed if 
(returnS3ChunkInfoMap) { - *iterator4InodeS3Meta = inodeStorage_->GetInodeS3ChunkInfoList( - fsId, inodeId); + *iterator4InodeS3Meta = + inodeStorage_->GetInodeS3ChunkInfoList(fsId, inodeId); if ((*iterator4InodeS3Meta)->Status() != 0) { return MetaStatusCode::STORAGE_INTERNAL_ERROR; } @@ -471,19 +536,22 @@ MetaStatusCode InodeManager::PaddingInodeS3ChunkInfo(int32_t fsId, } MetaStatusCode InodeManager::UpdateInodeWhenCreateOrRemoveSubNode( - const Dentry &dentry, - uint64_t now, - uint32_t now_ns, - bool isCreate) { + const Dentry& dentry, uint64_t now, uint32_t now_ns, bool isCreate, + int64_t logIndex) { uint64_t fsId = dentry.fsid(); uint64_t parentInodeId = dentry.parentinodeid(); FsFileType type = dentry.type(); MetaStatusCode ret = MetaStatusCode::OK; VLOG(6) << "UpdateInodeWhenCreateOrRemoveSubNode, fsId = " << fsId - << ", inodeId = " << parentInodeId - << ", isCreate = " << isCreate; + << ", inodeId = " << parentInodeId << ", isCreate = " << isCreate; + if (logIndex <= appliedIndex_) { + LOG(INFO) << "Log entry already be applied, index = " << logIndex + << " applied index = " << appliedIndex_; + return MetaStatusCode::IDEMPOTENCE_OK; + } NameLockGuard lg(inodeLock_, GetInodeLockName(fsId, parentInodeId)); + Inode parentInode; ret = inodeStorage_->Get( Key4Inode(fsId, parentInodeId), &parentInode); @@ -524,7 +592,7 @@ MetaStatusCode InodeManager::UpdateInodeWhenCreateOrRemoveSubNode( parentInode.set_ctime_ns(now_ns); } - ret = inodeStorage_->Update(parentInode); + ret = inodeStorage_->Update(parentInode, logIndex); if (ret != MetaStatusCode::OK) { LOG(ERROR) << "UpdateInode fail, " << parentInode.ShortDebugString() << ", ret = " << MetaStatusCode_Name(ret); @@ -536,11 +604,12 @@ MetaStatusCode InodeManager::UpdateInodeWhenCreateOrRemoveSubNode( return MetaStatusCode::OK; } -MetaStatusCode InodeManager::InsertInode(const Inode &inode) { +MetaStatusCode InodeManager::InsertInode(const Inode& inode, int64_t logIndex) { + CHECK_APPLIED(); VLOG(6) << 
"InsertInode, " << inode.ShortDebugString(); // 2. insert inode - MetaStatusCode ret = inodeStorage_->Insert(inode); + MetaStatusCode ret = inodeStorage_->Insert(inode, logIndex); if (ret != MetaStatusCode::OK) { LOG(ERROR) << "InsertInode fail, " << inode.ShortDebugString() << ", ret = " << MetaStatusCode_Name(ret); @@ -559,32 +628,37 @@ bool InodeManager::GetInodeIdList(std::list* inodeIdList) { } MetaStatusCode InodeManager::UpdateVolumeExtentSliceLocked( - uint32_t fsId, - uint64_t inodeId, - const VolumeExtentSlice &slice) { - return inodeStorage_->UpdateVolumeExtentSlice(fsId, inodeId, slice); + uint32_t fsId, uint64_t inodeId, const VolumeExtentSlice& slice, + int64_t logIndex) { + return inodeStorage_->UpdateVolumeExtentSlice(fsId, inodeId, slice, + logIndex); +} + +MetaStatusCode InodeManager::UpdateVolumeExtentSliceLocked( + std::shared_ptr* txn, uint32_t fsId, + uint64_t inodeId, const VolumeExtentSlice& slice, int64_t logIndex) { + return inodeStorage_->UpdateVolumeExtentSlice(txn, fsId, inodeId, slice, + logIndex); } MetaStatusCode InodeManager::UpdateVolumeExtentSlice( - uint32_t fsId, - uint64_t inodeId, - const VolumeExtentSlice &slice) { + uint32_t fsId, uint64_t inodeId, const VolumeExtentSlice& slice, + int64_t logIndex) { VLOG(6) << "UpdateInodeExtent, fsId: " << fsId << ", inodeId: " << inodeId << ", slice offset: " << slice.offset(); NameLockGuard guard(inodeLock_, GetInodeLockName(fsId, inodeId)); - return UpdateVolumeExtentSliceLocked(fsId, inodeId, slice); + return UpdateVolumeExtentSliceLocked(fsId, inodeId, slice, logIndex); } MetaStatusCode InodeManager::UpdateVolumeExtent( - uint32_t fsId, - uint64_t inodeId, - const VolumeExtentSliceList &extents) { + uint32_t fsId, uint64_t inodeId, const VolumeExtentSliceList& extents, + int64_t logIndex) { VLOG(6) << "UpdateInodeExtent, fsId: " << fsId << ", inodeId: " << inodeId; NameLockGuard guard(inodeLock_, GetInodeLockName(fsId, inodeId)); MetaStatusCode st = MetaStatusCode::UNKNOWN_ERROR; for 
(const auto &slice : extents.slices()) { - st = UpdateVolumeExtentSliceLocked(fsId, inodeId, slice); + st = UpdateVolumeExtentSliceLocked(fsId, inodeId, slice, logIndex); if (st != MetaStatusCode::OK) { LOG(ERROR) << "UpdateVolumeExtent failed, err: " << MetaStatusCode_Name(st) << ", fsId: " << fsId @@ -597,10 +671,8 @@ MetaStatusCode InodeManager::UpdateVolumeExtent( } MetaStatusCode InodeManager::GetVolumeExtent( - uint32_t fsId, - uint64_t inodeId, - const std::vector &slices, - VolumeExtentSliceList *extents) { + uint32_t fsId, uint64_t inodeId, const std::vector& slices, + VolumeExtentSliceList* extents) { VLOG(6) << "GetInodeExtent, fsId: " << fsId << ", inodeId: " << inodeId; if (slices.empty()) { @@ -622,5 +694,12 @@ MetaStatusCode InodeManager::GetVolumeExtent( return MetaStatusCode::OK; } +MetaStatusCode InodeManager::UpdateDeallocatableBlockGroup( + const UpdateDeallocatableBlockGroupRequest& request, int64_t logIndex) { + CHECK_APPLIED(); + return inodeStorage_->UpdateDeallocatableBlockGroup( + request.fsid(), request.update(), logIndex); +} + } // namespace metaserver } // namespace curvefs diff --git a/curvefs/src/metaserver/inode_manager.h b/curvefs/src/metaserver/inode_manager.h index e1abbb1627..bd28f65790 100644 --- a/curvefs/src/metaserver/inode_manager.h +++ b/curvefs/src/metaserver/inode_manager.h @@ -26,16 +26,17 @@ #include #include +#include #include #include #include -#include + #include "curvefs/proto/metaserver.pb.h" #include "curvefs/src/metaserver/inode_storage.h" +#include "curvefs/src/metaserver/storage/storage.h" #include "curvefs/src/metaserver/trash.h" #include "src/common/concurrent/name_lock.h" - using ::curve::common::NameLock; using ::curvefs::metaserver::S3ChunkInfoList; @@ -66,19 +67,22 @@ class InodeManager { FileType2InodeNumMap* type2InodeNum) : inodeStorage_(inodeStorage), trash_(trash), - type2InodeNum_(type2InodeNum) {} + type2InodeNum_(type2InodeNum), + appliedIndex_(-1) { + // for compatibility, we initialize applied 
index to -1 + } - MetaStatusCode CreateInode(uint64_t inodeId, const InodeParam ¶m, - Inode *inode); - MetaStatusCode CreateRootInode(const InodeParam ¶m); + bool Init(); + + MetaStatusCode CreateInode(uint64_t inodeId, const InodeParam& param, + Inode* inode, int64_t logIndex); + MetaStatusCode CreateRootInode(const InodeParam& param, int64_t logIndex); - MetaStatusCode CreateManageInode(const InodeParam ¶m, - ManageInodeType manageType, - Inode *inode); + MetaStatusCode CreateManageInode(const InodeParam& param, + ManageInodeType manageType, Inode* inode, + int64_t logIndex); - MetaStatusCode GetInode(uint32_t fsId, - uint64_t inodeId, - Inode *inode, + MetaStatusCode GetInode(uint32_t fsId, uint64_t inodeId, Inode* inode, bool paddingS3ChunkInfo = false); MetaStatusCode GetInodeAttr(uint32_t fsId, uint64_t inodeId, @@ -86,69 +90,73 @@ class InodeManager { MetaStatusCode GetXAttr(uint32_t fsId, uint64_t inodeId, XAttr *xattr); - MetaStatusCode DeleteInode(uint32_t fsId, uint64_t inodeId); + MetaStatusCode DeleteInode(uint32_t fsId, uint64_t inodeId, + int64_t logIndex); - MetaStatusCode UpdateInode(const UpdateInodeRequest& request); + MetaStatusCode UpdateInode(const UpdateInodeRequest& request, + int64_t logIndex); MetaStatusCode GetOrModifyS3ChunkInfo( - uint32_t fsId, - uint64_t inodeId, - const S3ChunkInfoMap& map2add, - const S3ChunkInfoMap& map2del, - bool returnS3ChunkInfoMap, - std::shared_ptr* iterator4InodeS3Meta); - - MetaStatusCode PaddingInodeS3ChunkInfo(int32_t fsId, - uint64_t inodeId, + uint32_t fsId, uint64_t inodeId, const S3ChunkInfoMap& map2add, + const S3ChunkInfoMap& map2del, bool returnS3ChunkInfoMap, + std::shared_ptr* iterator4InodeS3Meta, int64_t logIndex); + + MetaStatusCode PaddingInodeS3ChunkInfo(int32_t fsId, uint64_t inodeId, S3ChunkInfoMap* m, uint64_t limit = 0); - MetaStatusCode UpdateInodeWhenCreateOrRemoveSubNode( - const Dentry &dentry, - uint64_t now, - uint32_t now_ns, - bool isCreate); + MetaStatusCode 
UpdateInodeWhenCreateOrRemoveSubNode(const Dentry& dentry, + uint64_t now, + uint32_t now_ns, + bool isCreate, + int64_t logIndex); - MetaStatusCode InsertInode(const Inode &inode); + MetaStatusCode InsertInode(const Inode& inode, int64_t logIndex); bool GetInodeIdList(std::list* inodeIdList); // Update one or more volume extent slice - MetaStatusCode UpdateVolumeExtent(uint32_t fsId, - uint64_t inodeId, - const VolumeExtentSliceList &extents); + MetaStatusCode UpdateVolumeExtent(uint32_t fsId, uint64_t inodeId, + const VolumeExtentSliceList& extents, + int64_t logIndex); // Update only one volume extent slice - MetaStatusCode UpdateVolumeExtentSlice(uint32_t fsId, - uint64_t inodeId, - const VolumeExtentSlice &slice); + MetaStatusCode UpdateVolumeExtentSlice(uint32_t fsId, uint64_t inodeId, + const VolumeExtentSlice& slice, + int64_t logIndex); - MetaStatusCode GetVolumeExtent(uint32_t fsId, - uint64_t inodeId, - const std::vector &slices, - VolumeExtentSliceList *extents); + MetaStatusCode GetVolumeExtent(uint32_t fsId, uint64_t inodeId, + const std::vector& slices, + VolumeExtentSliceList* extents); + + MetaStatusCode UpdateDeallocatableBlockGroup( + const UpdateDeallocatableBlockGroupRequest& request, int64_t logIndex); private: void GenerateInodeInternal(uint64_t inodeId, const InodeParam ¶m, Inode *inode); - bool AppendS3ChunkInfo(uint32_t fsId, - uint64_t inodeId, + bool AppendS3ChunkInfo(uint32_t fsId, uint64_t inodeId, S3ChunkInfoMap added); static std::string GetInodeLockName(uint32_t fsId, uint64_t inodeId) { return std::to_string(fsId) + "_" + std::to_string(inodeId); } + MetaStatusCode UpdateVolumeExtentSliceLocked(uint32_t fsId, + uint64_t inodeId, + const VolumeExtentSlice& slice, + int64_t logIndex); + MetaStatusCode UpdateVolumeExtentSliceLocked( - uint32_t fsId, - uint64_t inodeId, - const VolumeExtentSlice &slice); + std::shared_ptr* txn, uint32_t fsId, + uint64_t inodeId, const VolumeExtentSlice& slice, int64_t logIndex); private: 
std::shared_ptr inodeStorage_; std::shared_ptr trash_; FileType2InodeNumMap* type2InodeNum_; + int64_t appliedIndex_; NameLock inodeLock_; }; diff --git a/curvefs/src/metaserver/inode_storage.cpp b/curvefs/src/metaserver/inode_storage.cpp index 3dae80bba7..8e5da8a913 100644 --- a/curvefs/src/metaserver/inode_storage.cpp +++ b/curvefs/src/metaserver/inode_storage.cpp @@ -20,41 +20,47 @@ * Author: chenwei */ +#include "curvefs/src/metaserver/inode_storage.h" + #include +#include #include -#include #include -#include -#include #include +#include +#include -#include "src/common/concurrent/rw_lock.h" -#include "src/common/string_util.h" -#include "curvefs/proto/metaserver.pb.h" #include "curvefs/proto/common.pb.h" -#include "curvefs/src/metaserver/storage/status.h" -#include "curvefs/src/metaserver/inode_storage.h" -#include "curvefs/src/metaserver/storage/converter.h" +#include "curvefs/proto/metaserver.pb.h" #include "curvefs/src/metaserver/common/types.h" +#include "curvefs/src/metaserver/storage/converter.h" +#include "curvefs/src/metaserver/storage/status.h" +#include "curvefs/src/metaserver/storage/storage.h" +#include "src/common/concurrent/rw_lock.h" +#include "src/common/string_util.h" namespace curvefs { namespace metaserver { using ::curve::common::ReadLockGuard; -using ::curve::common::WriteLockGuard; using ::curve::common::StringStartWith; -using ::curvefs::metaserver::storage::Status; -using ::curvefs::metaserver::storage::KVStorage; +using ::curve::common::WriteLockGuard; +using ::curvefs::metaserver::storage::Key4DeallocatableBlockGroup; +using ::curvefs::metaserver::storage::Key4InodeAuxInfo; using ::curvefs::metaserver::storage::Key4S3ChunkInfoList; using ::curvefs::metaserver::storage::Key4VolumeExtentSlice; -using ::curvefs::metaserver::storage::Prefix4InodeVolumeExtent; +using ::curvefs::metaserver::storage::KVStorage; +using ::curvefs::metaserver::storage::Prefix4AllDeallocatableBlockGroup; +using ::curvefs::metaserver::storage::Prefix4AllInode; 
using ::curvefs::metaserver::storage::Prefix4ChunkIndexS3ChunkInfoList; using ::curvefs::metaserver::storage::Prefix4InodeS3ChunkInfoList; -using ::curvefs::metaserver::storage::Prefix4AllInode; -using ::curvefs::metaserver::storage::Key4InodeAuxInfo; -using ::curvefs::metaserver::storage::Key4DeallocatableBlockGroup; -using ::curvefs::metaserver::storage::Prefix4AllDeallocatableBlockGroup; +using ::curvefs::metaserver::storage::Prefix4InodeVolumeExtent; +using ::curvefs::metaserver::storage::Status; + +const char* InodeStorage::kInodeCountKey("count"); + +const char* InodeStorage::kInodeAppliedKey("inode"); InodeStorage::InodeStorage(std::shared_ptr kvStorage, std::shared_ptr nameGenerator, @@ -68,13 +74,70 @@ InodeStorage::InodeStorage(std::shared_ptr kvStorage, nameGenerator->GetDeallocatableInodeTableName()), table4DeallocatableBlockGroup_( nameGenerator->GetDeallocatableBlockGroupTableName()), - nInode_(nInode), conv_() {} + table4AppliedIndex_(nameGenerator->GetAppliedIndexTableName()), + table4InodeCount_(nameGenerator->GetInodeCountTableName()), + nInode_(nInode), + conv_() { + // NOTE: for compatibility with older versions + // we cannot ignore `nInode` argument +} -MetaStatusCode InodeStorage::Insert(const Inode& inode) { +bool InodeStorage::Init() { + // try get inode count for rocksdb + // if we got it, replace old value + auto s = GetInodeCount(&nInode_); + return s.ok() || s.IsNotFound(); +} + +storage::Status InodeStorage::GetInodeCount(std::size_t* count) { + common::ItemCount val; + auto s = kvStorage_->SGet(table4InodeCount_, kInodeCountKey, &val); + if (s.ok()) { + *count = static_cast(val.count()); + } + return s; +} + +storage::Status InodeStorage::SetInodeCount(storage::StorageTransaction* txn, + std::size_t count) { + common::ItemCount val; + val.set_count(count); + return txn->SSet(table4InodeCount_, kInodeCountKey, val); +} + +storage::Status InodeStorage::DelInodeCount(storage::StorageTransaction* txn) { + return 
txn->SDel(table4InodeCount_, kInodeCountKey); +} + +storage::Status InodeStorage::SetAppliedIndex( + storage::StorageTransaction* transaction, int64_t index) { + common::AppliedIndex val; + val.set_index(index); + return transaction->SSet(table4AppliedIndex_, kInodeAppliedKey, val); +} + +storage::Status InodeStorage::DelAppliedIndex( + storage::StorageTransaction* transaction) { + return transaction->SDel(table4AppliedIndex_, kInodeAppliedKey); +} + +MetaStatusCode InodeStorage::GetAppliedIndex(int64_t* index) { + common::AppliedIndex val; + auto s = kvStorage_->SGet(table4AppliedIndex_, kInodeAppliedKey, &val); + if (s.ok()) { + *index = val.index(); + return MetaStatusCode::OK; + } + if (s.IsNotFound()) { + return MetaStatusCode::NOT_FOUND; + } + return MetaStatusCode::STORAGE_INTERNAL_ERROR; +} + +MetaStatusCode InodeStorage::Insert(const Inode& inode, int64_t logIndex) { WriteLockGuard lg(rwLock_); Key4Inode key(inode.fsid(), inode.inodeid()); std::string skey = conv_.SerializeToString(key); - // NOTE: HGet() is cheap, because the key not found in most cases, // so the rocksdb storage only should check bloom filter. 
Inode out; @@ -84,14 +147,41 @@ MetaStatusCode InodeStorage::Insert(const Inode& inode) { } else if (!s.IsNotFound()) { return MetaStatusCode::STORAGE_INTERNAL_ERROR; } - // key not found - s = kvStorage_->HSet(table4Inode_, skey, inode); - if (s.ok()) { + const char* step = "Begin transaction"; + std::shared_ptr txn; + do { + txn = kvStorage_->BeginTransaction(); + if (txn == nullptr) { + break; + } + s = txn->HSet(table4Inode_, skey, inode); + if (!s.ok()) { + step = "Insert inode to transaction"; + break; + } + s = SetAppliedIndex(txn.get(), logIndex); + if (!s.ok()) { + step = "Insert applied index to transaction"; + break; + } + s = SetInodeCount(txn.get(), nInode_ + 1); + if (!s.ok()) { + step = "Insert inode count to transaction"; + break; + } + s = txn->Commit(); + if (!s.ok()) { + step = "Insert inode failed"; + break; + } nInode_++; return MetaStatusCode::OK; + } while (false); + LOG(ERROR) << step << " failed, status = " << s.ToString(); + if (txn != nullptr && !txn->Rollback().ok()) { + LOG(ERROR) << "Rollback transaction failed"; } - LOG(ERROR) << "Insert inode failed, status = " << s.ToString(); return MetaStatusCode::STORAGE_INTERNAL_ERROR; } @@ -111,8 +201,7 @@ MetaStatusCode InodeStorage::Get(const Key4Inode& key, Inode* inode) { return MetaStatusCode::STORAGE_INTERNAL_ERROR; } -MetaStatusCode InodeStorage::GetAttr(const Key4Inode& key, - InodeAttr *attr) { +MetaStatusCode InodeStorage::GetAttr(const Key4Inode& key, InodeAttr* attr) { ReadLockGuard lg(rwLock_); Inode inode; std::string skey = conv_.SerializeToString(key); @@ -154,7 +243,7 @@ MetaStatusCode InodeStorage::GetAttr(const Key4Inode& key, return MetaStatusCode::OK; } -MetaStatusCode InodeStorage::GetXAttr(const Key4Inode& key, XAttr *xattr) { +MetaStatusCode InodeStorage::GetXAttr(const Key4Inode& key, XAttr* xattr) { ReadLockGuard lg(rwLock_); Inode inode; std::string skey = conv_.SerializeToString(key); @@ -171,70 +260,163 @@ MetaStatusCode InodeStorage::GetXAttr(const Key4Inode& key, 
XAttr *xattr) { return MetaStatusCode::OK; } -MetaStatusCode InodeStorage::Delete(const Key4Inode& key) { - WriteLockGuard lg(rwLock_); +// NOTE: if transaction success +// we will commit transaction +// it should be the last step of your operations +storage::Status InodeStorage::DeleteInternal( + storage::StorageTransaction* transaction, const Key4Inode& key) { std::string skey = conv_.SerializeToString(key); - Status s = kvStorage_->HDel(table4Inode_, skey); - if (s.ok()) { + Status s; + const char* step = "Delete inode from transaction"; + do { + s = transaction->HDel(table4Inode_, skey); + if (!s.ok()) { + break; + } // NOTE: for rocksdb storage, it will never check whether // the key exist in delete(), so if the client delete the - // unexist inode in some anbormal cases, it will cause the + // non-exist inode in some abnormal cases, it will cause the + // nInode less then the real value. + if (nInode_ > 0) { + s = SetInodeCount(transaction, nInode_ - 1); + if (!s.ok()) { + step = "Insert inode count to transaction"; + break; + } + } + s = transaction->Commit(); + if (!s.ok()) { + step = "Delete inode"; + break; + } + // NOTE: for rocksdb storage, it will never check whether + // the key exist in delete(), so if the client delete the + // non-exist inode in some abnormal cases, it will cause the // nInode less then the real value. 
if (nInode_ > 0) { nInode_--; } + return s; + } while (false); + LOG(ERROR) << step << " failed, status = " << s.ToString(); + return s; +} + +MetaStatusCode InodeStorage::Delete(const Key4Inode& key, int64_t logIndex) { + WriteLockGuard lg(rwLock_); + std::shared_ptr txn; + Status s; + const char* step = "Begin transaction"; + do { + txn = kvStorage_->BeginTransaction(); + if (txn == nullptr) { + break; + } + s = SetAppliedIndex(txn.get(), logIndex); + if (!s.ok()) { + step = "Insert applied index to transaction"; + break; + } + s = DeleteInternal(txn.get(), key); + if (!s.ok()) { + step = "Delete inode from transaction"; + break; + } return MetaStatusCode::OK; + } while (false); + LOG(ERROR) << step << " failed, status = " << s.ToString(); + if (txn != nullptr && !txn->Rollback().ok()) { + LOG(ERROR) << "Rollback delete inode transaction failed, status = " + << s.ToString(); } + return MetaStatusCode::STORAGE_INTERNAL_ERROR; +} +MetaStatusCode InodeStorage::ForceDelete(const Key4Inode& key) { + WriteLockGuard lg(rwLock_); + std::shared_ptr txn = nullptr; + Status s; + const char* step = "Begin transaction"; + do { + txn = kvStorage_->BeginTransaction(); + if (txn == nullptr) { + break; + } + s = DeleteInternal(txn.get(), key); + if (!s.ok()) { + step = "Delete inode from transaction"; + break; + } + return MetaStatusCode::OK; + } while (false); + LOG(ERROR) << step << " failed, status = " << s.ToString(); + if (txn != nullptr && !txn->Rollback().ok()) { + LOG(ERROR) << "Rollback delete inode transaction failed, status = " + << s.ToString(); + } return MetaStatusCode::STORAGE_INTERNAL_ERROR; } -MetaStatusCode InodeStorage::Update(const Inode &inode, bool inodeDeallocate) { +MetaStatusCode InodeStorage::Update( + std::shared_ptr* txn, const Inode& inode, + int64_t logIndex, bool inodeDeallocate) { + if (*txn == nullptr) { + *txn = kvStorage_->BeginTransaction(); + if (*txn == nullptr) { + LOG(ERROR) << "Begin transaction failed"; + return 
MetaStatusCode::STORAGE_INTERNAL_ERROR; + } + } WriteLockGuard lg(rwLock_); Key4Inode key(inode.fsid(), inode.inodeid()); std::string skey = conv_.SerializeToString(key); - - // only update inodes + storage::Status s; + s = SetAppliedIndex(txn->get(), logIndex); + if (!s.ok()) { + LOG(ERROR) << "Insert applied index to transaction failed, status = " + << s.ToString(); + return MetaStatusCode::STORAGE_INTERNAL_ERROR; + } if (!inodeDeallocate) { - Status s = kvStorage_->HSet(table4Inode_, skey, inode); + s = (*txn)->HSet(table4Inode_, skey, inode); if (s.ok()) { return MetaStatusCode::OK; } return MetaStatusCode::STORAGE_INTERNAL_ERROR; } - - // update inode and update deallocatable inode list google::protobuf::Empty value; - auto txn = kvStorage_->BeginTransaction(); - if (nullptr == txn) { - return MetaStatusCode::STORAGE_INTERNAL_ERROR; - } - std::string step = "update inode " + key.SerializeToString(); - - Status s = txn->HSet(table4Inode_, skey, inode); + s = (*txn)->HSet(table4Inode_, skey, inode); if (s.ok()) { - s = txn->HSet(table4DeallocatableInode_, skey, value); + s = (*txn)->HSet(table4DeallocatableInode_, skey, value); step = "add inode " + key.SerializeToString() + " to inode deallocatable list"; } - if (!s.ok()) { LOG(ERROR) << "txn is failed in " << step; - if (!txn->Rollback().ok()) { - LOG(ERROR) << "rollback transaction failed, inode=" - << key.SerializeToString(); - return MetaStatusCode::STORAGE_INTERNAL_ERROR; - } - } else if (!txn->Commit().ok()) { - LOG(ERROR) << "commit transaction failed, inode=" - << key.SerializeToString(); return MetaStatusCode::STORAGE_INTERNAL_ERROR; } - return MetaStatusCode::OK; } +MetaStatusCode InodeStorage::Update(const Inode& inode, int64_t logIndex, + bool inodeDeallocate) { + std::shared_ptr txn; + if (Update(&txn, inode, logIndex, inodeDeallocate) == MetaStatusCode::OK) { + storage::Status s = txn->Commit(); + if (!s.ok()) { + LOG(ERROR) << "Commit update inode transaction failed, status = " + << s.ToString(); 
+ } else { + return MetaStatusCode::OK; + } + } + if (txn != nullptr && !txn->Rollback().ok()) { + LOG(ERROR) << "Rollback transaction failed"; + } + LOG(ERROR) << "Update inode failed"; + return MetaStatusCode::STORAGE_INTERNAL_ERROR; +} std::shared_ptr InodeStorage::GetAllInode() { ReadLockGuard lg(rwLock_); @@ -279,34 +461,75 @@ bool InodeStorage::Empty() { return true; } +// NOTE: we will clear all apply metadata in `Clear()` +// when follower replay logs on this snapshot, it may cause +// repeat apply log entries, and raise some errors +// but we know this partition will be clear at the end of logs MetaStatusCode InodeStorage::Clear() { + // FIXME: non-atomic clear operations + // NOTE: clear operations non-atomic is acceptable + // because if we fail stop, we will replay + // raft logs and clear it again WriteLockGuard lg(rwLock_); + Status s = kvStorage_->HClear(table4Inode_); if (!s.ok()) { - LOG(ERROR) << "InodeStorage clear inode table failed"; + LOG(ERROR) << "InodeStorage clear inode table failed, status = " + << s.ToString(); return MetaStatusCode::STORAGE_INTERNAL_ERROR; } - nInode_ = 0; - s = kvStorage_->SClear(table4S3ChunkInfo_); if (!s.ok()) { - LOG(ERROR) << "InodeStorage clear inode s3chunkinfo table failed"; + LOG(ERROR) + << "InodeStorage clear inode s3chunkinfo table failed, status = " + << s.ToString(); return MetaStatusCode::STORAGE_INTERNAL_ERROR; } - s = kvStorage_->SClear(table4VolumeExtent_); if (!s.ok()) { - LOG(ERROR) << "InodeStorage clear inode volume extent table failed"; + LOG(ERROR) + << "InodeStorage clear inode volume extent table failed, status = " + << s.ToString(); return MetaStatusCode::STORAGE_INTERNAL_ERROR; } s = kvStorage_->HClear(table4InodeAuxInfo_); if (!s.ok()) { - LOG(ERROR) << "InodeStorage clear inode aux info table failed"; + LOG(ERROR) + << "InodeStorage clear inode aux info table failed, status = " + << s.ToString(); return MetaStatusCode::STORAGE_INTERNAL_ERROR; } - - return MetaStatusCode::OK; + 
std::shared_ptr txn; + const char* step = "Begin transaction"; + do { + txn = kvStorage_->BeginTransaction(); + if (txn == nullptr) { + break; + } + s = DelInodeCount(txn.get()); + if (!s.ok()) { + step = "Delete inode count"; + break; + } + s = DelAppliedIndex(txn.get()); + if (!s.ok()) { + step = "Delete applied index"; + break; + } + s = txn->Commit(); + if (!s.ok()) { + step = "Commit clear InodeStorage transaction"; + break; + } + nInode_ = 0; + return MetaStatusCode::OK; + } while (false); + LOG(ERROR) << step << " failed, status = " << s.ToString(); + if (txn != nullptr && !txn->Rollback().ok()) { + LOG(ERROR) << "Rollback transaction failed"; + } + return MetaStatusCode::STORAGE_INTERNAL_ERROR; } MetaStatusCode InodeStorage::UpdateInodeS3MetaSize(Transaction txn, @@ -365,10 +588,7 @@ uint64_t InodeStorage::GetInodeS3MetaSize(uint32_t fsId, uint64_t inodeId) { } MetaStatusCode InodeStorage::AddS3ChunkInfoList( - Transaction txn, - uint32_t fsId, - uint64_t inodeId, - uint64_t chunkIndex, + Transaction txn, uint32_t fsId, uint64_t inodeId, uint64_t chunkIndex, const S3ChunkInfoList* list2add) { if (nullptr == list2add || list2add->s3chunks_size() == 0) { return MetaStatusCode::OK; @@ -378,8 +598,8 @@ MetaStatusCode InodeStorage::AddS3ChunkInfoList( uint64_t firstChunkId = list2add->s3chunks(0).chunkid(); uint64_t lastChunkId = list2add->s3chunks(size - 1).chunkid(); - Key4S3ChunkInfoList key(fsId, inodeId, chunkIndex, - firstChunkId, lastChunkId, size); + Key4S3ChunkInfoList key(fsId, inodeId, chunkIndex, firstChunkId, + lastChunkId, size); std::string skey = conv_.SerializeToString(key); Status s; if (txn) { @@ -387,15 +607,11 @@ MetaStatusCode InodeStorage::AddS3ChunkInfoList( } else { s = kvStorage_->SSet(table4S3ChunkInfo_, skey, *list2add); } - return s.ok() ? MetaStatusCode::OK : - MetaStatusCode::STORAGE_INTERNAL_ERROR; + return s.ok() ? 
MetaStatusCode::OK : MetaStatusCode::STORAGE_INTERNAL_ERROR; } MetaStatusCode InodeStorage::DelS3ChunkInfoList( - Transaction txn, - uint32_t fsId, - uint64_t inodeId, - uint64_t chunkIndex, + Transaction txn, uint32_t fsId, uint64_t inodeId, uint64_t chunkIndex, const S3ChunkInfoList* list2del) { if (nullptr == list2del || list2del->s3chunks_size() == 0) { return MetaStatusCode::OK; @@ -429,13 +645,13 @@ MetaStatusCode InodeStorage::DelS3ChunkInfoList( if (delFirstChunkId <= key.firstChunkId && delLastChunkId >= key.lastChunkId) { key2del.push_back(skey); - // current list range: [ ] - // delete list range : [ ] + // current list range: [ ] + // delete list range : [ ] } else if (delLastChunkId < key.firstChunkId) { continue; } else { - LOG(ERROR) << "wrong delete list range (" << delFirstChunkId - << "," << delLastChunkId << "), skey=" << skey; + LOG(ERROR) << "wrong delete list range (" << delFirstChunkId << "," + << delLastChunkId << "), skey=" << skey; return MetaStatusCode::STORAGE_INTERNAL_ERROR; } } @@ -450,22 +666,22 @@ MetaStatusCode InodeStorage::DelS3ChunkInfoList( } MetaStatusCode InodeStorage::ModifyInodeS3ChunkInfoList( - uint32_t fsId, - uint64_t inodeId, - uint64_t chunkIndex, - const S3ChunkInfoList* list2add, - const S3ChunkInfoList* list2del) { + std::shared_ptr* txn, uint32_t fsId, uint64_t inodeId, + uint64_t chunkIndex, const S3ChunkInfoList* list2add, + const S3ChunkInfoList* list2del, int64_t logIndex) { + if (*txn == nullptr) { + *txn = kvStorage_->BeginTransaction(); + if (*txn == nullptr) { + LOG(ERROR) << "Begin transaction failed"; + return MetaStatusCode::STORAGE_INTERNAL_ERROR; + } + } WriteLockGuard lg(rwLock_); - auto txn = kvStorage_->BeginTransaction(); std::string step; - if (nullptr == txn) { - return MetaStatusCode::STORAGE_INTERNAL_ERROR; - } - - auto rc = DelS3ChunkInfoList(txn, fsId, inodeId, chunkIndex, list2del); + auto rc = DelS3ChunkInfoList(*txn, fsId, inodeId, chunkIndex, list2del); step = "del s3 chunkinfo list "; if 
(rc == MetaStatusCode::OK) { - rc = AddS3ChunkInfoList(txn, fsId, inodeId, chunkIndex, list2add); + rc = AddS3ChunkInfoList(*txn, fsId, inodeId, chunkIndex, list2add); step = "add s3 chunkInfo list "; } @@ -476,16 +692,34 @@ MetaStatusCode InodeStorage::ModifyInodeS3ChunkInfoList( (nullptr == list2del) ? 0 : list2del->s3chunks_size(); // TODO(huyao): I don't think this place is idempotent. If the timeout // is retried, the size will increase. - rc = UpdateInodeS3MetaSize(txn, fsId, inodeId, size4add, size4del); + rc = UpdateInodeS3MetaSize(*txn, fsId, inodeId, size4add, size4del); step = "update inode s3 meta size "; } + if (rc == MetaStatusCode::OK) { + if (!SetAppliedIndex(txn->get(), logIndex).ok()) { + step = "Insert applied index"; + rc = MetaStatusCode::STORAGE_INTERNAL_ERROR; + } + } + if (rc != MetaStatusCode::OK) { + LOG(ERROR) << "Modify inode transaction failed, step = " << step; + } + return rc; +} +MetaStatusCode InodeStorage::ModifyInodeS3ChunkInfoList( + uint32_t fsId, uint64_t inodeId, uint64_t chunkIndex, + const S3ChunkInfoList* list2add, const S3ChunkInfoList* list2del, + int64_t logIndex) { + std::shared_ptr txn; + MetaStatusCode rc = ModifyInodeS3ChunkInfoList( + &txn, fsId, inodeId, chunkIndex, list2add, list2del, logIndex); if (rc != MetaStatusCode::OK) { - LOG(ERROR) << "txn is failed in " << step << "."; - if (!txn->Rollback().ok()) { + if (txn != nullptr && !txn->Rollback().ok()) { LOG(ERROR) << "Rollback transaction failed"; rc = MetaStatusCode::STORAGE_INTERNAL_ERROR; } + LOG(ERROR) << "Modify inode failed"; } else if (!txn->Commit().ok()) { LOG(ERROR) << "Commit transaction failed"; rc = MetaStatusCode::STORAGE_INTERNAL_ERROR; @@ -557,22 +791,58 @@ std::shared_ptr InodeStorage::GetAllVolumeExtentList() { } MetaStatusCode InodeStorage::UpdateVolumeExtentSlice( - uint32_t fsId, - uint64_t inodeId, - const VolumeExtentSlice& slice) { + std::shared_ptr* txn, uint32_t fsId, + uint64_t inodeId, const VolumeExtentSlice& slice, int64_t 
logIndex) { WriteLockGuard guard(rwLock_); + if (*txn == nullptr) { + *txn = kvStorage_->BeginTransaction(); + if (*txn == nullptr) { + LOG(ERROR) << "Begin transaction failed"; + return MetaStatusCode::STORAGE_INTERNAL_ERROR; + } + } auto key = conv_.SerializeToString( Key4VolumeExtentSlice{fsId, inodeId, slice.offset()}); + auto st = (*txn)->SSet(table4VolumeExtent_, key, slice); + if (!st.ok()) { + LOG(ERROR) + << "Update volume extent slice to transaction failed, status = " + << st.ToString(); + return MetaStatusCode::STORAGE_INTERNAL_ERROR; + } + st = SetAppliedIndex(txn->get(), logIndex); + if (!st.ok()) { + LOG(ERROR) << "Insert applied index to transaction failed, status = " + << st.ToString(); + return MetaStatusCode::STORAGE_INTERNAL_ERROR; + } + return MetaStatusCode::OK; +} - auto st = kvStorage_->SSet(table4VolumeExtent_, key, slice); - - return st.ok() ? MetaStatusCode::OK - : MetaStatusCode::STORAGE_INTERNAL_ERROR; +MetaStatusCode InodeStorage::UpdateVolumeExtentSlice( + uint32_t fsId, uint64_t inodeId, const VolumeExtentSlice& slice, + int64_t logIndex) { + std::shared_ptr txn; + auto rc = UpdateVolumeExtentSlice(&txn, fsId, inodeId, slice, logIndex); + if (rc != MetaStatusCode::OK) { + if (txn != nullptr && !txn->Rollback().ok()) { + LOG(ERROR) << "Rollback transaction failed"; + } + return rc; + } + auto s = txn->Commit(); + if (!s.ok()) { + LOG(ERROR) << "Commit transaction failed, status = " << s.ToString(); + if (!txn->Rollback().ok()) { + LOG(ERROR) << "Rollback transaction failed"; + } + return MetaStatusCode::STORAGE_INTERNAL_ERROR; + } + return MetaStatusCode::OK; } -MetaStatusCode -InodeStorage::GetAllVolumeExtent(uint32_t fsId, uint64_t inodeId, - VolumeExtentSliceList *extents) { +MetaStatusCode InodeStorage::GetAllVolumeExtent( + uint32_t fsId, uint64_t inodeId, VolumeExtentSliceList* extents) { ReadLockGuard guard(rwLock_); auto key = conv_.SerializeToString(Prefix4InodeVolumeExtent{fsId, inodeId}); auto iter = 
kvStorage_->SSeek(table4VolumeExtent_, key); @@ -622,7 +892,7 @@ MetaStatusCode InodeStorage::GetVolumeExtentByOffset(uint32_t fsId, } MetaStatusCode InodeStorage::GetAllBlockGroup( - std::vector *deallocatableBlockGroupVec) { + std::vector* deallocatableBlockGroupVec) { auto iter = kvStorage_->HGetAll(table4DeallocatableBlockGroup_); if (iter->Status() != 0) { LOG(ERROR) << "InodeStorage failed to get iterator for all " @@ -648,13 +918,13 @@ MetaStatusCode InodeStorage::GetAllBlockGroup( } MetaStatusCode InodeStorage::UpdateDeallocatableBlockGroup( - uint32_t fsId, const DeallocatableBlockGroupVec &update) { + uint32_t fsId, const DeallocatableBlockGroupVec& update, int64_t logIndex) { auto txn = kvStorage_->BeginTransaction(); MetaStatusCode st = MetaStatusCode::OK; std::string step; - for (auto &item : update) { + for (auto& item : update) { Key4DeallocatableBlockGroup key(fsId, item.blockgroupoffset()); std::string skey(key.SerializeToString()); @@ -686,6 +956,15 @@ MetaStatusCode InodeStorage::UpdateDeallocatableBlockGroup( break; } } + if (st == MetaStatusCode::OK) { + auto s = SetAppliedIndex(txn.get(), logIndex); + if (!s.ok()) { + st = MetaStatusCode::STORAGE_INTERNAL_ERROR; + LOG(ERROR) + << "Insert applied index to transaction failed, status = " + << s.ToString(); + } + } if (st != MetaStatusCode::OK) { LOG(ERROR) << "UpdateDeallocatableBlockGroup txn is failed at " << step; @@ -705,10 +984,10 @@ MetaStatusCode InodeStorage::UpdateDeallocatableBlockGroup( return st; } -MetaStatusCode -InodeStorage::Increase(Transaction txn, uint32_t fsId, - const IncreaseDeallocatableBlockGroup &increase, - DeallocatableBlockGroup *out) { +MetaStatusCode InodeStorage::Increase( + Transaction txn, uint32_t fsId, + const IncreaseDeallocatableBlockGroup& increase, + DeallocatableBlockGroup* out) { MetaStatusCode st = MetaStatusCode::OK; // update DeallocatableBlockGroup @@ -721,18 +1000,16 @@ InodeStorage::Increase(Transaction txn, uint32_t fsId, std::set 
unique_elements(out->inodeidlist().begin(), out->inodeidlist().end()); out->mutable_inodeidlist()->Clear(); - for (auto &elem : unique_elements) { + for (auto& elem : unique_elements) { out->mutable_inodeidlist()->Add(elem); } - VLOG(6) << "InodeStorage handle increase set out=" - << out->DebugString(); + VLOG(6) << "InodeStorage handle increase set out=" << out->DebugString(); // remove related inode in table4DeallocatableInode_ - for (auto &inodeid : increase.inodeidlistadd()) { - auto s = txn->HDel( - table4DeallocatableInode_, - conv_.SerializeToString(Key4Inode{fsId, inodeid})); + for (auto& inodeid : increase.inodeidlistadd()) { + auto s = txn->HDel(table4DeallocatableInode_, + conv_.SerializeToString(Key4Inode{fsId, inodeid})); if (!s.ok()) { st = MetaStatusCode::STORAGE_INTERNAL_ERROR; VLOG(6) << "InodeStorage delete inodeid=" << inodeid << " from " @@ -747,9 +1024,9 @@ InodeStorage::Increase(Transaction txn, uint32_t fsId, return st; } -MetaStatusCode -InodeStorage::Decrease(const DecreaseDeallocatableBlockGroup &decrease, - DeallocatableBlockGroup *out) { +MetaStatusCode InodeStorage::Decrease( + const DecreaseDeallocatableBlockGroup& decrease, + DeallocatableBlockGroup* out) { VLOG(6) << "InodeStorage handle increase=" << decrease.DebugString(); if (!out->IsInitialized() || !out->has_deallocatablesize()) { LOG(ERROR) @@ -786,8 +1063,8 @@ InodeStorage::Decrease(const DecreaseDeallocatableBlockGroup &decrease, return MetaStatusCode::OK; } -MetaStatusCode InodeStorage::Mark(const MarkDeallocatableBlockGroup &mark, - DeallocatableBlockGroup *out) { +MetaStatusCode InodeStorage::Mark(const MarkDeallocatableBlockGroup& mark, + DeallocatableBlockGroup* out) { MetaStatusCode st = MetaStatusCode::OK; VLOG(6) << "InodeStorage handle mark=" << mark.DebugString(); diff --git a/curvefs/src/metaserver/inode_storage.h b/curvefs/src/metaserver/inode_storage.h index a92c6b3650..38ad3c5f56 100644 --- a/curvefs/src/metaserver/inode_storage.h +++ 
b/curvefs/src/metaserver/inode_storage.h @@ -23,33 +23,36 @@ #ifndef CURVEFS_SRC_METASERVER_INODE_STORAGE_H_ #define CURVEFS_SRC_METASERVER_INODE_STORAGE_H_ +#include +#include +#include #include -#include #include -#include -#include +#include #include #include +#include #include -#include "absl/container/btree_set.h" #include "absl/container/btree_map.h" -#include "src/common/concurrent/rw_lock.h" +#include "absl/container/btree_set.h" #include "curvefs/proto/metaserver.pb.h" #include "curvefs/src/metaserver/storage/converter.h" -#include "curvefs/src/metaserver/storage/utils.h" +#include "curvefs/src/metaserver/storage/status.h" #include "curvefs/src/metaserver/storage/storage.h" +#include "curvefs/src/metaserver/storage/utils.h" +#include "src/common/concurrent/rw_lock.h" namespace curvefs { namespace metaserver { using ::curve::common::RWLock; +using ::curvefs::metaserver::storage::Converter; using ::curvefs::metaserver::storage::Iterator; -using ::curvefs::metaserver::storage::KVStorage; -using ::curvefs::metaserver::storage::StorageTransaction; using ::curvefs::metaserver::storage::Key4Inode; -using ::curvefs::metaserver::storage::Converter; +using ::curvefs::metaserver::storage::KVStorage; using ::curvefs::metaserver::storage::NameGenerator; +using ::curvefs::metaserver::storage::StorageTransaction; using S3ChunkInfoMap = google::protobuf::Map; using DeallocatableBlockGroupVec = @@ -59,15 +62,19 @@ using Transaction = std::shared_ptr; class InodeStorage { public: InodeStorage(std::shared_ptr kvStorage, - std::shared_ptr nameGenerator, - uint64_t nInode); + std::shared_ptr nameGenerator, uint64_t nInode); + + MetaStatusCode GetAppliedIndex(int64_t* index); + + bool Init(); /** * @brief insert inode to storage * @param[in] inode: the inode want to insert + * @param[in] logIndex: the index of raft log * @return If inode exist, return INODE_EXIST; else insert and return OK */ - MetaStatusCode Insert(const Inode& inode); + MetaStatusCode Insert(const Inode& 
inode, int64_t logIndex); /** * @brief get inode from storage @@ -83,7 +90,7 @@ class InodeStorage { * @param[out] attr: the inode attribute got * @return If inode not exist, return NOT_FOUND; else return OK */ - MetaStatusCode GetAttr(const Key4Inode& key, InodeAttr *attr); + MetaStatusCode GetAttr(const Key4Inode& key, InodeAttr* attr); /** * @brief get inode extended attributes from storage @@ -91,14 +98,17 @@ class InodeStorage { * @param[out] attr: the inode extended attribute got * @return If inode not exist, return NOT_FOUND; else return OK */ - MetaStatusCode GetXAttr(const Key4Inode& key, XAttr *xattr); + MetaStatusCode GetXAttr(const Key4Inode& key, XAttr* xattr); /** * @brief delete inode from storage * @param[in] key: the key of inode want to delete + * @param[in] logIndex: the index of raft log * @return If inode not exist, return NOT_FOUND; else return OK */ - MetaStatusCode Delete(const Key4Inode& key); + MetaStatusCode Delete(const Key4Inode& key, int64_t logIndex); + + MetaStatusCode ForceDelete(const Key4Inode& key); /** * @brief update inode from storage @@ -106,7 +116,12 @@ class InodeStorage { * @param[in] inodeDeallocate: Whether the inode needs to deallocate space * @return If inode not exist, return NOT_FOUND; else replace and return OK */ - MetaStatusCode Update(const Inode& inode, bool inodeDeallocate = false); + MetaStatusCode Update(const Inode& inode, int64_t logIndex, + bool inodeDeallocate = false); + + MetaStatusCode Update(std::shared_ptr* txn, + const Inode& inode, int64_t logIndex, + bool inodeDeallocate = false); std::shared_ptr GetAllInode(); @@ -122,14 +137,18 @@ class InodeStorage { MetaStatusCode Clear(); // s3chunkinfo - MetaStatusCode ModifyInodeS3ChunkInfoList(uint32_t fsId, - uint64_t inodeId, + MetaStatusCode ModifyInodeS3ChunkInfoList(uint32_t fsId, uint64_t inodeId, uint64_t chunkIndex, const S3ChunkInfoList* list2add, - const S3ChunkInfoList* list2del); + const S3ChunkInfoList* list2del, + int64_t logIndex); - 
MetaStatusCode PaddingInodeS3ChunkInfo(int32_t fsId, - uint64_t inodeId, + MetaStatusCode ModifyInodeS3ChunkInfoList( + std::shared_ptr* txn, uint32_t fsId, + uint64_t inodeId, uint64_t chunkIndex, const S3ChunkInfoList* list2add, + const S3ChunkInfoList* list2del, int64_t logIndex); + + MetaStatusCode PaddingInodeS3ChunkInfo(int32_t fsId, uint64_t inodeId, S3ChunkInfoMap* m, uint64_t limit = 0); @@ -141,30 +160,32 @@ class InodeStorage { // volume extent std::shared_ptr GetAllVolumeExtentList(); - MetaStatusCode UpdateVolumeExtentSlice(uint32_t fsId, - uint64_t inodeId, - const VolumeExtentSlice& slice); + MetaStatusCode UpdateVolumeExtentSlice(uint32_t fsId, uint64_t inodeId, + const VolumeExtentSlice& slice, + int64_t logIndex); + + MetaStatusCode UpdateVolumeExtentSlice( + std::shared_ptr* txn, uint32_t fsId, + uint64_t inodeId, const VolumeExtentSlice& slice, int64_t logIndex); - MetaStatusCode GetAllVolumeExtent(uint32_t fsId, - uint64_t inodeId, + MetaStatusCode GetAllVolumeExtent(uint32_t fsId, uint64_t inodeId, VolumeExtentSliceList* extents); std::shared_ptr GetAllVolumeExtent(uint32_t fsId, uint64_t inodeId); - MetaStatusCode GetVolumeExtentByOffset(uint32_t fsId, - uint64_t inodeId, + MetaStatusCode GetVolumeExtentByOffset(uint32_t fsId, uint64_t inodeId, uint64_t offset, VolumeExtentSlice* slice); // use the transaction to delete {inodes} in the deallocatable_inode_list // and update the statistics of each item of blockgroup_list - MetaStatusCode - UpdateDeallocatableBlockGroup(uint32_t fsId, - const DeallocatableBlockGroupVec &update); + MetaStatusCode UpdateDeallocatableBlockGroup( + uint32_t fsId, const DeallocatableBlockGroupVec& update, + int64_t logIndex); MetaStatusCode GetAllBlockGroup( - std::vector *deallocatableBlockGroupVec); + std::vector* deallocatableBlockGroupVec); private: MetaStatusCode UpdateInodeS3MetaSize(Transaction txn, uint32_t fsId, @@ -173,25 +194,41 @@ class InodeStorage { uint64_t GetInodeS3MetaSize(uint32_t fsId, uint64_t 
inodeId); - MetaStatusCode DelS3ChunkInfoList(Transaction txn, - uint32_t fsId, uint64_t inodeId, - uint64_t chunkIndex, - const S3ChunkInfoList *list2del); + MetaStatusCode DelS3ChunkInfoList(Transaction txn, uint32_t fsId, + uint64_t inodeId, uint64_t chunkIndex, + const S3ChunkInfoList* list2del); - MetaStatusCode AddS3ChunkInfoList(Transaction txn, - uint32_t fsId, uint64_t inodeId, - uint64_t chunkIndex, - const S3ChunkInfoList *list2add); + MetaStatusCode AddS3ChunkInfoList(Transaction txn, uint32_t fsId, + uint64_t inodeId, uint64_t chunkIndex, + const S3ChunkInfoList* list2add); MetaStatusCode Increase(Transaction txn, uint32_t fsId, - const IncreaseDeallocatableBlockGroup &increase, - DeallocatableBlockGroup *out); + const IncreaseDeallocatableBlockGroup& increase, + DeallocatableBlockGroup* out); - MetaStatusCode Decrease(const DecreaseDeallocatableBlockGroup &decrease, - DeallocatableBlockGroup *out); + MetaStatusCode Decrease(const DecreaseDeallocatableBlockGroup& decrease, + DeallocatableBlockGroup* out); - MetaStatusCode Mark(const MarkDeallocatableBlockGroup &mark, - DeallocatableBlockGroup *out); + MetaStatusCode Mark(const MarkDeallocatableBlockGroup& mark, + DeallocatableBlockGroup* out); + + storage::Status SetAppliedIndex(storage::StorageTransaction* transaction, + int64_t index); + + storage::Status DelAppliedIndex(storage::StorageTransaction* transaction); + + storage::Status GetInodeCount(std::size_t* count); + + storage::Status SetInodeCount(storage::StorageTransaction* transaction, + std::size_t count); + + storage::Status DelInodeCount(storage::StorageTransaction* transaction); + + // NOTE: if transaction success + // we will commit transaction + // it should be the last step of your operations + storage::Status DeleteInternal(storage::StorageTransaction* transaction, + const Key4Inode& key); private: // FIXME: please remove this lock, because we has locked each inode @@ -205,9 +242,14 @@ class InodeStorage { std::string 
table4InodeAuxInfo_; std::string table4DeallocatableBlockGroup_; std::string table4DeallocatableInode_; + std::string table4AppliedIndex_; + std::string table4InodeCount_; size_t nInode_; Converter conv_; + + static const char* kInodeCountKey; + static const char* kInodeAppliedKey; }; } // namespace metaserver diff --git a/curvefs/src/metaserver/metastore.cpp b/curvefs/src/metaserver/metastore.cpp index 2ba443f60e..875f71cffc 100644 --- a/curvefs/src/metaserver/metastore.cpp +++ b/curvefs/src/metaserver/metastore.cpp @@ -103,6 +103,22 @@ bool MetaStoreImpl::Load(const std::string &pathname) { return false; } + succ = kvStorage_->Recover(pathname); + if (!succ) { + LOG(ERROR) << "Failed to recover storage"; + return false; + } + + // NOTE: for compatibility, we can't load the pending tx + // when constructing the partition; it must be loaded here. + // `Init()` also loads dentry and inode metadata + for (auto it = partitionMap_.begin(); it != partitionMap_.end(); it++) { + if (!it->second->Init()) { + LOG(ERROR) << "Init partition " << it->first << " failed"; + return false; + } + } + for (auto it = partitionMap_.begin(); it != partitionMap_.end(); it++) { uint32_t partitionId = it->second->GetPartitionId(); if (it->second->GetStatus() == PartitionStatus::DELETING) { @@ -130,12 +146,6 @@ bool MetaStoreImpl::Load(const std::string &pathname) { return true; } - succ = kvStorage_->Recover(pathname); - if (!succ) { - LOG(ERROR) << "Failed to recover storage"; - return false; - } - startCompacts(); return true; } @@ -158,45 +168,38 @@ void MetaStoreImpl::SaveBackground(const std::string &path, done->Run(); } -bool MetaStoreImpl::Save(const std::string &dir, - OnSnapshotSaveDoneClosure *done) { - brpc::ClosureGuard doneGuard(done); +bool MetaStoreImpl::SaveMeta(const std::string& dir, + std::vector* files) { + // NOTE: we will keep meta fstream consistent with + // snapshot metadata, so we should hold the locks + // during `fstream.Save()` WriteLockGuard writeLockGuard(rwLock_); -
MetaStoreFStream fstream(&partitionMap_, kvStorage_, copysetNode_->GetPoolId(), copysetNode_->GetCopysetId()); const std::string metadata = dir + "/" + kMetaDataFilename; - bool succ = fstream.Save(metadata); - if (!succ) { - done->SetError(MetaStatusCode::SAVE_META_FAIL); - return false; + if (fstream.Save(metadata)) { + files->push_back(kMetaDataFilename); + return true; } + return false; +} - // checkpoint storage +bool MetaStoreImpl::SaveData(const std::string& dir, + std::vector* files) { butil::Timer timer; timer.start(); - std::vector files; - succ = kvStorage_->Checkpoint(dir, &files); + std::vector tmp; + bool succ = kvStorage_->Checkpoint(dir, &tmp); if (!succ) { - done->SetError(MetaStatusCode::SAVE_META_FAIL); return false; } - + for (auto& file : tmp) { + files->push_back(std::move(file)); + } timer.stop(); g_storage_checkpoint_latency << timer.u_elapsed(); - - // add files to snapshot writer - // file is a relative path under the given directory - auto *writer = done->GetSnapshotWriter(); - writer->add_file(kMetaDataFilename); - - for (const auto &f : files) { - writer->add_file(f); - } - - done->SetSuccess(); return true; } @@ -238,9 +241,11 @@ bool MetaStoreImpl::Destroy() { return true; } -MetaStatusCode -MetaStoreImpl::CreatePartition(const CreatePartitionRequest *request, - CreatePartitionResponse *response) { +MetaStatusCode MetaStoreImpl::CreatePartition( + const CreatePartitionRequest* request, CreatePartitionResponse* response, + int64_t logIndex) { + // NOTE: we don't filter in-memory data changes + (void)logIndex; WriteLockGuard writeLockGuard(rwLock_); MetaStatusCode status; const auto &partition = request->partition(); @@ -258,9 +263,11 @@ MetaStoreImpl::CreatePartition(const CreatePartitionRequest *request, return MetaStatusCode::OK; } -MetaStatusCode -MetaStoreImpl::DeletePartition(const DeletePartitionRequest *request, - DeletePartitionResponse *response) { +MetaStatusCode MetaStoreImpl::DeletePartition( + const 
DeletePartitionRequest* request, DeletePartitionResponse* response, + int64_t logIndex) { + // NOTE: we don't filter in-memory data changes + (void)logIndex; WriteLockGuard writeLockGuard(rwLock_); uint32_t partitionId = request->partitionid(); auto it = partitionMap_.find(partitionId); @@ -353,8 +360,9 @@ std::shared_ptr MetaStoreImpl::GetStreamServer() { // dentry -MetaStatusCode MetaStoreImpl::CreateDentry(const CreateDentryRequest *request, - CreateDentryResponse *response) { +MetaStatusCode MetaStoreImpl::CreateDentry(const CreateDentryRequest* request, + CreateDentryResponse* response, + int64_t logIndex) { ReadLockGuard readLockGuard(rwLock_); std::shared_ptr partition; GET_PARTITION_OR_RETURN(partition); @@ -368,13 +376,16 @@ MetaStatusCode MetaStoreImpl::CreateDentry(const CreateDentryRequest *request, tm.set_sec(now); tm.set_nsec(now_ns); MetaStatusCode status = - partition->CreateDentry(request->dentry(), tm); + partition->CreateDentry(request->dentry(), tm, logIndex); response->set_statuscode(status); return status; } -MetaStatusCode MetaStoreImpl::GetDentry(const GetDentryRequest *request, - GetDentryResponse *response) { +MetaStatusCode MetaStoreImpl::GetDentry(const GetDentryRequest* request, + GetDentryResponse* response, + int64_t logIndex) { + // NOTE: filter read-only request is unnecessary + (void)logIndex; uint32_t fsId = request->fsid(); uint64_t parentInodeId = request->parentinodeid(); const auto &name = request->name(); @@ -398,8 +409,9 @@ MetaStatusCode MetaStoreImpl::GetDentry(const GetDentryRequest *request, return rc; } -MetaStatusCode MetaStoreImpl::DeleteDentry(const DeleteDentryRequest *request, - DeleteDentryResponse *response) { +MetaStatusCode MetaStoreImpl::DeleteDentry(const DeleteDentryRequest* request, + DeleteDentryResponse* response, + int64_t logIndex) { uint32_t fsId = request->fsid(); uint64_t parentInodeId = request->parentinodeid(); std::string name = request->name(); @@ -416,13 +428,16 @@ MetaStatusCode 
MetaStoreImpl::DeleteDentry(const DeleteDentryRequest *request, dentry.set_txid(txId); dentry.set_type(request->type()); - auto rc = partition->DeleteDentry(dentry); + auto rc = partition->DeleteDentry(dentry, logIndex); response->set_statuscode(rc); return rc; } -MetaStatusCode MetaStoreImpl::ListDentry(const ListDentryRequest *request, - ListDentryResponse *response) { +MetaStatusCode MetaStoreImpl::ListDentry(const ListDentryRequest* request, + ListDentryResponse* response, + int64_t logIndex) { + // NOTE: filter read-only request is unnecessary + (void)logIndex; uint32_t fsId = request->fsid(); uint64_t parentInodeId = request->dirinodeid(); auto txId = request->txid(); @@ -454,9 +469,9 @@ MetaStatusCode MetaStoreImpl::ListDentry(const ListDentryRequest *request, return rc; } -MetaStatusCode -MetaStoreImpl::PrepareRenameTx(const PrepareRenameTxRequest *request, - PrepareRenameTxResponse *response) { +MetaStatusCode MetaStoreImpl::PrepareRenameTx( + const PrepareRenameTxRequest* request, PrepareRenameTxResponse* response, + int64_t logIndex) { ReadLockGuard readLockGuard(rwLock_); MetaStatusCode rc; std::shared_ptr partition; @@ -464,14 +479,18 @@ MetaStoreImpl::PrepareRenameTx(const PrepareRenameTxRequest *request, std::vector dentrys{request->dentrys().begin(), request->dentrys().end()}; - rc = partition->HandleRenameTx(dentrys); + rc = partition->HandleRenameTx(dentrys, logIndex); + if (rc == MetaStatusCode::IDEMPOTENCE_OK) { + rc = MetaStatusCode::OK; + } response->set_statuscode(rc); return rc; } // inode -MetaStatusCode MetaStoreImpl::CreateInode(const CreateInodeRequest *request, - CreateInodeResponse *response) { +MetaStatusCode MetaStoreImpl::CreateInode(const CreateInodeRequest* request, + CreateInodeResponse* response, + int64_t logIndex) { InodeParam param; param.fsId = request->fsid(); param.length = request->length(); @@ -506,7 +525,7 @@ MetaStatusCode MetaStoreImpl::CreateInode(const CreateInodeRequest *request, GET_PARTITION_OR_RETURN(partition); 
MetaStatusCode status = - partition->CreateInode(param, response->mutable_inode()); + partition->CreateInode(param, response->mutable_inode(), logIndex); response->set_statuscode(status); if (status != MetaStatusCode::OK) { response->clear_inode(); @@ -514,9 +533,9 @@ MetaStatusCode MetaStoreImpl::CreateInode(const CreateInodeRequest *request, return status; } -MetaStatusCode -MetaStoreImpl::CreateRootInode(const CreateRootInodeRequest *request, - CreateRootInodeResponse *response) { +MetaStatusCode MetaStoreImpl::CreateRootInode( + const CreateRootInodeRequest* request, CreateRootInodeResponse* response, + int64_t logIndex) { InodeParam param; param.fsId = request->fsid(); param.uid = request->uid(); @@ -536,7 +555,7 @@ MetaStoreImpl::CreateRootInode(const CreateRootInodeRequest *request, std::shared_ptr partition; GET_PARTITION_OR_RETURN(partition); - MetaStatusCode status = partition->CreateRootInode(param); + MetaStatusCode status = partition->CreateRootInode(param, logIndex); response->set_statuscode(status); if (status != MetaStatusCode::OK) { LOG(ERROR) << "CreateRootInode fail, fsId = " << param.fsId @@ -547,9 +566,9 @@ MetaStoreImpl::CreateRootInode(const CreateRootInodeRequest *request, return status; } -MetaStatusCode -MetaStoreImpl::CreateManageInode(const CreateManageInodeRequest *request, - CreateManageInodeResponse *response) { +MetaStatusCode MetaStoreImpl::CreateManageInode( + const CreateManageInodeRequest* request, + CreateManageInodeResponse* response, int64_t logIndex) { InodeParam param; param.fsId = request->fsid(); param.uid = request->uid(); @@ -572,7 +591,7 @@ MetaStoreImpl::CreateManageInode(const CreateManageInodeRequest *request, GET_PARTITION_OR_RETURN(partition); MetaStatusCode status = partition->CreateManageInode( - param, request->managetype(), response->mutable_inode()); + param, request->managetype(), response->mutable_inode(), logIndex); response->set_statuscode(status); if (status != MetaStatusCode::OK) { LOG(ERROR) << 
"CreateManageInode fail, fsId = " << param.fsId @@ -595,8 +614,11 @@ MetaStoreImpl::CreateManageInode(const CreateManageInodeRequest *request, return MetaStatusCode::OK; } -MetaStatusCode MetaStoreImpl::GetInode(const GetInodeRequest *request, - GetInodeResponse *response) { +MetaStatusCode MetaStoreImpl::GetInode(const GetInodeRequest* request, + GetInodeResponse* response, + int64_t logIndex) { + // NOTE: filter read-only request is unnecessary + (void)logIndex; uint32_t fsId = request->fsid(); uint64_t inodeId = request->inodeid(); @@ -629,9 +651,11 @@ MetaStatusCode MetaStoreImpl::GetInode(const GetInodeRequest *request, return rc; } -MetaStatusCode -MetaStoreImpl::BatchGetInodeAttr(const BatchGetInodeAttrRequest *request, - BatchGetInodeAttrResponse *response) { +MetaStatusCode MetaStoreImpl::BatchGetInodeAttr( + const BatchGetInodeAttrRequest* request, + BatchGetInodeAttrResponse* response, int64_t logIndex) { + // NOTE: filter read-only request is unnecessary + (void)logIndex; ReadLockGuard readLockGuard(rwLock_); std::shared_ptr partition; GET_PARTITION_OR_RETURN(partition); @@ -651,8 +675,11 @@ MetaStoreImpl::BatchGetInodeAttr(const BatchGetInodeAttrRequest *request, return status; } -MetaStatusCode MetaStoreImpl::BatchGetXAttr(const BatchGetXAttrRequest *request, - BatchGetXAttrResponse *response) { +MetaStatusCode MetaStoreImpl::BatchGetXAttr(const BatchGetXAttrRequest* request, + BatchGetXAttrResponse* response, + int64_t logIndex) { + // NOTE: filter read-only request is unnecessary + (void)logIndex; ReadLockGuard readLockGuard(rwLock_); std::shared_ptr partition; GET_PARTITION_OR_RETURN(partition); @@ -672,8 +699,9 @@ MetaStatusCode MetaStoreImpl::BatchGetXAttr(const BatchGetXAttrRequest *request, return status; } -MetaStatusCode MetaStoreImpl::DeleteInode(const DeleteInodeRequest *request, - DeleteInodeResponse *response) { +MetaStatusCode MetaStoreImpl::DeleteInode(const DeleteInodeRequest* request, + DeleteInodeResponse* response, + int64_t 
logIndex) { uint32_t fsId = request->fsid(); uint64_t inodeId = request->inodeid(); @@ -681,27 +709,28 @@ MetaStatusCode MetaStoreImpl::DeleteInode(const DeleteInodeRequest *request, std::shared_ptr partition; GET_PARTITION_OR_RETURN(partition); - MetaStatusCode status = partition->DeleteInode(fsId, inodeId); + MetaStatusCode status = partition->DeleteInode(fsId, inodeId, logIndex); response->set_statuscode(status); return status; } -MetaStatusCode MetaStoreImpl::UpdateInode(const UpdateInodeRequest *request, - UpdateInodeResponse *response) { +MetaStatusCode MetaStoreImpl::UpdateInode(const UpdateInodeRequest* request, + UpdateInodeResponse* response, + int64_t logIndex) { ReadLockGuard readLockGuard(rwLock_); VLOG(9) << "UpdateInode inode " << request->inodeid(); std::shared_ptr partition; GET_PARTITION_OR_RETURN(partition); - MetaStatusCode status = partition->UpdateInode(*request); + MetaStatusCode status = partition->UpdateInode(*request, logIndex); response->set_statuscode(status); return status; } MetaStatusCode MetaStoreImpl::GetOrModifyS3ChunkInfo( - const GetOrModifyS3ChunkInfoRequest *request, - GetOrModifyS3ChunkInfoResponse *response, - std::shared_ptr *iterator) { + const GetOrModifyS3ChunkInfoRequest* request, + GetOrModifyS3ChunkInfoResponse* response, + std::shared_ptr* iterator, int64_t logIndex) { MetaStatusCode rc; ReadLockGuard readLockGuard(rwLock_); std::shared_ptr partition; @@ -711,7 +740,7 @@ MetaStatusCode MetaStoreImpl::GetOrModifyS3ChunkInfo( uint64_t inodeId = request->inodeid(); rc = partition->GetOrModifyS3ChunkInfo( fsId, inodeId, request->s3chunkinfoadd(), request->s3chunkinforemove(), - request->returns3chunkinfomap(), iterator); + request->returns3chunkinfomap(), iterator, logIndex); if (rc == MetaStatusCode::OK && !request->supportstreaming() && request->returns3chunkinfomap()) { rc = partition->PaddingInodeS3ChunkInfo( @@ -768,9 +797,11 @@ std::shared_ptr MetaStoreImpl::GetPartition(uint32_t partitionId) { return nullptr; } 
-MetaStatusCode -MetaStoreImpl::GetVolumeExtent(const GetVolumeExtentRequest *request, - GetVolumeExtentResponse *response) { +MetaStatusCode MetaStoreImpl::GetVolumeExtent( + const GetVolumeExtentRequest* request, GetVolumeExtentResponse* response, + int64_t logIndex) { + // NOTE: filter read-only request is unnecessary + (void)logIndex; ReadLockGuard guard(rwLock_); std::shared_ptr partition; GET_PARTITION_OR_RETURN(partition); @@ -787,9 +818,9 @@ MetaStoreImpl::GetVolumeExtent(const GetVolumeExtentRequest *request, return st; } -MetaStatusCode -MetaStoreImpl::UpdateVolumeExtent(const UpdateVolumeExtentRequest *request, - UpdateVolumeExtentResponse *response) { +MetaStatusCode MetaStoreImpl::UpdateVolumeExtent( + const UpdateVolumeExtentRequest* request, + UpdateVolumeExtentResponse* response, int64_t logIndex) { ReadLockGuard guard(rwLock_); std::shared_ptr partition; GET_PARTITION_OR_RETURN(partition); @@ -797,14 +828,14 @@ MetaStoreImpl::UpdateVolumeExtent(const UpdateVolumeExtentRequest *request, VLOG(9) << "UpdateVolumeExtent, request: " << request->ShortDebugString(); auto st = partition->UpdateVolumeExtent(request->fsid(), request->inodeid(), - request->extents()); + request->extents(), logIndex); response->set_statuscode(st); return st; } MetaStatusCode MetaStoreImpl::UpdateDeallocatableBlockGroup( - const UpdateDeallocatableBlockGroupRequest *request, - UpdateDeallocatableBlockGroupResponse *response) { + const UpdateDeallocatableBlockGroupRequest* request, + UpdateDeallocatableBlockGroupResponse* response, int64_t logIndex) { ReadLockGuard guard(rwLock_); std::shared_ptr partition; GET_PARTITION_OR_RETURN(partition); @@ -812,7 +843,7 @@ MetaStatusCode MetaStoreImpl::UpdateDeallocatableBlockGroup( VLOG(9) << "UpdateDeallocatableBlockGroup, request: " << request->ShortDebugString(); - auto st = partition->UpdateDeallocatableBlockGroup(*request); + auto st = partition->UpdateDeallocatableBlockGroup(*request, logIndex); response->set_statuscode(st); return 
st; diff --git a/curvefs/src/metaserver/metastore.h b/curvefs/src/metaserver/metastore.h index 0c8061d2f5..a13c0a4980 100644 --- a/curvefs/src/metaserver/metastore.h +++ b/curvefs/src/metaserver/metastore.h @@ -29,6 +29,7 @@ #include #include #include +#include #include "curvefs/proto/metaserver.pb.h" #include "curvefs/src/common/rpc_stream.h" @@ -101,24 +102,29 @@ using ::curvefs::metaserver::storage::StorageOptions; // (table1 table2 table3) // | | | // dentry s3chunkinfo volumnextent - +// +// NOTE: we need to add a `logIndex` argument to +// each function, then we can filter the log entries +// already applied during `on_snapshot_save` class MetaStore { public: MetaStore() = default; virtual ~MetaStore() = default; virtual bool Load(const std::string& pathname) = 0; - virtual bool Save(const std::string& dir, - OnSnapshotSaveDoneClosure* done) = 0; + virtual bool SaveMeta(const std::string& dir, + std::vector* files) = 0; + virtual bool SaveData(const std::string& dir, + std::vector* files) = 0; virtual bool Clear() = 0; virtual bool Destroy() = 0; virtual MetaStatusCode CreatePartition( const CreatePartitionRequest* request, - CreatePartitionResponse* response) = 0; + CreatePartitionResponse* response, int64_t logIndex) = 0; virtual MetaStatusCode DeletePartition( const DeletePartitionRequest* request, - DeletePartitionResponse* response) = 0; + DeletePartitionResponse* response, int64_t logIndex) = 0; virtual bool GetPartitionInfoList( std::list *partitionInfoList) = 0; @@ -130,53 +136,62 @@ class MetaStore { // dentry virtual MetaStatusCode CreateDentry(const CreateDentryRequest* request, - CreateDentryResponse* response) = 0; + CreateDentryResponse* response, + int64_t logIndex) = 0; virtual MetaStatusCode GetDentry(const GetDentryRequest* request, - GetDentryResponse* response) = 0; + GetDentryResponse* response, + int64_t logIndex) = 0; virtual MetaStatusCode DeleteDentry(const DeleteDentryRequest* request, - DeleteDentryResponse* response) = 0; + 
DeleteDentryResponse* response, + int64_t logIndex) = 0; virtual MetaStatusCode ListDentry(const ListDentryRequest* request, - ListDentryResponse* response) = 0; + ListDentryResponse* response, + int64_t logIndex) = 0; virtual MetaStatusCode PrepareRenameTx( const PrepareRenameTxRequest* request, - PrepareRenameTxResponse* response) = 0; + PrepareRenameTxResponse* response, int64_t logIndex) = 0; // inode virtual MetaStatusCode CreateInode(const CreateInodeRequest* request, - CreateInodeResponse* response) = 0; + CreateInodeResponse* response, + int64_t logIndex) = 0; virtual MetaStatusCode CreateRootInode( const CreateRootInodeRequest* request, - CreateRootInodeResponse* response) = 0; + CreateRootInodeResponse* response, int64_t logIndex) = 0; virtual MetaStatusCode CreateManageInode( - const CreateManageInodeRequest* request, - CreateManageInodeResponse* response) = 0; + const CreateManageInodeRequest* request, + CreateManageInodeResponse* response, int64_t logIndex) = 0; virtual MetaStatusCode GetInode(const GetInodeRequest* request, - GetInodeResponse* response) = 0; + GetInodeResponse* response, + int64_t logIndex) = 0; virtual MetaStatusCode BatchGetInodeAttr( - const BatchGetInodeAttrRequest* request, - BatchGetInodeAttrResponse* response) = 0; + const BatchGetInodeAttrRequest* request, + BatchGetInodeAttrResponse* response, int64_t logIndex) = 0; virtual MetaStatusCode BatchGetXAttr(const BatchGetXAttrRequest* request, - BatchGetXAttrResponse* response) = 0; + BatchGetXAttrResponse* response, + int64_t logIndex) = 0; virtual MetaStatusCode DeleteInode(const DeleteInodeRequest* request, - DeleteInodeResponse* response) = 0; + DeleteInodeResponse* response, + int64_t logIndex) = 0; virtual MetaStatusCode UpdateInode(const UpdateInodeRequest* request, - UpdateInodeResponse* response) = 0; + UpdateInodeResponse* response, + int64_t logIndex) = 0; virtual MetaStatusCode GetOrModifyS3ChunkInfo( const GetOrModifyS3ChunkInfoRequest* request, 
GetOrModifyS3ChunkInfoResponse* response, - std::shared_ptr* iterator) = 0; + std::shared_ptr* iterator, int64_t logIndex) = 0; virtual MetaStatusCode SendS3ChunkInfoByStream( std::shared_ptr connection, @@ -184,15 +199,15 @@ class MetaStore { virtual MetaStatusCode GetVolumeExtent( const GetVolumeExtentRequest* request, - GetVolumeExtentResponse* response) = 0; + GetVolumeExtentResponse* response, int64_t logIndex) = 0; virtual MetaStatusCode UpdateVolumeExtent( const UpdateVolumeExtentRequest* request, - UpdateVolumeExtentResponse* response) = 0; + UpdateVolumeExtentResponse* response, int64_t logIndex) = 0; virtual MetaStatusCode UpdateDeallocatableBlockGroup( - const UpdateDeallocatableBlockGroupRequest *request, - UpdateDeallocatableBlockGroupResponse *response) = 0; + const UpdateDeallocatableBlockGroupRequest* request, + UpdateDeallocatableBlockGroupResponse* response, int64_t logIndex) = 0; }; class MetaStoreImpl : public MetaStore { @@ -202,16 +217,20 @@ class MetaStoreImpl : public MetaStore { const storage::StorageOptions& storageOptions); bool Load(const std::string& checkpoint) override; - bool Save(const std::string& dir, - OnSnapshotSaveDoneClosure* done) override; + bool SaveMeta(const std::string& dir, + std::vector* files) override; + bool SaveData(const std::string& dir, + std::vector* files) override; bool Clear() override; bool Destroy() override; MetaStatusCode CreatePartition(const CreatePartitionRequest* request, - CreatePartitionResponse* response) override; + CreatePartitionResponse* response, + int64_t logIndex) override; MetaStatusCode DeletePartition(const DeletePartitionRequest* request, - DeletePartitionResponse* response) override; + DeletePartitionResponse* response, + int64_t logIndex) override; bool GetPartitionInfoList( std::list *partitionInfoList) override; @@ -223,68 +242,82 @@ class MetaStoreImpl : public MetaStore { // dentry MetaStatusCode CreateDentry(const CreateDentryRequest* request, - CreateDentryResponse* response) 
override; + CreateDentryResponse* response, + int64_t logIndex) override; MetaStatusCode GetDentry(const GetDentryRequest* request, - GetDentryResponse* response) override; + GetDentryResponse* response, + int64_t logIndex) override; MetaStatusCode DeleteDentry(const DeleteDentryRequest* request, - DeleteDentryResponse* response) override; + DeleteDentryResponse* response, + int64_t logIndex) override; MetaStatusCode ListDentry(const ListDentryRequest* request, - ListDentryResponse* response) override; + ListDentryResponse* response, + int64_t logIndex) override; MetaStatusCode PrepareRenameTx(const PrepareRenameTxRequest* request, - PrepareRenameTxResponse* response) override; + PrepareRenameTxResponse* response, + int64_t logIndex) override; // inode MetaStatusCode CreateInode(const CreateInodeRequest* request, - CreateInodeResponse* response) override; + CreateInodeResponse* response, + int64_t logIndex) override; MetaStatusCode CreateRootInode(const CreateRootInodeRequest* request, - CreateRootInodeResponse* response) override; + CreateRootInodeResponse* response, + int64_t logIndex) override; - MetaStatusCode CreateManageInode( - const CreateManageInodeRequest* request, - CreateManageInodeResponse* response) override; + MetaStatusCode CreateManageInode(const CreateManageInodeRequest* request, + CreateManageInodeResponse* response, + int64_t logIndex) override; MetaStatusCode GetInode(const GetInodeRequest* request, - GetInodeResponse* response) override; + GetInodeResponse* response, + int64_t logIndex) override; MetaStatusCode BatchGetInodeAttr(const BatchGetInodeAttrRequest* request, - BatchGetInodeAttrResponse* response) override; + BatchGetInodeAttrResponse* response, + int64_t logIndex) override; MetaStatusCode BatchGetXAttr(const BatchGetXAttrRequest* request, - BatchGetXAttrResponse* response) override; + BatchGetXAttrResponse* response, + int64_t logIndex) override; MetaStatusCode DeleteInode(const DeleteInodeRequest* request, - DeleteInodeResponse* 
response) override; + DeleteInodeResponse* response, + int64_t logIndex) override; MetaStatusCode UpdateInode(const UpdateInodeRequest* request, - UpdateInodeResponse* response) override; + UpdateInodeResponse* response, + int64_t logIndex) override; std::shared_ptr GetPartition(uint32_t partitionId); MetaStatusCode GetOrModifyS3ChunkInfo( const GetOrModifyS3ChunkInfoRequest* request, GetOrModifyS3ChunkInfoResponse* response, - std::shared_ptr* iterator) override; + std::shared_ptr* iterator, int64_t logIndex) override; MetaStatusCode SendS3ChunkInfoByStream( std::shared_ptr connection, std::shared_ptr iterator) override; MetaStatusCode GetVolumeExtent(const GetVolumeExtentRequest* request, - GetVolumeExtentResponse* response) override; + GetVolumeExtentResponse* response, + int64_t logIndex) override; - MetaStatusCode UpdateVolumeExtent( - const UpdateVolumeExtentRequest* request, - UpdateVolumeExtentResponse* response) override; + MetaStatusCode UpdateVolumeExtent(const UpdateVolumeExtentRequest* request, + UpdateVolumeExtentResponse* response, + int64_t logIndex) override; // block group MetaStatusCode UpdateDeallocatableBlockGroup( - const UpdateDeallocatableBlockGroupRequest *request, - UpdateDeallocatableBlockGroupResponse *response) override; + const UpdateDeallocatableBlockGroupRequest* request, + UpdateDeallocatableBlockGroupResponse* response, + int64_t logIndex) override; private: FRIEND_TEST(MetastoreTest, partition); diff --git a/curvefs/src/metaserver/metastore_fstream.cpp b/curvefs/src/metaserver/metastore_fstream.cpp index 02f4292007..c2782bfd26 100644 --- a/curvefs/src/metaserver/metastore_fstream.cpp +++ b/curvefs/src/metaserver/metastore_fstream.cpp @@ -95,62 +95,6 @@ bool MetaStoreFStream::LoadPartition(uint32_t partitionId, return true; } -bool MetaStoreFStream::LoadInode(uint32_t partitionId, const std::string &key, - const std::string &value) { - (void)key; - auto partition = GetPartition(partitionId); - if (nullptr == partition) { - 
LOG(ERROR) << "Partition not found, partitionId = " << partitionId; - return false; - } - - Inode inode; - if (!conv_->ParseFromString(value, &inode)) { - LOG(ERROR) << "Decode inode failed"; - return false; - } - - MetaStatusCode rc = partition->InsertInode(inode); - if (rc != MetaStatusCode::OK) { - LOG(ERROR) << "InsertInode failed, retCode = " - << MetaStatusCode_Name(rc); - return false; - } - return true; -} - -bool MetaStoreFStream::LoadDentry(uint8_t version, uint32_t partitionId, - const std::string &key, - const std::string &value) { - (void)key; - auto partition = GetPartition(partitionId); - if (nullptr == partition) { - LOG(ERROR) << "Partition not found, partitionId = " << partitionId; - return false; - } - - DentryVec vec; - if (version == 1) { - Dentry dentry; - if (!conv_->ParseFromString(value, &dentry)) { - LOG(ERROR) << "Decode dentry failed"; - return false; - } - *vec.add_dentrys() = dentry; - } else if (!conv_->ParseFromString(value, &vec)) { - LOG(ERROR) << "Decode dentry vector failed"; - return false; - } - - MetaStatusCode rc = partition->LoadDentry(vec, version == 1); - if (rc != MetaStatusCode::OK) { - LOG(ERROR) << "LoadDentry failed, retCode = " - << MetaStatusCode_Name(rc); - return false; - } - return true; -} - bool MetaStoreFStream::LoadPendingTx(uint32_t partitionId, const std::string &key, const std::string &value) { @@ -174,72 +118,6 @@ bool MetaStoreFStream::LoadPendingTx(uint32_t partitionId, return succ; } -bool MetaStoreFStream::LoadInodeS3ChunkInfoList(uint32_t partitionId, - const std::string &key, - const std::string &value) { - auto partition = GetPartition(partitionId); - if (nullptr == partition) { - LOG(ERROR) << "Partition not found, partitionId = " << partitionId; - return false; - } - - S3ChunkInfoList list; - Key4S3ChunkInfoList key4list; - if (!conv_->ParseFromString(key, &key4list)) { - LOG(ERROR) << "Decode Key4S3ChunkInfoList failed"; - return false; - } else if (!conv_->ParseFromString(value, &list)) { - 
LOG(ERROR) << "Decode S3ChunkInfoList failed"; - return false; - } - - S3ChunkInfoMap map2add; - S3ChunkInfoMap map2del; - std::shared_ptr iterator; - map2add.insert({key4list.chunkIndex, list}); - MetaStatusCode rc = partition->GetOrModifyS3ChunkInfo( - key4list.fsId, key4list.inodeId, map2add, map2del, false, &iterator); - if (rc != MetaStatusCode::OK) { - LOG(ERROR) << "GetOrModifyS3ChunkInfo failed, retCode = " - << MetaStatusCode_Name(rc); - return false; - } - return true; -} - -bool MetaStoreFStream::LoadVolumeExtentList(uint32_t partitionId, - const std::string &key, - const std::string &value) { - auto partition = GetPartition(partitionId); - if (!partition) { - LOG(ERROR) << "Partition not found, partitionId: " << partitionId; - return false; - } - - Key4VolumeExtentSlice sliceKey; - VolumeExtentSlice slice; - - if (!sliceKey.ParseFromString(key)) { - LOG(ERROR) << "Fail to decode Key4VolumeExtentSlice, key: `" << key - << "`"; - return false; - } - - if (!conv_->ParseFromString(value, &slice)) { - LOG(ERROR) << "Decode VolumeExtentSlice failed"; - return false; - } - - auto st = partition->UpdateVolumeExtentSlice(sliceKey.fsId_, - sliceKey.inodeId_, slice); - - LOG_IF(ERROR, st != MetaStatusCode::OK) - << "LoadVolumeExtentList update extent failed, error: " - << MetaStatusCode_Name(st); - - return st == MetaStatusCode::OK; -} - std::shared_ptr MetaStoreFStream::NewPartitionIterator() { std::string value; auto container = std::make_shared(); @@ -327,36 +205,36 @@ MetaStoreFStream::NewVolumeExtentListIterator(Partition *partition) { bool MetaStoreFStream::Load(const std::string &pathname, uint8_t *version) { uint64_t totalPartition = 0; - uint64_t totalInode = 0; - uint64_t totalDentry = 0; - uint64_t totalS3ChunkInfoList = 0; - uint64_t totalVolumeExtent = 0; uint64_t totalPendingTx = 0; - auto callback = [&](uint8_t version, ENTRY_TYPE entryType, - uint32_t partitionId, const std::string &key, - const std::string &value) -> bool { + uint32_t partitionId, 
const std::string& key, + const std::string& value) -> bool { + (void)version; switch (entryType) { - case ENTRY_TYPE::PARTITION: - ++totalPartition; - return LoadPartition(partitionId, key, value); - case ENTRY_TYPE::INODE: - ++totalInode; - return LoadInode(partitionId, key, value); - case ENTRY_TYPE::DENTRY: - ++totalDentry; - return LoadDentry(version, partitionId, key, value); - case ENTRY_TYPE::PENDING_TX: - ++totalPendingTx; - return LoadPendingTx(partitionId, key, value); - case ENTRY_TYPE::S3_CHUNK_INFO_LIST: - ++totalS3ChunkInfoList; - return LoadInodeS3ChunkInfoList(partitionId, key, value); - case ENTRY_TYPE::VOLUME_EXTENT: - ++totalVolumeExtent; - return LoadVolumeExtentList(partitionId, key, value); - case ENTRY_TYPE::UNKNOWN: - break; + case ENTRY_TYPE::PARTITION: + ++totalPartition; + return LoadPartition(partitionId, key, value); + case ENTRY_TYPE::INODE: + LOG(ERROR) + << "Snapshot is too old, incompatible with current version"; + break; + case ENTRY_TYPE::DENTRY: + LOG(ERROR) + << "Snapshot is too old, incompatible with current version"; + break; + case ENTRY_TYPE::PENDING_TX: + ++totalPendingTx; + return LoadPendingTx(partitionId, key, value); + case ENTRY_TYPE::S3_CHUNK_INFO_LIST: + LOG(ERROR) + << "Snapshot is too old, incompatible with current version"; + break; + case ENTRY_TYPE::VOLUME_EXTENT: + LOG(ERROR) + << "Snapshot is too old, incompatible with current version"; + break; + case ENTRY_TYPE::UNKNOWN: + break; } LOG(ERROR) << "Load failed, unknown entry type"; @@ -367,9 +245,6 @@ bool MetaStoreFStream::Load(const std::string &pathname, uint8_t *version) { std::ostringstream oss; oss << "total partition: " << totalPartition - << ", total inode: " << totalInode << ", total dentry: " << totalDentry - << ", total s3chunkinfolist: " << totalS3ChunkInfoList - << ", total volumeextent: " << totalVolumeExtent << ", total pendingtx: " << totalPendingTx; if (ret) { @@ -389,9 +264,6 @@ bool MetaStoreFStream::Save(const std::string &path, 
DumpFileClosure *done) { ChildrenType children; children.push_back(NewPartitionIterator()); - for (const auto &item : *partitionMap_) { - children.push_back(NewPendingTxIterator(item.second)); - } for (const auto &child : children) { if (nullptr == child) { diff --git a/curvefs/src/metaserver/metastore_fstream.h b/curvefs/src/metaserver/metastore_fstream.h index fe124c97f1..17581abf8d 100644 --- a/curvefs/src/metaserver/metastore_fstream.h +++ b/curvefs/src/metaserver/metastore_fstream.h @@ -61,27 +61,10 @@ class MetaStoreFStream { const std::string& key, const std::string& value); - bool LoadInode(uint32_t partitionId, - const std::string& key, - const std::string& value); - - bool LoadDentry(uint8_t version, - uint32_t partitionId, - const std::string& key, - const std::string& value); - bool LoadPendingTx(uint32_t partitionId, const std::string& key, const std::string& value); - bool LoadInodeS3ChunkInfoList(uint32_t partitionId, - const std::string& key, - const std::string& value); - - bool LoadVolumeExtentList(uint32_t partitionId, - const std::string& key, - const std::string& value); - std::shared_ptr NewPartitionIterator(); std::shared_ptr NewInodeIterator( diff --git a/curvefs/src/metaserver/partition.cpp b/curvefs/src/metaserver/partition.cpp index 7b7e745a6c..3c3e7d7b99 100644 --- a/curvefs/src/metaserver/partition.cpp +++ b/curvefs/src/metaserver/partition.cpp @@ -25,18 +25,20 @@ #include #include +#include #include +#include #include #include "curvefs/proto/metaserver.pb.h" -#include "curvefs/src/metaserver/s3compact_manager.h" -#include "curvefs/src/metaserver/trash_manager.h" -#include "curvefs/src/metaserver/storage/converter.h" +#include "curvefs/src/metaserver/copyset/copyset_node_manager.h" +#include "curvefs/src/metaserver/mds/fsinfo_manager.h" #include "curvefs/src/metaserver/s3compact.h" +#include "curvefs/src/metaserver/s3compact_manager.h" #include "curvefs/src/metaserver/space/inode_volume_space_deallocate.h" #include 
"curvefs/src/metaserver/space/volume_deallocate_manager.h" -#include "curvefs/src/metaserver/copyset/copyset_node_manager.h" -#include "curvefs/src/metaserver/mds/fsinfo_manager.h" +#include "curvefs/src/metaserver/storage/converter.h" +#include "curvefs/src/metaserver/trash_manager.h" namespace curvefs { namespace metaserver { @@ -44,8 +46,8 @@ namespace metaserver { using ::curvefs::metaserver::storage::NameGenerator; Partition::Partition(PartitionInfo partition, - std::shared_ptr kvStorage, - bool startCompact, bool startVolumeDeallocate) { + std::shared_ptr kvStorage, bool startCompact, + bool startVolumeDeallocate) { assert(partition.start() <= partition.end()); partitionInfo_ = std::move(partition); uint32_t partitionId = partitionInfo_.partitionid(); @@ -69,7 +71,7 @@ Partition::Partition(PartitionInfo partition, auto trash = std::make_shared(inodeStorage_); inodeManager_ = std::make_shared( inodeStorage_, trash, partitionInfo_.mutable_filetype2inodenum()); - txManager_ = std::make_shared(dentryStorage_); + txManager_ = std::make_shared(dentryStorage_, partitionInfo_); dentryManager_ = std::make_shared(dentryStorage_, txManager_); if (!partitionInfo_.has_nextid()) { @@ -88,73 +90,95 @@ Partition::Partition(PartitionInfo partition, } } -#define PRECHECK(fsId, inodeId) \ - do { \ - if (!IsInodeBelongs((fsId), (inodeId))) { \ - return MetaStatusCode::PARTITION_ID_MISSMATCH; \ - } \ - if (GetStatus() == PartitionStatus::DELETING) { \ - return MetaStatusCode::PARTITION_DELETING; \ - } \ +#define PRECHECK(fsId, inodeId) \ + do { \ + if (!IsInodeBelongs((fsId), (inodeId))) { \ + return MetaStatusCode::PARTITION_ID_MISSMATCH; \ + } \ + if (GetStatus() == PartitionStatus::DELETING) { \ + return MetaStatusCode::PARTITION_DELETING; \ + } \ } while (0) -#define PRECHECK_FSID(fsId) \ - do { \ - if (!IsInodeBelongs((fsId))) { \ - return MetaStatusCode::PARTITION_ID_MISSMATCH; \ - } \ - if (GetStatus() == PartitionStatus::DELETING) { \ - return 
MetaStatusCode::PARTITION_DELETING; \ - } \ +#define PRECHECK_FSID(fsId) \ + do { \ + if (!IsInodeBelongs((fsId))) { \ + return MetaStatusCode::PARTITION_ID_MISSMATCH; \ + } \ + if (GetStatus() == PartitionStatus::DELETING) { \ + return MetaStatusCode::PARTITION_DELETING; \ + } \ } while (0) -MetaStatusCode Partition::CreateDentry(const Dentry& dentry, - const Time& tm) { +MetaStatusCode Partition::CreateDentry(const Dentry& dentry, const Time& tm, + int64_t logIndex) { PRECHECK(dentry.fsid(), dentry.parentinodeid()); - - MetaStatusCode ret = dentryManager_->CreateDentry(dentry); + MetaStatusCode ret = dentryManager_->CreateDentry(dentry, logIndex); if (MetaStatusCode::OK == ret) { if (dentry.has_type()) { return inodeManager_->UpdateInodeWhenCreateOrRemoveSubNode( - dentry, tm.sec(), tm.nsec(), true); + dentry, tm.sec(), tm.nsec(), true, logIndex); } else { LOG(ERROR) << "CreateDentry does not have type, " << dentry.ShortDebugString(); return MetaStatusCode::PARAM_ERROR; } } else if (MetaStatusCode::IDEMPOTENCE_OK == ret) { + if (dentry.has_type()) { + // NOTE: we enter here means that + // this log maybe is "half apply" + ret = inodeManager_->UpdateInodeWhenCreateOrRemoveSubNode( + dentry, tm.sec(), tm.nsec(), true, logIndex); + if (ret == MetaStatusCode::IDEMPOTENCE_OK) { + ret = MetaStatusCode::OK; + } + if (ret != MetaStatusCode::OK) { + return ret; + } + } return MetaStatusCode::OK; } else { return ret; } } -MetaStatusCode Partition::LoadDentry(const DentryVec& vec, bool merge) { +MetaStatusCode Partition::LoadDentry(const DentryVec& vec, bool merge, + int64_t logIndex) { auto dentry = vec.dentrys(0); PRECHECK(dentry.fsid(), dentry.parentinodeid()); - MetaStatusCode rc = dentryManager_->CreateDentry(vec, merge); - if (rc == MetaStatusCode::OK || - rc == MetaStatusCode::IDEMPOTENCE_OK) { + MetaStatusCode rc = dentryManager_->CreateDentry(vec, merge, logIndex); + if (rc == MetaStatusCode::OK || rc == MetaStatusCode::IDEMPOTENCE_OK) { return MetaStatusCode::OK; 
} return rc; } -MetaStatusCode Partition::DeleteDentry(const Dentry& dentry) { +MetaStatusCode Partition::DeleteDentry(const Dentry& dentry, int64_t logIndex) { PRECHECK(dentry.fsid(), dentry.parentinodeid()); - MetaStatusCode ret = dentryManager_->DeleteDentry(dentry); + MetaStatusCode ret = dentryManager_->DeleteDentry(dentry, logIndex); if (MetaStatusCode::OK == ret) { if (dentry.has_type()) { return inodeManager_->UpdateInodeWhenCreateOrRemoveSubNode( - dentry, 0, 0, false); + dentry, 0, 0, false, logIndex); } else { LOG(ERROR) << "DeleteDentry does not have type, " << dentry.ShortDebugString(); return MetaStatusCode::PARAM_ERROR; } + } else if (MetaStatusCode::IDEMPOTENCE_OK == ret) { + if (dentry.has_type()) { + // NOTE: we enter here means that + // this log maybe is "half apply" + ret = inodeManager_->UpdateInodeWhenCreateOrRemoveSubNode( + dentry, 0, 0, false, logIndex); + } + if (ret == MetaStatusCode::IDEMPOTENCE_OK) { + ret = MetaStatusCode::OK; + } + return ret; } else { return ret; } @@ -167,22 +191,20 @@ MetaStatusCode Partition::GetDentry(Dentry* dentry) { MetaStatusCode Partition::ListDentry(const Dentry& dentry, std::vector* dentrys, - uint32_t limit, - bool onlyDir) { + uint32_t limit, bool onlyDir) { PRECHECK(dentry.fsid(), dentry.parentinodeid()); return dentryManager_->ListDentry(dentry, dentrys, limit, onlyDir); } -void Partition::ClearDentry() { - dentryManager_->ClearDentry(); -} +void Partition::ClearDentry() { dentryManager_->ClearDentry(); } -MetaStatusCode Partition::HandleRenameTx(const std::vector& dentrys) { - for (const auto &it : dentrys) { +MetaStatusCode Partition::HandleRenameTx(const std::vector& dentrys, + int64_t logIndex) { + for (const auto& it : dentrys) { PRECHECK(it.fsid(), it.parentinodeid()); } - return dentryManager_->HandleRenameTx(dentrys); + return dentryManager_->HandleRenameTx(dentrys, logIndex); } bool Partition::InsertPendingTx(const PrepareRenameTxRequest& pendingTx) { @@ -198,6 +220,19 @@ bool 
Partition::InsertPendingTx(const PrepareRenameTxRequest& pendingTx) { return txManager_->InsertPendingTx(renameTx); } +bool Partition::Init() { + // NOTE: invoke `dentryStorage::Init()` + // and `inodeStorage::Init()` is unnecessary + // they will be invoked by `manager` + return dentryManager_->Init() && inodeManager_->Init() && + txManager_->Init(); +} + +void Partition::SerializeRenameTx(const RenameTx& in, + PrepareRenameTxRequest* out) { + txManager_->SerializeRenameTx(in, out); +} + bool Partition::FindPendingTx(PrepareRenameTxRequest* pendingTx) { if (GetStatus() == PartitionStatus::DELETING) { return false; @@ -209,17 +244,13 @@ bool Partition::FindPendingTx(PrepareRenameTxRequest* pendingTx) { return false; } - auto dentrys = renameTx.GetDentrys(); - pendingTx->set_poolid(partitionInfo_.poolid()); - pendingTx->set_copysetid(partitionInfo_.copysetid()); - pendingTx->set_partitionid(partitionInfo_.partitionid()); - *pendingTx->mutable_dentrys() = {dentrys->begin(), dentrys->end()}; + SerializeRenameTx(renameTx, pendingTx); return true; } // inode -MetaStatusCode Partition::CreateInode(const InodeParam ¶m, - Inode* inode) { +MetaStatusCode Partition::CreateInode(const InodeParam& param, Inode* inode, + int64_t logIndex) { if (GetStatus() == PartitionStatus::READONLY) { return MetaStatusCode::PARTITION_ALLOC_ID_FAIL; } @@ -237,19 +268,33 @@ MetaStatusCode Partition::CreateInode(const InodeParam ¶m, return MetaStatusCode::PARTITION_ID_MISSMATCH; } - return inodeManager_->CreateInode(inodeId, param, inode); + auto ret = inodeManager_->CreateInode(inodeId, param, inode, logIndex); + if (ret == MetaStatusCode::IDEMPOTENCE_OK) { + ret = MetaStatusCode::OK; + } + return ret; } -MetaStatusCode Partition::CreateRootInode(const InodeParam ¶m) { +MetaStatusCode Partition::CreateRootInode(const InodeParam& param, + int64_t logIndex) { PRECHECK_FSID(param.fsId); - return inodeManager_->CreateRootInode(param); + auto ret = inodeManager_->CreateRootInode(param, logIndex); + 
if (ret == MetaStatusCode::IDEMPOTENCE_OK) { + ret = MetaStatusCode::OK; + } + return ret; } -MetaStatusCode Partition::CreateManageInode(const InodeParam ¶m, +MetaStatusCode Partition::CreateManageInode(const InodeParam& param, ManageInodeType manageType, - Inode* inode) { + Inode* inode, int64_t logIndex) { PRECHECK_FSID(param.fsId); - return inodeManager_->CreateManageInode(param, manageType, inode); + auto ret = + inodeManager_->CreateManageInode(param, manageType, inode, logIndex); + if (ret == MetaStatusCode::IDEMPOTENCE_OK) { + ret = MetaStatusCode::OK; + } + return ret; } MetaStatusCode Partition::GetInode(uint32_t fsId, uint64_t inodeId, @@ -270,26 +315,38 @@ MetaStatusCode Partition::GetXAttr(uint32_t fsId, uint64_t inodeId, return inodeManager_->GetXAttr(fsId, inodeId, xattr); } -MetaStatusCode Partition::DeleteInode(uint32_t fsId, uint64_t inodeId) { +MetaStatusCode Partition::DeleteInode(uint32_t fsId, uint64_t inodeId, + int64_t logIndex) { PRECHECK(fsId, inodeId); - return inodeManager_->DeleteInode(fsId, inodeId); + auto ret = inodeManager_->DeleteInode(fsId, inodeId, logIndex); + if (ret == MetaStatusCode::IDEMPOTENCE_OK) { + ret = MetaStatusCode::OK; + } + return ret; } -MetaStatusCode Partition::UpdateInode(const UpdateInodeRequest& request) { +MetaStatusCode Partition::UpdateInode(const UpdateInodeRequest& request, + int64_t logIndex) { PRECHECK(request.fsid(), request.inodeid()); - return inodeManager_->UpdateInode(request); + auto ret = inodeManager_->UpdateInode(request, logIndex); + if (ret == MetaStatusCode::IDEMPOTENCE_OK) { + ret = MetaStatusCode::OK; + } + return ret; } MetaStatusCode Partition::GetOrModifyS3ChunkInfo( - uint32_t fsId, - uint64_t inodeId, - const S3ChunkInfoMap& map2add, - const S3ChunkInfoMap& map2del, - bool returnS3ChunkInfoMap, - std::shared_ptr* iterator) { + uint32_t fsId, uint64_t inodeId, const S3ChunkInfoMap& map2add, + const S3ChunkInfoMap& map2del, bool returnS3ChunkInfoMap, + std::shared_ptr* iterator, 
int64_t logIndex) { PRECHECK(fsId, inodeId); - return inodeManager_->GetOrModifyS3ChunkInfo( - fsId, inodeId, map2add, map2del, returnS3ChunkInfoMap, iterator); + auto ret = inodeManager_->GetOrModifyS3ChunkInfo( + fsId, inodeId, map2add, map2del, returnS3ChunkInfoMap, iterator, + logIndex); + if (ret == MetaStatusCode::IDEMPOTENCE_OK) { + ret = MetaStatusCode::OK; + } + return ret; } MetaStatusCode Partition::PaddingInodeS3ChunkInfo(int32_t fsId, @@ -300,9 +357,13 @@ MetaStatusCode Partition::PaddingInodeS3ChunkInfo(int32_t fsId, return inodeManager_->PaddingInodeS3ChunkInfo(fsId, inodeId, m, limit); } -MetaStatusCode Partition::InsertInode(const Inode& inode) { +MetaStatusCode Partition::InsertInode(const Inode& inode, int64_t logIndex) { PRECHECK(inode.fsid(), inode.inodeid()); - return inodeManager_->InsertInode(inode); + auto ret = inodeManager_->InsertInode(inode, logIndex); + if (ret == MetaStatusCode::IDEMPOTENCE_OK) { + ret = MetaStatusCode::OK; + } + return ret; } bool Partition::GetInodeIdList(std::list* InodeIdList) { @@ -393,6 +454,10 @@ bool Partition::Clear() { return true; } +// NOTE: store nextid to kvstroage is unnecessary +// we will replay the logs filter the log entries that +// already applied, but keep nextid changes in memory +// so it will grow to corrected value after replay uint64_t Partition::GetNewInodeId() { if (partitionInfo_.nextid() > partitionInfo_.end()) { partitionInfo_.set_status(PartitionStatus::READONLY); @@ -411,9 +476,7 @@ uint32_t Partition::GetDentryNum() { return static_cast(dentryStorage_->Size()); } -bool Partition::EmptyInodeStorage() { - return inodeStorage_->Empty(); -} +bool Partition::EmptyInodeStorage() { return inodeStorage_->Empty(); } std::string Partition::GetInodeTablename() { std::ostringstream oss; @@ -427,23 +490,31 @@ std::string Partition::GetDentryTablename() { return oss.str(); } -MetaStatusCode -Partition::UpdateVolumeExtent(uint32_t fsId, uint64_t inodeId, - const VolumeExtentSliceList &extents) { 
+MetaStatusCode Partition::UpdateVolumeExtent( + uint32_t fsId, uint64_t inodeId, const VolumeExtentSliceList& extents, + int64_t logIndex) { PRECHECK(fsId, inodeId); - return inodeManager_->UpdateVolumeExtent(fsId, inodeId, extents); + auto ret = + inodeManager_->UpdateVolumeExtent(fsId, inodeId, extents, logIndex); + if (ret == MetaStatusCode::IDEMPOTENCE_OK) { + ret = MetaStatusCode::OK; + } + return ret; } MetaStatusCode Partition::UpdateVolumeExtentSlice( - uint32_t fsId, - uint64_t inodeId, - const VolumeExtentSlice& slice) { + uint32_t fsId, uint64_t inodeId, const VolumeExtentSlice& slice, + int64_t logIndex) { PRECHECK(fsId, inodeId); - return inodeManager_->UpdateVolumeExtentSlice(fsId, inodeId, slice); + auto ret = + inodeManager_->UpdateVolumeExtentSlice(fsId, inodeId, slice, logIndex); + if (ret == MetaStatusCode::IDEMPOTENCE_OK) { + ret = MetaStatusCode::OK; + } + return ret; } -MetaStatusCode Partition::GetVolumeExtent(uint32_t fsId, - uint64_t inodeId, +MetaStatusCode Partition::GetVolumeExtent(uint32_t fsId, uint64_t inodeId, const std::vector& slices, VolumeExtentSliceList* extents) { PRECHECK(fsId, inodeId); @@ -451,14 +522,17 @@ MetaStatusCode Partition::GetVolumeExtent(uint32_t fsId, } MetaStatusCode Partition::UpdateDeallocatableBlockGroup( - const UpdateDeallocatableBlockGroupRequest &request) { + const UpdateDeallocatableBlockGroupRequest& request, int64_t logIndex) { PRECHECK_FSID(request.fsid()); - return inodeStorage_->UpdateDeallocatableBlockGroup(request.fsid(), - request.update()); + auto ret = inodeManager_->UpdateDeallocatableBlockGroup(request, logIndex); + if (ret == MetaStatusCode::IDEMPOTENCE_OK) { + ret = MetaStatusCode::OK; + } + return ret; } MetaStatusCode Partition::GetAllBlockGroup( - std::vector *deallocatableBlockGroupVec) { + std::vector* deallocatableBlockGroupVec) { return inodeStorage_->GetAllBlockGroup(deallocatableBlockGroupVec); } diff --git a/curvefs/src/metaserver/partition.h b/curvefs/src/metaserver/partition.h 
index e24c0abd40..c78738cb85 100644 --- a/curvefs/src/metaserver/partition.h +++ b/curvefs/src/metaserver/partition.h @@ -22,11 +22,13 @@ #ifndef CURVEFS_SRC_METASERVER_PARTITION_H_ #define CURVEFS_SRC_METASERVER_PARTITION_H_ +#include #include #include #include #include #include + #include "curvefs/proto/common.pb.h" #include "curvefs/proto/metaserver.pb.h" #include "curvefs/src/common/define.h" @@ -34,15 +36,16 @@ #include "curvefs/src/metaserver/dentry_storage.h" #include "curvefs/src/metaserver/inode_manager.h" #include "curvefs/src/metaserver/inode_storage.h" -#include "curvefs/src/metaserver/trash_manager.h" #include "curvefs/src/metaserver/storage/iterator.h" +#include "curvefs/src/metaserver/trash_manager.h" namespace curvefs { namespace metaserver { +using curvefs::common::AppliedIndex; using curvefs::common::PartitionInfo; using curvefs::common::PartitionStatus; -using ::curvefs::metaserver::storage::KVStorage; using ::curvefs::metaserver::storage::Iterator; +using ::curvefs::metaserver::storage::KVStorage; using S3ChunkInfoMap = google::protobuf::Map; // skip ROOTINODEID and RECYCLEINODEID @@ -50,43 +53,47 @@ constexpr uint64_t kMinPartitionStartId = ROOTINODEID + 2; class Partition { public: - Partition(PartitionInfo partition, - std::shared_ptr kvStorage, + Partition(PartitionInfo partition, std::shared_ptr kvStorage, bool startCompact = true, bool startVolumeDeallocate = true); Partition() = default; // dentry - MetaStatusCode CreateDentry(const Dentry& dentry, - const Time& tm); + MetaStatusCode CreateDentry(const Dentry& dentry, const Time& tm, + int64_t logIndex); - MetaStatusCode LoadDentry(const DentryVec& vec, bool merge); + MetaStatusCode LoadDentry(const DentryVec& vec, bool merge, + int64_t logIndex); - MetaStatusCode DeleteDentry(const Dentry& dentry); + MetaStatusCode DeleteDentry(const Dentry& dentry, int64_t logIndex); MetaStatusCode GetDentry(Dentry* dentry); MetaStatusCode ListDentry(const Dentry& dentry, - std::vector* dentrys, - 
uint32_t limit, + std::vector* dentrys, uint32_t limit, bool onlyDir = false); void ClearDentry(); - MetaStatusCode HandleRenameTx(const std::vector& dentrys); + MetaStatusCode HandleRenameTx(const std::vector& dentrys, + int64_t logIndex); bool InsertPendingTx(const PrepareRenameTxRequest& pendingTx); bool FindPendingTx(PrepareRenameTxRequest* pendingTx); + void SerializeRenameTx(const RenameTx& in, PrepareRenameTxRequest* out); + + bool Init(); + // inode - MetaStatusCode CreateInode(const InodeParam ¶m, - Inode* inode); + MetaStatusCode CreateInode(const InodeParam& param, Inode* inode, + int64_t logIndex); - MetaStatusCode CreateRootInode(const InodeParam ¶m); + MetaStatusCode CreateRootInode(const InodeParam& param, int64_t logIndex); - MetaStatusCode CreateManageInode(const InodeParam ¶m, - ManageInodeType manageType, - Inode* inode); + MetaStatusCode CreateManageInode(const InodeParam& param, + ManageInodeType manageType, Inode* inode, + int64_t logIndex); MetaStatusCode GetInode(uint32_t fsId, uint64_t inodeId, Inode* inode); @@ -95,43 +102,42 @@ class Partition { MetaStatusCode GetXAttr(uint32_t fsId, uint64_t inodeId, XAttr* xattr); - MetaStatusCode DeleteInode(uint32_t fsId, uint64_t inodeId); + MetaStatusCode DeleteInode(uint32_t fsId, uint64_t inodeId, + int64_t logIndex); - MetaStatusCode UpdateInode(const UpdateInodeRequest& request); + MetaStatusCode UpdateInode(const UpdateInodeRequest& request, + int64_t logIndex); - MetaStatusCode GetOrModifyS3ChunkInfo(uint32_t fsId, - uint64_t inodeId, + MetaStatusCode GetOrModifyS3ChunkInfo(uint32_t fsId, uint64_t inodeId, const S3ChunkInfoMap& map2add, const S3ChunkInfoMap& map2del, bool returnS3ChunkInfoMap, - std::shared_ptr* iterator); + std::shared_ptr* iterator, + int64_t logIndex); - MetaStatusCode PaddingInodeS3ChunkInfo(int32_t fsId, - uint64_t inodeId, + MetaStatusCode PaddingInodeS3ChunkInfo(int32_t fsId, uint64_t inodeId, S3ChunkInfoMap* m, uint64_t limit = 0); - MetaStatusCode 
UpdateVolumeExtent(uint32_t fsId, - uint64_t inodeId, - const VolumeExtentSliceList& extents); + MetaStatusCode UpdateVolumeExtent(uint32_t fsId, uint64_t inodeId, + const VolumeExtentSliceList& extents, + int64_t logIndex); - MetaStatusCode UpdateVolumeExtentSlice(uint32_t fsId, - uint64_t inodeId, - const VolumeExtentSlice& slice); + MetaStatusCode UpdateVolumeExtentSlice(uint32_t fsId, uint64_t inodeId, + const VolumeExtentSlice& slice, + int64_t logIndex); - MetaStatusCode GetVolumeExtent(uint32_t fsId, - uint64_t inodeId, + MetaStatusCode GetVolumeExtent(uint32_t fsId, uint64_t inodeId, const std::vector& slices, VolumeExtentSliceList* extents); - MetaStatusCode UpdateDeallocatableBlockGroup( - const UpdateDeallocatableBlockGroupRequest &request); + const UpdateDeallocatableBlockGroupRequest& request, int64_t logIndex); virtual MetaStatusCode GetAllBlockGroup( - std::vector *deallocatableBlockGroupVec); + std::vector* deallocatableBlockGroupVec); - MetaStatusCode InsertInode(const Inode& inode); + MetaStatusCode InsertInode(const Inode& inode, int64_t logIndex); bool GetInodeIdList(std::list* InodeIdList); diff --git a/curvefs/src/metaserver/s3compact_inode.cpp b/curvefs/src/metaserver/s3compact_inode.cpp index feb195883a..b50e61ad59 100644 --- a/curvefs/src/metaserver/s3compact_inode.cpp +++ b/curvefs/src/metaserver/s3compact_inode.cpp @@ -123,7 +123,7 @@ CompactInodeJob::BuildValidList(const S3ChunkInfoList& s3chunkinfolist, if (begin <= b) { if (end < b) { return; - } else if (end >= b && end < e) { + } else if (end < e) { // free [it->begin, it->end] -> [end+1, it->end] // used [it->begin, end] *it = std::make_pair(end + 1, e); @@ -134,7 +134,7 @@ CompactInodeJob::BuildValidList(const S3ChunkInfoList& s3chunkinfolist, freeList.erase(it); used[b] = std::make_pair(e, i); } - } else if (begin > b && begin <= e) { + } else if (begin <= e) { if (end < e) { // free [it-begin, it->end] // -> [it->begin, begin-1], [end+1, it->end] @@ -579,7 +579,6 @@ void 
CompactInodeJob::CompactChunks(const S3CompactTask& task) { std::unordered_map> objsAddedMap; ::google::protobuf::Map s3ChunkInfoAdd; ::google::protobuf::Map s3ChunkInfoRemove; - std::vector indexToDelete; VLOG(6) << "s3compact: begin to compact fsId:" << fsId << ", inodeId:" << inodeId; for (const auto& index : needCompact) { diff --git a/curvefs/src/metaserver/storage/converter.cpp b/curvefs/src/metaserver/storage/converter.cpp index 39a18dc3b0..2e48b20d50 100644 --- a/curvefs/src/metaserver/storage/converter.cpp +++ b/curvefs/src/metaserver/storage/converter.cpp @@ -40,9 +40,9 @@ namespace curvefs { namespace metaserver { namespace storage { +using ::curve::common::SplitString; using ::curve::common::StringToUl; using ::curve::common::StringToUll; -using ::curve::common::SplitString; using ::curvefs::common::PartitionInfo; static const char* const kDelimiter = ":"; @@ -61,7 +61,11 @@ NameGenerator::NameGenerator(uint32_t partitionId) tableName4S3ChunkInfo_(Format(kTypeS3ChunkInfo, partitionId)), tableName4Dentry_(Format(kTypeDentry, partitionId)), tableName4VolumeExtent_(Format(kTypeVolumeExtent, partitionId)), - tableName4InodeAuxInfo_(Format(kTypeInodeAuxInfo, partitionId)) {} + tableName4InodeAuxInfo_(Format(kTypeInodeAuxInfo, partitionId)), + tableName4AppliedIndex_(Format(kTypeAppliedIndex, partitionId)), + tableName4Transaction_(Format(kTypeTransaction, partitionId)), + tableName4InodeCount_(Format(kTypeInodeCount, partitionId)), + tableName4DentryCount_(Format(kTypeDentryCount, partitionId)) {} std::string NameGenerator::GetInodeTableName() const { return tableName4Inode_; @@ -91,6 +95,22 @@ std::string NameGenerator::GetInodeAuxInfoTableName() const { return tableName4InodeAuxInfo_; } +std::string NameGenerator::GetAppliedIndexTableName() const { + return tableName4AppliedIndex_; +} + +std::string NameGenerator::GetTransactionTableName() const { + return tableName4Transaction_; +} + +std::string NameGenerator::GetInodeCountTableName() const { + return 
tableName4InodeCount_; +} + +std::string NameGenerator::GetDentryCountTableName() const { + return tableName4DentryCount_; +} + size_t NameGenerator::GetFixedLength() { size_t length = sizeof(kTypeInode) + sizeof(uint32_t) + strlen(kDelimiter); LOG(INFO) << "Tablename fixed length is " << length; @@ -99,20 +119,17 @@ size_t NameGenerator::GetFixedLength() { std::string NameGenerator::Format(KEY_TYPE type, uint32_t partitionId) { char buf[sizeof(partitionId)]; - std::memcpy(buf, reinterpret_cast(&partitionId), - sizeof(buf)); - return absl::StrCat(type, kDelimiter, - absl::string_view(buf, sizeof(buf))); + std::memcpy(buf, reinterpret_cast(&partitionId), sizeof(buf)); + return absl::StrCat(type, kDelimiter, absl::string_view(buf, sizeof(buf))); } -Key4Inode::Key4Inode() - : fsId(0), inodeId(0) {} +Key4Inode::Key4Inode() : fsId(0), inodeId(0) {} Key4Inode::Key4Inode(uint32_t fsId, uint64_t inodeId) - : fsId(fsId), inodeId(inodeId) {} + : fsId(fsId), inodeId(inodeId) {} -Key4Inode::Key4Inode(const Inode& inode): - fsId(inode.fsid()), inodeId(inode.inodeid()) {} +Key4Inode::Key4Inode(const Inode& inode) + : fsId(inode.fsid()), inodeId(inode.inodeid()) {} bool Key4Inode::operator==(const Key4Inode& rhs) { return fsId == rhs.fsId && inodeId == rhs.inodeId; @@ -126,7 +143,7 @@ bool Key4Inode::ParseFromString(const std::string& value) { std::vector items; SplitString(value, ":", &items); return items.size() == 3 && CompareType(items[0], keyType_) && - StringToUl(items[1], &fsId) && StringToUll(items[2], &inodeId); + StringToUl(items[1], &fsId) && StringToUll(items[2], &inodeId); } std::string Prefix4AllInode::SerializeToString() const { @@ -150,12 +167,10 @@ Key4S3ChunkInfoList::Key4S3ChunkInfoList() lastChunkId(0), size(0) {} -Key4S3ChunkInfoList::Key4S3ChunkInfoList(uint32_t fsId, - uint64_t inodeId, +Key4S3ChunkInfoList::Key4S3ChunkInfoList(uint32_t fsId, uint64_t inodeId, uint64_t chunkIndex, uint64_t firstChunkId, - uint64_t lastChunkId, - uint64_t size) + uint64_t 
lastChunkId, uint64_t size) : fsId(fsId), inodeId(inodeId), chunkIndex(chunkIndex), @@ -164,34 +179,32 @@ Key4S3ChunkInfoList::Key4S3ChunkInfoList(uint32_t fsId, size(size) {} std::string Key4S3ChunkInfoList::SerializeToString() const { - return absl::StrCat(keyType_, ":", fsId, ":", inodeId, ":", - chunkIndex, ":", absl::StrFormat("%020" PRIu64"", firstChunkId), ":", - absl::StrFormat("%020" PRIu64"", lastChunkId), ":", size); + return absl::StrCat(keyType_, ":", fsId, ":", inodeId, ":", chunkIndex, ":", + absl::StrFormat("%020" PRIu64 "", firstChunkId), ":", + absl::StrFormat("%020" PRIu64 "", lastChunkId), ":", + size); } bool Key4S3ChunkInfoList::ParseFromString(const std::string& value) { std::vector items; SplitString(value, ":", &items); return items.size() == 7 && CompareType(items[0], keyType_) && - StringToUl(items[1], &fsId) && StringToUll(items[2], &inodeId) && - StringToUll(items[3], &chunkIndex) && - StringToUll(items[4], &firstChunkId) && - StringToUll(items[5], &lastChunkId) && - StringToUll(items[6], &size); + StringToUl(items[1], &fsId) && StringToUll(items[2], &inodeId) && + StringToUll(items[3], &chunkIndex) && + StringToUll(items[4], &firstChunkId) && + StringToUll(items[5], &lastChunkId) && StringToUll(items[6], &size); } Prefix4ChunkIndexS3ChunkInfoList::Prefix4ChunkIndexS3ChunkInfoList() : fsId(0), inodeId(0), chunkIndex(0) {} Prefix4ChunkIndexS3ChunkInfoList::Prefix4ChunkIndexS3ChunkInfoList( - uint32_t fsId, - uint64_t inodeId, - uint64_t chunkIndex) + uint32_t fsId, uint64_t inodeId, uint64_t chunkIndex) : fsId(fsId), inodeId(inodeId), chunkIndex(chunkIndex) {} std::string Prefix4ChunkIndexS3ChunkInfoList::SerializeToString() const { - return absl::StrCat(keyType_, ":", fsId, ":", inodeId, ":", - chunkIndex, ":"); + return absl::StrCat(keyType_, ":", fsId, ":", inodeId, ":", chunkIndex, + ":"); } bool Prefix4ChunkIndexS3ChunkInfoList::ParseFromString( @@ -199,8 +212,8 @@ bool Prefix4ChunkIndexS3ChunkInfoList::ParseFromString( std::vector 
items; SplitString(value, ":", &items); return items.size() == 4 && CompareType(items[0], keyType_) && - StringToUl(items[1], &fsId) && StringToUll(items[2], &inodeId) && - StringToUll(items[3], &chunkIndex); + StringToUl(items[1], &fsId) && StringToUll(items[2], &inodeId) && + StringToUll(items[3], &chunkIndex); } Prefix4InodeS3ChunkInfoList::Prefix4InodeS3ChunkInfoList() @@ -218,7 +231,7 @@ bool Prefix4InodeS3ChunkInfoList::ParseFromString(const std::string& value) { std::vector items; SplitString(value, ":", &items); return items.size() == 3 && CompareType(items[0], keyType_) && - StringToUl(items[1], &fsId) && StringToUll(items[2], &inodeId); + StringToUl(items[1], &fsId) && StringToUll(items[2], &inodeId); } std::string Prefix4AllS3ChunkInfoList::SerializeToString() const { @@ -231,30 +244,25 @@ bool Prefix4AllS3ChunkInfoList::ParseFromString(const std::string& value) { return items.size() == 1 && CompareType(items[0], keyType_); } -Key4Dentry::Key4Dentry(uint32_t fsId, - uint64_t parentInodeId, +Key4Dentry::Key4Dentry(uint32_t fsId, uint64_t parentInodeId, const std::string& name) : fsId(fsId), parentInodeId(parentInodeId), name(name) {} std::string Key4Dentry::SerializeToString() const { - return absl::StrCat(keyType_, kDelimiter, fsId, - kDelimiter, parentInodeId, + return absl::StrCat(keyType_, kDelimiter, fsId, kDelimiter, parentInodeId, kDelimiter, name); } bool Key4Dentry::ParseFromString(const std::string& value) { std::vector items; SplitString(value, ":", &items); - if (items.size() < 3 || - !CompareType(items[0], keyType_) || + if (items.size() < 3 || !CompareType(items[0], keyType_) || !StringToUl(items[1], &fsId) || !StringToUll(items[2], &parentInodeId)) { return false; } - size_t prefixLength = items[0].size() + - items[1].size() + - items[2].size() + + size_t prefixLength = items[0].size() + items[1].size() + items[2].size() + 3 * strlen(kDelimiter); if (value.size() < prefixLength) { return false; @@ -268,8 +276,7 @@ 
Prefix4SameParentDentry::Prefix4SameParentDentry(uint32_t fsId, : fsId(fsId), parentInodeId(parentInodeId) {} std::string Prefix4SameParentDentry::SerializeToString() const { - return absl::StrCat(keyType_, kDelimiter, fsId, - kDelimiter, parentInodeId, + return absl::StrCat(keyType_, kDelimiter, fsId, kDelimiter, parentInodeId, kDelimiter); } @@ -290,8 +297,7 @@ bool Prefix4AllDentry::ParseFromString(const std::string& value) { return items.size() == 1 && CompareType(items[0], keyType_); } -Key4VolumeExtentSlice::Key4VolumeExtentSlice(uint32_t fsId, - uint64_t inodeId, +Key4VolumeExtentSlice::Key4VolumeExtentSlice(uint32_t fsId, uint64_t inodeId, uint64_t offset) : fsId_(fsId), inodeId_(inodeId), offset_(offset) {} @@ -320,7 +326,7 @@ std::string Prefix4InodeVolumeExtent::SerializeToString() const { kDelimiter); } -bool Prefix4InodeVolumeExtent::ParseFromString(const std::string &value) { +bool Prefix4InodeVolumeExtent::ParseFromString(const std::string& value) { std::vector items; SplitString(value, kDelimiter, &items); return items.size() == 3 && CompareType(items[0], keyType_) && @@ -331,14 +337,13 @@ std::string Prefix4AllVolumeExtent::SerializeToString() const { return absl::StrCat(keyType_, kDelimiter); } -bool Prefix4AllVolumeExtent::ParseFromString(const std::string &value) { +bool Prefix4AllVolumeExtent::ParseFromString(const std::string& value) { std::vector items; SplitString(value, kDelimiter, &items); return items.size() == 1 && CompareType(items[0], keyType_); } -Key4InodeAuxInfo::Key4InodeAuxInfo(uint32_t fsId, - uint64_t inodeId) +Key4InodeAuxInfo::Key4InodeAuxInfo(uint32_t fsId, uint64_t inodeId) : fsId(fsId), inodeId(inodeId) {} std::string Key4InodeAuxInfo::SerializeToString() const { @@ -356,7 +361,7 @@ std::string Key4DeallocatableBlockGroup::SerializeToString() const { return absl::StrCat(keyType_, kDelimiter, fsId, kDelimiter, volumeOffset); } -bool Key4DeallocatableBlockGroup::ParseFromString(const std::string &value) { +bool 
Key4DeallocatableBlockGroup::ParseFromString(const std::string& value) { std::vector items; SplitString(value, kDelimiter, &items); return items.size() == 3 && CompareType(items[0], keyType_) && @@ -368,7 +373,7 @@ std::string Prefix4AllDeallocatableBlockGroup::SerializeToString() const { } bool Prefix4AllDeallocatableBlockGroup::ParseFromString( - const std::string &value) { + const std::string& value) { std::vector items; SplitString(value, ":", &items); return items.size() == 1 && CompareType(items[0], keyType_); diff --git a/curvefs/src/metaserver/storage/converter.h b/curvefs/src/metaserver/storage/converter.h index 8d29471322..41870c6b67 100644 --- a/curvefs/src/metaserver/storage/converter.h +++ b/curvefs/src/metaserver/storage/converter.h @@ -46,6 +46,10 @@ enum KEY_TYPE : unsigned char { kTypeBlockGroup = 6, kTypeDeallocatableBlockGroup = 7, kTypeDeallocatableInode = 8, + kTypeAppliedIndex = 9, + kTypeTransaction = 10, + kTypeInodeCount = 11, + kTypeDentryCount = 12 }; // NOTE: you must generate all table name by NameGenerator class for @@ -71,6 +75,14 @@ class NameGenerator { std::string GetDeallocatableBlockGroupTableName() const; + std::string GetAppliedIndexTableName() const; + + std::string GetTransactionTableName() const; + + std::string GetInodeCountTableName() const; + + std::string GetDentryCountTableName() const; + static size_t GetFixedLength(); private: @@ -84,6 +96,10 @@ class NameGenerator { std::string tableName4Dentry_; std::string tableName4VolumeExtent_; std::string tableName4InodeAuxInfo_; + std::string tableName4AppliedIndex_; + std::string tableName4Transaction_; + std::string tableName4InodeCount_; + std::string tableName4DentryCount_; }; class StorageKey { diff --git a/curvefs/src/metaserver/storage/dumpfile.cpp b/curvefs/src/metaserver/storage/dumpfile.cpp index dd2d03c4f0..9aa1e5f33a 100644 --- a/curvefs/src/metaserver/storage/dumpfile.cpp +++ b/curvefs/src/metaserver/storage/dumpfile.cpp @@ -67,7 +67,7 @@ using 
::curve::common::CRC32; const std::string DumpFile::kCurvefs_ = "CURVEFS"; // NOLINT const uint32_t DumpFile::kEOF_ = 0; -const uint8_t DumpFile::kVersion_ = kDumpFileV3; +const uint8_t DumpFile::kVersion_ = kDumpFileV4; const uint32_t DumpFile::kMaxStringLength_ = 1024 * 1024 * 1024; // 1GB diff --git a/curvefs/src/metaserver/storage/dumpfile.h b/curvefs/src/metaserver/storage/dumpfile.h index 3bcc3104a3..f107a6e8a1 100644 --- a/curvefs/src/metaserver/storage/dumpfile.h +++ b/curvefs/src/metaserver/storage/dumpfile.h @@ -87,6 +87,10 @@ enum DumpFileVersion : uint8_t { // kDumpFileV2 (because they're not inserted into rocksdb), other metadata // is saved by rocksdb kDumpFileV3 = 3, + // Version 4 only dumps partitions into file based on + // kDumpFileV3 (because they're not inserted into rocksdb), other metadata + // is saved by rocksdb + kDumpFileV4 = 4, }; std::ostream& operator<<(std::ostream& os, DUMPFILE_ERROR code); diff --git a/curvefs/src/metaserver/storage/rocksdb_storage.cpp b/curvefs/src/metaserver/storage/rocksdb_storage.cpp index 1080b857a3..5875ba6817 100644 --- a/curvefs/src/metaserver/storage/rocksdb_storage.cpp +++ b/curvefs/src/metaserver/storage/rocksdb_storage.cpp @@ -290,6 +290,8 @@ Status RocksDBStorage::Clear(const std::string& name, bool ordered) { if (!inited_) { return Status::DBClosed(); } else if (InTransaction_) { + // NOTE: rocksdb transaction has no `DeleteRange` function + // maybe we can implement `Clear` by "iterate and delete" return Status::NotSupported(); } @@ -436,7 +438,11 @@ bool RocksDBStorage::Checkpoint(const std::string& dir, std::vector* files) { rocksdb::FlushOptions options; options.wait = true; - options.allow_write_stall = true; + // NOTE: for asynchronous snapshot + // we cannot allow write stall + // rocksdb will wait until flush + // can be performed without causing write stall + options.allow_write_stall = false; auto status = db_->Flush(options, handles_); if (!status.ok()) { LOG(ERROR) << "Failed to flush DB, " 
<< status.ToString(); diff --git a/curvefs/src/metaserver/storage/storage_fstream.h b/curvefs/src/metaserver/storage/storage_fstream.h index c4f24aa28d..e26dcf463d 100644 --- a/curvefs/src/metaserver/storage/storage_fstream.h +++ b/curvefs/src/metaserver/storage/storage_fstream.h @@ -183,6 +183,11 @@ inline bool LoadFromFile(const std::string &pathname, uint8_t *version, std::string key = ukey.second; std::string value = iter->Value(); uint8_t version = dumpfile.GetVersion(); + if (version < static_cast(DumpFileVersion::kDumpFileV3)) { + LOG(ERROR) << "The dumpfile is too old, " + << "the version of dumpfile should be V3 at least"; + return false; + } switch (entryType) { CASE_TYPE_CALLBACK(INODE); CASE_TYPE_CALLBACK(DENTRY); diff --git a/curvefs/src/metaserver/transaction.cpp b/curvefs/src/metaserver/transaction.cpp index fb3659f96d..0eb20c084f 100644 --- a/curvefs/src/metaserver/transaction.cpp +++ b/curvefs/src/metaserver/transaction.cpp @@ -29,50 +29,36 @@ namespace metaserver { using curve::common::ReadLockGuard; using curve::common::WriteLockGuard; -#define FOR_EACH_DENTRY(action) \ -do { \ - for (const auto& dentry : dentrys_) { \ - auto rc = storage_->HandleTx( \ - DentryStorage::TX_OP_TYPE::action, dentry); \ - if (rc != MetaStatusCode::OK) { \ - return false; \ - } \ - } \ -} while (0) - RenameTx::RenameTx(const std::vector& dentrys, std::shared_ptr storage) - : txId_(dentrys[0].txid()) , + : txId_(dentrys[0].txid()), txSequence_(dentrys[0].txsequence()), dentrys_(dentrys), storage_(storage) {} -bool RenameTx::Prepare() { - FOR_EACH_DENTRY(PREPARE); - return true; +bool RenameTx::Prepare(const std::string& txPayload, int64_t logIndex) { + metaserver::TransactionRequest request; + request.set_type(metaserver::TransactionRequest::Rename); + request.set_rawpayload(txPayload); + return storage_->PrepareTx(dentrys_, request, logIndex) == + MetaStatusCode::OK; } -bool RenameTx::Commit() { - FOR_EACH_DENTRY(COMMIT); - return true; +bool RenameTx::Commit(int64_t 
logIndex) { + return storage_->CommitTx(dentrys_, logIndex) == MetaStatusCode::OK; } -bool RenameTx::Rollback() { - FOR_EACH_DENTRY(ROLLBACK); - return true; +bool RenameTx::Rollback(int64_t logIndex) { + return storage_->RollbackTx(dentrys_, logIndex) == MetaStatusCode::OK; } -uint64_t RenameTx::GetTxId() { - return txId_; -} +uint64_t RenameTx::GetTxId() { return txId_; } -uint64_t RenameTx::GetTxSequence() { - return txSequence_; -} +uint64_t RenameTx::GetTxSequence() { return txSequence_; } -std::vector* RenameTx::GetDentrys() { - return &dentrys_; -} +std::vector* RenameTx::GetDentrys() { return &dentrys_; } + +const std::vector* RenameTx::GetDentrys() const { return &dentrys_; } inline bool RenameTx::operator==(const RenameTx& rhs) { return dentrys_ == rhs.dentrys_; @@ -82,23 +68,26 @@ std::ostream& operator<<(std::ostream& os, const RenameTx& renameTx) { auto dentrys = renameTx.dentrys_; os << "txId = " << renameTx.txId_; for (size_t i = 0; i < dentrys.size(); i++) { - os << ", dentry[" << i << "] = (" - << dentrys[i].ShortDebugString() << ")"; + os << ", dentry[" << i << "] = (" << dentrys[i].ShortDebugString() + << ")"; } return os; } -TxManager::TxManager(std::shared_ptr storage) - : storage_(storage) {} +TxManager::TxManager(std::shared_ptr storage, + common::PartitionInfo partitionInfo) + : storage_(std::move(storage)), + conv_(), + partitionInfo_(std::move(partitionInfo)) {} MetaStatusCode TxManager::PreCheck(const std::vector& dentrys) { auto size = dentrys.size(); if (size != 1 && size != 2) { return MetaStatusCode::PARAM_ERROR; } else if (size == 2) { - if (dentrys[0].fsid() != dentrys[1].fsid() || - dentrys[0].txid() != dentrys[1].txid() || - dentrys[0].txsequence() != dentrys[1].txsequence()) { + if (dentrys[0].fsid() != dentrys[1].fsid() || + dentrys[0].txid() != dentrys[1].txid() || + dentrys[0].txsequence() != dentrys[1].txsequence()) { return MetaStatusCode::PARAM_ERROR; } } @@ -106,7 +95,39 @@ MetaStatusCode TxManager::PreCheck(const 
std::vector& dentrys) { return MetaStatusCode::OK; } -MetaStatusCode TxManager::HandleRenameTx(const std::vector& dentrys) { +void TxManager::SerializeRenameTx(const RenameTx& in, + PrepareRenameTxRequest* out) { + const auto* dentrys = in.GetDentrys(); + out->set_poolid(partitionInfo_.poolid()); + out->set_copysetid(partitionInfo_.copysetid()); + out->set_partitionid(partitionInfo_.partitionid()); + *out->mutable_dentrys() = {dentrys->begin(), dentrys->end()}; +} + +bool TxManager::Init() { + metaserver::TransactionRequest request; + auto s = storage_->GetPendingTx(&request); + if (s == MetaStatusCode::OK) { + auto txType = request.type(); + if (txType == metaserver::TransactionRequest::None) { + // NOTE: if tx type is none + // means that pending tx is empty + pendingTx_ = EMPTY_TX; + } else if (txType == metaserver::TransactionRequest::Rename) { + std::string txPayload = request.rawpayload(); + PrepareRenameTxRequest request; + conv_.ParseFromString(txPayload, &request); + RenameTx tx({request.dentrys().begin(), request.dentrys().end()}, + storage_); + pendingTx_ = tx; + } + return true; + } + return s == MetaStatusCode::NOT_FOUND; +} + +MetaStatusCode TxManager::HandleRenameTx(const std::vector& dentrys, + int64_t logIndex) { auto rc = PreCheck(dentrys); if (rc != MetaStatusCode::OK) { return rc; @@ -123,7 +144,7 @@ MetaStatusCode TxManager::HandleRenameTx(const std::vector& dentrys) { << txSequence << ", pending tx sequence = " << pendingTx.GetTxSequence(); return MetaStatusCode::HANDLE_PENDING_TX_FAILED; - } else if (!HandlePendingTx(txId, &pendingTx)) { + } else if (!HandlePendingTx(txId, &pendingTx, logIndex)) { LOG(ERROR) << "HandlePendingTx failed, pendingTx: " << pendingTx; return MetaStatusCode::HANDLE_PENDING_TX_FAILED; } @@ -135,11 +156,16 @@ MetaStatusCode TxManager::HandleRenameTx(const std::vector& dentrys) { if (!InsertPendingTx(renameTx)) { LOG(ERROR) << "InsertPendingTx failed, renameTx: " << renameTx; return MetaStatusCode::HANDLE_TX_FAILED; - 
} else if (!renameTx.Prepare()) { - LOG(ERROR) << "Prepare for RenameTx failed, renameTx: " << renameTx; - return MetaStatusCode::HANDLE_TX_FAILED; + } else { + PrepareRenameTxRequest request; + SerializeRenameTx(renameTx, &request); + std::string txPayload; + conv_.SerializeToString(request, &txPayload); + if (!renameTx.Prepare(txPayload, logIndex)) { + LOG(ERROR) << "Prepare for RenameTx failed, renameTx: " << renameTx; + return MetaStatusCode::HANDLE_TX_FAILED; + } } - return MetaStatusCode::OK; } @@ -166,11 +192,12 @@ bool TxManager::FindPendingTx(RenameTx* pendingTx) { return true; } -bool TxManager::HandlePendingTx(uint64_t txId, RenameTx* pendingTx) { +bool TxManager::HandlePendingTx(uint64_t txId, RenameTx* pendingTx, + int64_t logIndex) { if (txId > pendingTx->GetTxId()) { - return pendingTx->Commit(); + return pendingTx->Commit(logIndex); } - return pendingTx->Rollback(); + return pendingTx->Rollback(logIndex); } }; // namespace metaserver diff --git a/curvefs/src/metaserver/transaction.h b/curvefs/src/metaserver/transaction.h index c01ce4b994..4172288c6d 100644 --- a/curvefs/src/metaserver/transaction.h +++ b/curvefs/src/metaserver/transaction.h @@ -23,12 +23,14 @@ #ifndef CURVEFS_SRC_METASERVER_TRANSACTION_H_ #define CURVEFS_SRC_METASERVER_TRANSACTION_H_ -#include #include +#include #include +#include +#include -#include "src/common/concurrent/rw_lock.h" #include "curvefs/src/metaserver/dentry_storage.h" +#include "src/common/concurrent/rw_lock.h" namespace curvefs { namespace metaserver { @@ -40,11 +42,11 @@ class RenameTx { RenameTx(const std::vector& dentrys, std::shared_ptr storage); - bool Prepare(); + bool Prepare(const std::string& txPayload, int64_t logIndex); - bool Commit(); + bool Commit(int64_t logIndex); - bool Rollback(); + bool Rollback(int64_t logIndex); uint64_t GetTxId(); @@ -52,6 +54,8 @@ class RenameTx { std::vector* GetDentrys(); + const std::vector* GetDentrys() const; + bool operator==(const RenameTx& rhs); friend std::ostream& 
operator<<(std::ostream& os, const RenameTx& renameTx); @@ -69,9 +73,11 @@ class RenameTx { class TxManager { public: - explicit TxManager(std::shared_ptr storage); + explicit TxManager(std::shared_ptr storage, + common::PartitionInfo partitionInfo); - MetaStatusCode HandleRenameTx(const std::vector& dentrys); + MetaStatusCode HandleRenameTx(const std::vector& dentrys, + int64_t logIndex); MetaStatusCode PreCheck(const std::vector& dentrys); @@ -81,7 +87,11 @@ class TxManager { void DeletePendingTx(); - bool HandlePendingTx(uint64_t txId, RenameTx* pendingTx); + bool HandlePendingTx(uint64_t txId, RenameTx* pendingTx, int64_t logIndex); + + void SerializeRenameTx(const RenameTx& in, PrepareRenameTxRequest* out); + + bool Init(); private: RWLock rwLock_; @@ -89,6 +99,10 @@ class TxManager { std::shared_ptr storage_; RenameTx EMPTY_TX, pendingTx_; + + Converter conv_; + + common::PartitionInfo partitionInfo_; }; } // namespace metaserver diff --git a/curvefs/src/metaserver/trash.cpp b/curvefs/src/metaserver/trash.cpp index c7f1763009..db44dea151 100644 --- a/curvefs/src/metaserver/trash.cpp +++ b/curvefs/src/metaserver/trash.cpp @@ -231,7 +231,7 @@ MetaStatusCode TrashImpl::DeleteInodeAndData(const TrashItem &item) { return MetaStatusCode::S3_DELETE_ERR; } } - ret = inodeStorage_->Delete(Key4Inode(item.fsId, item.inodeId)); + ret = inodeStorage_->ForceDelete(Key4Inode(item.fsId, item.inodeId)); if (ret != MetaStatusCode::OK && ret != MetaStatusCode::NOT_FOUND) { LOG(ERROR) << "Delete Inode fail, fsId = " << item.fsId << ", inodeId = " << item.inodeId diff --git a/curvefs/src/tools/create/curvefs_create_topology_tool.cpp b/curvefs/src/tools/create/curvefs_create_topology_tool.cpp index 59e7a27588..4fb92e59df 100644 --- a/curvefs/src/tools/create/curvefs_create_topology_tool.cpp +++ b/curvefs/src/tools/create/curvefs_create_topology_tool.cpp @@ -282,7 +282,7 @@ int CurvefsBuildTopologyTool::ScanCluster() { for (auto it = poolInfos.begin(); it != poolInfos.end(); it++) 
{ auto ix = std::find_if( poolDatas.begin(), poolDatas.end(), - [it](Pool& data) { return data.name == it->poolname(); }); + [it](const Pool& data) { return data.name == it->poolname(); }); if (ix != poolDatas.end()) { poolDatas.erase(ix); } else { @@ -301,8 +301,8 @@ int CurvefsBuildTopologyTool::ScanCluster() { } for (auto it = zoneInfos.begin(); it != zoneInfos.end(); it++) { - auto ix = - std::find_if(zoneDatas.begin(), zoneDatas.end(), [it](Zone& data) { + auto ix = std::find_if( + zoneDatas.begin(), zoneDatas.end(), [it](const Zone& data) { return (data.poolName == it->poolname()) && (data.name == it->zonename()); }); @@ -325,7 +325,7 @@ int CurvefsBuildTopologyTool::ScanCluster() { for (auto it = serverInfos.begin(); it != serverInfos.end(); it++) { auto ix = std::find_if(serverDatas.begin(), serverDatas.end(), - [it](Server& data) { + [it](const Server& data) { return (data.name == it->hostname()) && (data.zoneName == it->zonename()) && (data.poolName == it->poolname()); diff --git a/curvefs/src/volume/free_extents.cpp b/curvefs/src/volume/free_extents.cpp index 5cfc609b1d..3a9f762e31 100644 --- a/curvefs/src/volume/free_extents.cpp +++ b/curvefs/src/volume/free_extents.cpp @@ -290,7 +290,7 @@ void FreeExtents::MarkUsedInternal(const uint64_t off, const uint64_t len) { auto iter = extents_.lower_bound(off); if (iter != extents_.end() && iter->first == off) { - if (iter->first == off && iter->second == len) { + if (iter->second == len) { extents_.erase(iter); } else { auto newOff = iter->first + len; diff --git a/curvefs/test/client/test_fuse_s3_client.cpp b/curvefs/test/client/test_fuse_s3_client.cpp index 1bb629f99f..d22369abfa 100644 --- a/curvefs/test/client/test_fuse_s3_client.cpp +++ b/curvefs/test/client/test_fuse_s3_client.cpp @@ -303,7 +303,8 @@ TEST_F(TestFuseS3Client, warmUp_inodeBadFd) { client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); client_->PutWarmFilelistTask( inodeid, - curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); 
+ curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk, "", + "", ""); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); LOG(INFO) << "ret:" << ret << " Warmup progress: " << progress.ToString(); @@ -360,7 +361,8 @@ TEST_F(TestFuseS3Client, warmUp_Warmfile_error_GetDentry01) { client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); client_->PutWarmFilelistTask( inodeid, - curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk, "", + "", ""); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); @@ -418,7 +420,8 @@ TEST_F(TestFuseS3Client, warmUp_Warmfile_error_GetDentry02) { client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); client_->PutWarmFilelistTask( inodeid, - curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk, "", + "", ""); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); @@ -476,7 +479,8 @@ TEST_F(TestFuseS3Client, warmUp_fetchDataEnqueue__error_getinode) { client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); client_->PutWarmFilelistTask( inodeid, - curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk, "", + "", ""); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); @@ -534,7 +538,8 @@ TEST_F(TestFuseS3Client, warmUp_fetchDataEnqueue_chunkempty) { client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); client_->PutWarmFilelistTask( inodeid, - curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk, "", + "", ""); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); @@ -599,7 +604,8 @@ TEST_F(TestFuseS3Client, 
warmUp_FetchDentry_TYPE_SYM_LINK) { client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); client_->PutWarmFilelistTask( inodeid, - curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk, "", + "", ""); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); @@ -666,7 +672,8 @@ TEST_F(TestFuseS3Client, warmUp_FetchDentry_error_TYPE_DIRECTORY) { client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); client_->PutWarmFilelistTask( inodeid, - curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk, "", + "", ""); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); @@ -730,7 +737,8 @@ TEST_F(TestFuseS3Client, warmUp_lookpath_multilevel) { client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); client_->PutWarmFilelistTask( inodeid, - curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk, "", + "", ""); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); @@ -781,7 +789,8 @@ TEST_F(TestFuseS3Client, warmUp_lookpath_unkown) { client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); client_->PutWarmFilelistTask( inodeid, - curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk, "", + "", ""); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); @@ -838,7 +847,8 @@ TEST_F(TestFuseS3Client, warmUp_FetchChildDentry_error_ListDentry) { client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); client_->PutWarmFilelistTask( inodeid, - curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk, "", + "", ""); warmup::WarmupProgress progress; bool ret 
= client_->GetWarmupProgress(inodeid, &progress); @@ -955,7 +965,8 @@ TEST_F(TestFuseS3Client, warmUp_FetchChildDentry_suc_ListDentry) { client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); client_->PutWarmFilelistTask( inodeid, - curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk, "", + "", ""); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); diff --git a/curvefs/test/mds/fs_manager_test.cpp b/curvefs/test/mds/fs_manager_test.cpp index 6794bca072..af5d78d80d 100644 --- a/curvefs/test/mds/fs_manager_test.cpp +++ b/curvefs/test/mds/fs_manager_test.cpp @@ -262,7 +262,8 @@ class FSManagerTest : public ::testing::Test { .WillOnce(DoAll( SetArgPointee<2>(response), Invoke(RpcService{}))); - EXPECT_CALL(*s3Adapter_, BucketExist()).WillOnce(Return(true)); + EXPECT_CALL(*s3Adapter_, PutObject(_, _)).WillOnce(Return(0)); + EXPECT_CALL(*s3Adapter_, DeleteObject(_)).WillOnce(Return(0)); return fsManager_->CreateFs(&s3Req, &s3FsInfo); } @@ -538,7 +539,8 @@ TEST_F(FSManagerTest, test_fail_create_s3_fs_on_failed_root_node_creation) { .WillOnce(DoAll( SetArgPointee<2>(response), Invoke(RpcService{}))); - EXPECT_CALL(*s3Adapter_, BucketExist()).WillOnce(Return(true)); + EXPECT_CALL(*s3Adapter_, PutObject(_, _)).WillOnce(Return(0)); + EXPECT_CALL(*s3Adapter_, DeleteObject(_)).WillOnce(Return(0)); EXPECT_CALL(*topoManager_, DeletePartition(_)) .WillOnce(Return(TopoStatusCode::TOPO_OK)); ret = fsManager_->CreateFs(&s3Req, &s3FsInfo); @@ -548,11 +550,10 @@ TEST_F(FSManagerTest, test_fail_create_s3_fs_on_failed_root_node_creation) { TEST_F(FSManagerTest, test_fail_create_duplicate_s3_fs_with_different_fsname) { CreateS3Fs(); - FSStatusCode ret; // create s3 fs fail std::string fsName3 = "fs3"; - EXPECT_CALL(*s3Adapter_, BucketExist()).WillOnce(Return(false)); + EXPECT_CALL(*s3Adapter_, PutObject(_, _)).WillOnce(Return(-1)); s3Req.set_fsname(fsName3); ret = 
fsManager_->CreateFs(&s3Req, &s3FsInfo); diff --git a/curvefs/test/mds/mds_service_test.cpp b/curvefs/test/mds/mds_service_test.cpp index e4c5150ba9..4c14a6a161 100644 --- a/curvefs/test/mds/mds_service_test.cpp +++ b/curvefs/test/mds/mds_service_test.cpp @@ -225,7 +225,8 @@ class MdsServiceTest : public ::testing::Test { .WillOnce(DoAll( SetArgPointee<2>(getLeaderResponse), Invoke(RpcService{}))); - EXPECT_CALL(*s3Adapter_, BucketExist()).WillOnce(Return(true)); + EXPECT_CALL(*s3Adapter_, PutObject(_, _)).WillOnce(Return(0)); + EXPECT_CALL(*s3Adapter_, DeleteObject(_)).WillOnce(Return(0)); cntl.set_timeout_ms(5000); stub_->CreateFs(&cntl, &createRequest, &createResponse, nullptr); diff --git a/curvefs/test/mds/topology/test_topology_manager.cpp b/curvefs/test/mds/topology/test_topology_manager.cpp index cd9097a166..86f4edc583 100644 --- a/curvefs/test/mds/topology/test_topology_manager.cpp +++ b/curvefs/test/mds/topology/test_topology_manager.cpp @@ -1632,6 +1632,105 @@ TEST_F(TestTopologyManager, test_CreatePartitionWithAvailableCopyset_Success) { ASSERT_EQ(1, info.GetPartitionNum()); } +TEST_F(TestTopologyManager, test_CreatePartitionAfterChangeInodeRange_Success) { + PoolIdType poolId = 0x11; + CopySetIdType copysetId = 0x51; + PartitionIdType partitionId = 0x61; + PartitionIdType partitionId1 = 0x62; + PartitionIdType partitionId2 = 0x63; + + PrepareAddPool(poolId); + PrepareAddZone(0x21, "zone1", poolId); + PrepareAddZone(0x22, "zone2", poolId); + PrepareAddZone(0x23, "zone3", poolId); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); + PrepareAddMetaServer(0x41, "ms1", "token1", 0x31, "127.0.0.1", 7777, "ip2", + 8888); + PrepareAddMetaServer(0x42, "ms2", "token2", 0x32, "127.0.0.1", 7777, "ip2", + 8888); + PrepareAddMetaServer(0x43, "ms3", "token3", 0x33, 
"127.0.0.1", 7777, "ip2", + 8888); + + std::set replicas; + replicas.insert(0x41); + replicas.insert(0x42); + replicas.insert(0x43); + PrepareAddCopySet(copysetId, poolId, replicas); + + EXPECT_CALL(*idGenerator_, GenPartitionId()) + .WillOnce(Return(partitionId)) + .WillOnce(Return(partitionId1)) + .WillOnce(Return(partitionId2)); + + std::string leader = "127.0.0.1:7777"; + + EXPECT_CALL(*storage_, StoragePartition(_)) + .Times(3) + .WillRepeatedly(Return(true)); + EXPECT_CALL(*storage_, StorageClusterInfo(_)) + .Times(3) + .WillRepeatedly(Return(true)); + + EXPECT_CALL(*mockMetaserverClient_, CreatePartition(_, _, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(Return(FSStatusCode::OK)); + + CreatePartitionRequest request; + CreatePartitionResponse response; + request.set_fsid(0x01); + request.set_count(1); + // first time partition inode size 100 [0-99] + TopologyOption topologyOption; + topologyOption.idNumberInPartition = 100; + serviceManager_->Init(topologyOption); + serviceManager_->CreatePartitions(&request, &response); + ASSERT_EQ(TopoStatusCode::TOPO_OK, response.statuscode()); + ASSERT_EQ(1, response.partitioninfolist().size()); + + Partition partition; + ASSERT_TRUE(topology_->GetPartition(partitionId, &partition)); + ASSERT_EQ(copysetId, partition.GetCopySetId()); + ASSERT_EQ(0, partition.GetIdStart()); + ASSERT_EQ(99, partition.GetIdEnd()); + + CopySetInfo info; + CopySetKey key(poolId, copysetId); + ASSERT_TRUE(topology_->GetCopySet(key, &info)); + ASSERT_EQ(copysetId, info.GetId()); + ASSERT_EQ(1, info.GetPartitionNum()); + + // second time partition inode size 80 [100-179] + topologyOption.idNumberInPartition = 80; + serviceManager_->Init(topologyOption); + response.clear_partitioninfolist(); + serviceManager_->CreatePartitions(&request, &response); + ASSERT_EQ(TopoStatusCode::TOPO_OK, response.statuscode()); + ASSERT_EQ(1, response.partitioninfolist().size()); + + ASSERT_TRUE(topology_->GetPartition(partitionId1, &partition)); + 
ASSERT_EQ(copysetId, partition.GetCopySetId()); + ASSERT_EQ(100, partition.GetIdStart()); + ASSERT_EQ(179, partition.GetIdEnd()); + + // third time partition inode size 120 [180-299] + topologyOption.idNumberInPartition = 120; + serviceManager_->Init(topologyOption); + response.clear_partitioninfolist(); + serviceManager_->CreatePartitions(&request, &response); + ASSERT_EQ(TopoStatusCode::TOPO_OK, response.statuscode()); + ASSERT_EQ(1, response.partitioninfolist().size()); + + ASSERT_TRUE(topology_->GetPartition(partitionId2, &partition)); + ASSERT_EQ(copysetId, partition.GetCopySetId()); + ASSERT_EQ(180, partition.GetIdStart()); + ASSERT_EQ(299, partition.GetIdEnd()); +} + TEST_F(TestTopologyManager, test_CreatePartitionsAndGetMinPartition_Success) { PoolIdType poolId = 0x11; diff --git a/curvefs/test/metaserver/copyset/concurrent_apply_queue_test.cpp b/curvefs/test/metaserver/copyset/concurrent_apply_queue_test.cpp index 28763dd63a..0f1ce9a68f 100644 --- a/curvefs/test/metaserver/copyset/concurrent_apply_queue_test.cpp +++ b/curvefs/test/metaserver/copyset/concurrent_apply_queue_test.cpp @@ -197,6 +197,7 @@ TEST(ApplyQueue, ConcurrentTest) { }; auto flush = [&concurrentapply, &stop, &testnum]() { + (void)testnum; while (!stop.load()) { std::this_thread::sleep_for(std::chrono::milliseconds(100)); concurrentapply.Flush(); diff --git a/curvefs/test/metaserver/copyset/copyset_node_block_group_test.cpp b/curvefs/test/metaserver/copyset/copyset_node_block_group_test.cpp index 4dec486122..979a0c654f 100644 --- a/curvefs/test/metaserver/copyset/copyset_node_block_group_test.cpp +++ b/curvefs/test/metaserver/copyset/copyset_node_block_group_test.cpp @@ -76,7 +76,7 @@ class CopysetNodeBlockGroupTest : public testing::Test { ASSERT_TRUE(nodeManager_->Init(options_)); } - void TearDown() { + void TearDown() override { system(std::string("rm -rf " + dataPath_).c_str()); } diff --git a/curvefs/test/metaserver/copyset/copyset_node_snapshot_test.cpp 
b/curvefs/test/metaserver/copyset/copyset_node_snapshot_test.cpp index a4e4f83026..b13392865c 100644 --- a/curvefs/test/metaserver/copyset/copyset_node_snapshot_test.cpp +++ b/curvefs/test/metaserver/copyset/copyset_node_snapshot_test.cpp @@ -108,7 +108,7 @@ class CopysetNodeRaftSnapshotTest : public testing::Test { options_.port = kTestPort; options_.dataUri = "local://" + dataPath_; - options_.raftNodeOptions.log_uri = "local://" + dataPath_; + options_.raftNodeOptions.log_uri = "local://" + dataPath_; options_.raftNodeOptions.raft_meta_uri = "local://" + dataPath_; options_.raftNodeOptions.snapshot_uri = "local://" + dataPath_; @@ -185,14 +185,14 @@ TEST_F(CopysetNodeRaftSnapshotTest, SnapshotSaveTest_SaveConfEpochFailed) { MockSnapshotWriter writer; FakeSnapshotSaveClosure done; - EXPECT_CALL(writer, get_path()) - .WillRepeatedly(Return(dataPath_)); + EXPECT_CALL(writer, get_path()).WillRepeatedly(Return(dataPath_)); EXPECT_CALL(*mockfs_, Open(_, _)) .WillOnce(Invoke([](const std::string&, int) { errno = EINVAL; return -1; })); - EXPECT_CALL(*mockMetaStore_, Save(_, _)).Times(0); + EXPECT_CALL(*mockMetaStore_, SaveMeta(_, _)).Times(0); + EXPECT_CALL(*mockMetaStore_, SaveData(_, _)).Times(0); node->on_snapshot_save(&writer, &done); @@ -202,7 +202,7 @@ TEST_F(CopysetNodeRaftSnapshotTest, SnapshotSaveTest_SaveConfEpochFailed) { EXPECT_EQ(EINVAL, done.status().error_code()); } -TEST_F(CopysetNodeRaftSnapshotTest, SnapshotSaveTest_MetaStoreSaveFailed) { +TEST_F(CopysetNodeRaftSnapshotTest, SnapshotSaveTest_MetaStoreSaveFailed_Meta) { ASSERT_TRUE(CreateOneCopyset()); auto* node = nodeManager_->GetCopysetNode(poolId_, copysetId_); @@ -214,31 +214,69 @@ TEST_F(CopysetNodeRaftSnapshotTest, SnapshotSaveTest_MetaStoreSaveFailed) { MockSnapshotWriter writer; FakeSnapshotSaveClosure done; - EXPECT_CALL(writer, get_path()) - .WillRepeatedly(Return(dataPath_)); - EXPECT_CALL(writer, add_file(_)) - .Times(1); - EXPECT_CALL(*mockfs_, Open(_, _)) - .WillOnce(Return(0)); + 
EXPECT_CALL(writer, get_path()).WillRepeatedly(Return(dataPath_)); + EXPECT_CALL(writer, add_file(_)).Times(1); + EXPECT_CALL(*mockfs_, Open(_, _)).WillOnce(Return(0)); EXPECT_CALL(*mockfs_, Write(_, Matcher(_), _, _)) .WillOnce(Invoke( [](int fd, const char*, uint64_t, int length) { return length; })); - EXPECT_CALL(*mockfs_, Fsync(_)) - .WillOnce(Return(0)); - EXPECT_CALL(*mockfs_, Close(_)) - .Times(1); - EXPECT_CALL(*mockMetaStore, Save(_, _)) - .WillOnce(Invoke([](std::string path, OnSnapshotSaveDoneClosure* done) { - done->SetError(MetaStatusCode::UNKNOWN_ERROR); - done->Run(); - return false; - })); + EXPECT_CALL(*mockfs_, Fsync(_)).WillOnce(Return(0)); + EXPECT_CALL(*mockfs_, Close(_)).Times(1); + EXPECT_CALL(*mockMetaStore, SaveMeta(_, _)) + .WillOnce( + Invoke([](const std::string& dir, std::vector* files) { + (void)dir; + (void)files; + return false; + })); node->on_snapshot_save(&writer, &done); done.WaitRunned(); EXPECT_FALSE(done.status().ok()); - EXPECT_EQ(MetaStatusCode::UNKNOWN_ERROR, done.status().error_code()); + EXPECT_EQ(MetaStatusCode::SAVE_META_FAIL, done.status().error_code()); +} + +TEST_F(CopysetNodeRaftSnapshotTest, SnapshotSaveTest_MetaStoreSaveFailed_Data) { + ASSERT_TRUE(CreateOneCopyset()); + + auto* node = nodeManager_->GetCopysetNode(poolId_, copysetId_); + ASSERT_NE(nullptr, node); + + auto mockMetaStore = mockMetaStore_.get(); + node->SetMetaStore(mockMetaStore_.release()); + + MockSnapshotWriter writer; + FakeSnapshotSaveClosure done; + + EXPECT_CALL(writer, get_path()).WillRepeatedly(Return(dataPath_)); + EXPECT_CALL(writer, add_file(_)).Times(1); + EXPECT_CALL(*mockfs_, Open(_, _)).WillOnce(Return(0)); + EXPECT_CALL(*mockfs_, Write(_, Matcher(_), _, _)) + .WillOnce(Invoke( + [](int fd, const char*, uint64_t, int length) { return length; })); + EXPECT_CALL(*mockfs_, Fsync(_)).WillOnce(Return(0)); + EXPECT_CALL(*mockfs_, Close(_)).Times(1); + EXPECT_CALL(*mockMetaStore, SaveMeta(_, _)) + .WillOnce( + Invoke([](const std::string& 
dir, std::vector* files) { + (void)dir; + (void)files; + return true; + })); + EXPECT_CALL(*mockMetaStore, SaveData(_, _)) + .WillOnce( + Invoke([](const std::string& dir, std::vector* files) { + (void)dir; + (void)files; + return false; + })); + + node->on_snapshot_save(&writer, &done); + done.WaitRunned(); + + EXPECT_FALSE(done.status().ok()); + EXPECT_EQ(MetaStatusCode::SAVE_META_FAIL, done.status().error_code()); // TODO(wuhanqing): check metric } @@ -255,25 +293,28 @@ TEST_F(CopysetNodeRaftSnapshotTest, SnapshotSaveTest_Success) { MockSnapshotWriter writer; FakeSnapshotSaveClosure done; - EXPECT_CALL(writer, get_path()) - .WillRepeatedly(Return(dataPath_)); - EXPECT_CALL(writer, add_file(_)) - .Times(1); - EXPECT_CALL(*mockfs_, Open(_, _)) - .WillOnce(Return(0)); + EXPECT_CALL(writer, get_path()).WillRepeatedly(Return(dataPath_)); + EXPECT_CALL(writer, add_file(_)).Times(1); + EXPECT_CALL(*mockfs_, Open(_, _)).WillOnce(Return(0)); EXPECT_CALL(*mockfs_, Write(_, Matcher(_), _, _)) .WillOnce(Invoke( [](int fd, const char*, uint64_t, int length) { return length; })); - EXPECT_CALL(*mockfs_, Fsync(_)) - .WillOnce(Return(0)); - EXPECT_CALL(*mockfs_, Close(_)) - .Times(1); - EXPECT_CALL(*mockMetaStore, Save(_, _)) - .WillOnce(Invoke([](std::string path, OnSnapshotSaveDoneClosure* done) { - done->SetSuccess(); - done->Run(); - return true; - })); + EXPECT_CALL(*mockfs_, Fsync(_)).WillOnce(Return(0)); + EXPECT_CALL(*mockfs_, Close(_)).Times(1); + EXPECT_CALL(*mockMetaStore, SaveMeta(_, _)) + .WillOnce( + Invoke([](const std::string& dir, std::vector* files) { + (void)dir; + (void)files; + return true; + })); + EXPECT_CALL(*mockMetaStore, SaveData(_, _)) + .WillOnce( + Invoke([](const std::string& dir, std::vector* files) { + (void)dir; + (void)files; + return true; + })); node->on_snapshot_save(&writer, &done); done.WaitRunned(); @@ -291,12 +332,9 @@ TEST_F(CopysetNodeRaftSnapshotTest, SnapshotLoadTest_LoadConfFileFailed) { ASSERT_NE(nullptr, node); MockSnapshotReader 
reader; - EXPECT_CALL(reader, get_path()) - .WillRepeatedly(Return(dataPath_)); - EXPECT_CALL(*mockfs_, FileExists(_)) - .WillOnce(Return(true)); - EXPECT_CALL(*mockfs_, Open(_, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(reader, get_path()).WillRepeatedly(Return(dataPath_)); + EXPECT_CALL(*mockfs_, FileExists(_)).WillOnce(Return(true)); + EXPECT_CALL(*mockfs_, Open(_, _)).WillOnce(Return(-1)); ASSERT_FALSE(node->IsLoading()); EXPECT_NE(0, node->on_snapshot_load(&reader)); @@ -314,16 +352,11 @@ TEST_F(CopysetNodeRaftSnapshotTest, node->SetMetaStore(mockMetaStore_.release()); MockSnapshotReader reader; - EXPECT_CALL(reader, get_path()) - .WillRepeatedly(Return(dataPath_)); - EXPECT_CALL(*mockfs_, FileExists(_)) - .WillOnce(Return(false)); - EXPECT_CALL(*mockfs_, Open(_, _)) - .Times(0); - EXPECT_CALL(*mockMetaStore, Clear()) - .Times(1); - EXPECT_CALL(*mockMetaStore, Load(_)) - .WillOnce(Return(true)); + EXPECT_CALL(reader, get_path()).WillRepeatedly(Return(dataPath_)); + EXPECT_CALL(*mockfs_, FileExists(_)).WillOnce(Return(false)); + EXPECT_CALL(*mockfs_, Open(_, _)).Times(0); + EXPECT_CALL(*mockMetaStore, Clear()).Times(1); + EXPECT_CALL(*mockMetaStore, Load(_)).WillOnce(Return(true)); braft::SnapshotMeta meta; meta.set_last_included_index(100); @@ -367,18 +400,12 @@ TEST_F(CopysetNodeRaftSnapshotTest, SnapshotLoadTest_MetaStoreLoadFailed) { node->SetMetaStore(mockMetaStore_.release()); MockSnapshotReader reader; - EXPECT_CALL(reader, get_path()) - .WillRepeatedly(Return(dataPath_)); - EXPECT_CALL(*mockfs_, FileExists(_)) - .WillOnce(Return(false)); - EXPECT_CALL(*mockfs_, Open(_, _)) - .Times(0); - EXPECT_CALL(*mockMetaStore, Clear()) - .Times(1); - EXPECT_CALL(*mockMetaStore, Load(_)) - .WillOnce(Return(false)); - EXPECT_CALL(reader, load_meta(_)) - .Times(0); + EXPECT_CALL(reader, get_path()).WillRepeatedly(Return(dataPath_)); + EXPECT_CALL(*mockfs_, FileExists(_)).WillOnce(Return(false)); + EXPECT_CALL(*mockfs_, Open(_, _)).Times(0); + 
EXPECT_CALL(*mockMetaStore, Clear()).Times(1); + EXPECT_CALL(*mockMetaStore, Load(_)).WillOnce(Return(false)); + EXPECT_CALL(reader, load_meta(_)).Times(0); ASSERT_FALSE(node->IsLoading()); EXPECT_NE(0, node->on_snapshot_load(&reader)); @@ -387,7 +414,7 @@ TEST_F(CopysetNodeRaftSnapshotTest, SnapshotLoadTest_MetaStoreLoadFailed) { node->SetMetaStore(nullptr); } -void RunOnSnapshotLoad(CopysetNode* node, MockSnapshotReader *reader, +void RunOnSnapshotLoad(CopysetNode* node, MockSnapshotReader* reader, uint32_t sleepSec) { sleep(sleepSec); ASSERT_FALSE(node->IsLoading()); @@ -402,7 +429,7 @@ void RunGetPartitionInfoList(CopysetNode* node, uint32_t sleepSec, ASSERT_EQ(node->GetPartitionInfoList(&partitionInfoList), expectedValue); } -void RunGetBlockStatInfo(CopysetNode *node, uint32_t sleepSec, +void RunGetBlockStatInfo(CopysetNode* node, uint32_t sleepSec, bool expectedValue) { sleep(sleepSec); std::map blockStatInfoMap; @@ -421,21 +448,16 @@ TEST_F(CopysetNodeRaftSnapshotTest, SnapshotLoadTest_MetaStoreLoadSuccess1) { node->SetMetaStore(mockMetaStore_.release()); MockSnapshotReader reader; - EXPECT_CALL(reader, get_path()) - .WillRepeatedly(Return(dataPath_)); - EXPECT_CALL(*mockfs_, FileExists(_)) - .WillOnce(Return(false)); - EXPECT_CALL(*mockfs_, Open(_, _)) - .Times(0); - EXPECT_CALL(*mockMetaStore, Clear()) - .Times(1); + EXPECT_CALL(reader, get_path()).WillRepeatedly(Return(dataPath_)); + EXPECT_CALL(*mockfs_, FileExists(_)).WillOnce(Return(false)); + EXPECT_CALL(*mockfs_, Open(_, _)).Times(0); + EXPECT_CALL(*mockMetaStore, Clear()).Times(1); EXPECT_CALL(*mockMetaStore, Load(_)) - .WillOnce(Invoke([](const std::string& pathname){ + .WillOnce(Invoke([](const std::string& pathname) { sleep(3); return true; })); - EXPECT_CALL(reader, load_meta(_)) - .Times(1); + EXPECT_CALL(reader, load_meta(_)).Times(1); std::thread thread1(RunOnSnapshotLoad, node, &reader, 0); std::thread thread2(RunGetPartitionInfoList, node, 3, false); @@ -458,23 +480,17 @@ 
TEST_F(CopysetNodeRaftSnapshotTest, SnapshotLoadTest_MetaStoreLoadSuccess2) { node->SetMetaStore(mockMetaStore_.release()); MockSnapshotReader reader; - EXPECT_CALL(reader, get_path()) - .WillRepeatedly(Return(dataPath_)); - EXPECT_CALL(*mockfs_, FileExists(_)) - .WillOnce(Return(false)); - EXPECT_CALL(*mockfs_, Open(_, _)) - .Times(0); - EXPECT_CALL(*mockMetaStore, Clear()) - .Times(1); + EXPECT_CALL(reader, get_path()).WillRepeatedly(Return(dataPath_)); + EXPECT_CALL(*mockfs_, FileExists(_)).WillOnce(Return(false)); + EXPECT_CALL(*mockfs_, Open(_, _)).Times(0); + EXPECT_CALL(*mockMetaStore, Clear()).Times(1); EXPECT_CALL(*mockMetaStore, Load(_)) - .WillOnce(Invoke([](const std::string& pathname){ + .WillOnce(Invoke([](const std::string& pathname) { sleep(1); return true; })); - EXPECT_CALL(reader, load_meta(_)) - .Times(1); - EXPECT_CALL(*mockMetaStore, GetPartitionInfoList(_)) - .WillOnce(Return(true)); + EXPECT_CALL(reader, load_meta(_)).Times(1); + EXPECT_CALL(*mockMetaStore, GetPartitionInfoList(_)).WillOnce(Return(true)); EXPECT_CALL(*mockMetaStore, GetPartitionSnap(_)) .Times(2) .WillOnce(Return(false)) @@ -502,23 +518,17 @@ TEST_F(CopysetNodeRaftSnapshotTest, SnapshotLoadTest_MetaStoreLoadSuccess3) { node->SetMetaStore(mockMetaStore_.release()); MockSnapshotReader reader; - EXPECT_CALL(reader, get_path()) - .WillRepeatedly(Return(dataPath_)); - EXPECT_CALL(*mockfs_, FileExists(_)) - .WillOnce(Return(false)); - EXPECT_CALL(*mockfs_, Open(_, _)) - .Times(0); - EXPECT_CALL(*mockMetaStore, Clear()) - .Times(1); + EXPECT_CALL(reader, get_path()).WillRepeatedly(Return(dataPath_)); + EXPECT_CALL(*mockfs_, FileExists(_)).WillOnce(Return(false)); + EXPECT_CALL(*mockfs_, Open(_, _)).Times(0); + EXPECT_CALL(*mockMetaStore, Clear()).Times(1); EXPECT_CALL(*mockMetaStore, Load(_)) - .WillOnce(Invoke([](const std::string& pathname){ + .WillOnce(Invoke([](const std::string& pathname) { sleep(1); return true; })); - EXPECT_CALL(reader, load_meta(_)) - .Times(1); - 
EXPECT_CALL(*mockMetaStore, GetPartitionInfoList(_)) - .WillOnce(Return(true)); + EXPECT_CALL(reader, load_meta(_)).Times(1); + EXPECT_CALL(*mockMetaStore, GetPartitionInfoList(_)).WillOnce(Return(true)); std::thread thread1(RunOnSnapshotLoad, node, &reader, 2); std::thread thread2(RunGetPartitionInfoList, node, 1, true); diff --git a/curvefs/test/metaserver/copyset/meta_operator_test.cpp b/curvefs/test/metaserver/copyset/meta_operator_test.cpp index 010e69ef38..0dae55632b 100644 --- a/curvefs/test/metaserver/copyset/meta_operator_test.cpp +++ b/curvefs/test/metaserver/copyset/meta_operator_test.cpp @@ -45,7 +45,9 @@ const int kDummyServerPort = 32000; template -MetaStatusCode FakeOnApplyFunc(const RequestT* request, ResponseT* response) { +MetaStatusCode FakeOnApplyFunc(const RequestT* request, ResponseT* response, + int64_t logIndex) { + (void)logIndex; response->set_statuscode(code); return code; } @@ -63,9 +65,7 @@ class FakeClosure : public google::protobuf::Closure { cond_.wait(lk, [this]() { return runned_; }); } - bool Runned() const { - return runned_; - } + bool Runned() const { return runned_; } private: std::mutex mtx_; @@ -89,14 +89,16 @@ std::string Exec(const std::string& cmd) { using ::curve::common::TimeUtility; using ::testing::_; +using ::testing::AtLeast; using ::testing::DoAll; using ::testing::Invoke; using ::testing::Return; -using ::testing::AtLeast; using ::testing::SetArgPointee; class MetaOperatorTest : public testing::Test { protected: + void SetUp() override { logIndex_ = 0; } + static void SetUpTestCase() { ASSERT_EQ(0, brpc::StartDummyServerAt(kDummyServerPort)); } @@ -120,6 +122,7 @@ class MetaOperatorTest : public testing::Test { protected: MockCopysetNodeManager mockNodeManager_; + int64_t logIndex_; }; TEST_F(MetaOperatorTest, OperatorTypeTest) { @@ -162,21 +165,20 @@ TEST_F(MetaOperatorTest, OnApplyErrorTest) { mock::MockMetaStore* mockMetaStore = new mock::MockMetaStore(); node.SetMetaStore(mockMetaStore); - 
ON_CALL(*mockMetaStore, Clear()) - .WillByDefault(Return(true)); + ON_CALL(*mockMetaStore, Clear()).WillByDefault(Return(true)); brpc::Controller cntl; #define OPERATOR_ON_APPLY_TEST(TYPE) \ { \ - EXPECT_CALL(*mockMetaStore, TYPE(_, _)) \ + EXPECT_CALL(*mockMetaStore, TYPE(_, _, _)) \ .WillOnce(Invoke(FakeOnApplyFunc)); \ TYPE##Request request; \ TYPE##Response response; \ FakeClosure closure; \ auto op = absl::make_unique(&node, &cntl, &request, \ &response, nullptr); \ - op->OnApply(1, &closure, TimeUtility::GetTimeofDayUs()); \ + op->OnApply(logIndex_++, &closure, TimeUtility::GetTimeofDayUs()); \ closure.WaitRunned(); \ EXPECT_EQ(MetaStatusCode::UNKNOWN_ERROR, response.statuscode()); \ } @@ -201,19 +203,21 @@ TEST_F(MetaOperatorTest, OnApplyErrorTest) { // it's only for GetOrModifyS3ChunkInfo() { - EXPECT_CALL(*mockMetaStore, GetOrModifyS3ChunkInfo(_, _, _)) + EXPECT_CALL(*mockMetaStore, GetOrModifyS3ChunkInfo(_, _, _, _)) .WillOnce(Invoke([&](const GetOrModifyS3ChunkInfoRequest* request, GetOrModifyS3ChunkInfoResponse* response, - std::shared_ptr* iterator) { - response->set_statuscode(MetaStatusCode::UNKNOWN_ERROR); - return MetaStatusCode::UNKNOWN_ERROR; - })); + std::shared_ptr* iterator, + int64_t logIndex) { + (void)logIndex; + response->set_statuscode(MetaStatusCode::UNKNOWN_ERROR); + return MetaStatusCode::UNKNOWN_ERROR; + })); GetOrModifyS3ChunkInfoRequest request; GetOrModifyS3ChunkInfoResponse response; FakeClosure closure; auto op = absl::make_unique( &node, &cntl, &request, &response, nullptr); - op->OnApply(1, &closure, TimeUtility::GetTimeofDayUs()); + op->OnApply(logIndex_++, &closure, TimeUtility::GetTimeofDayUs()); closure.WaitRunned(); EXPECT_EQ(MetaStatusCode::UNKNOWN_ERROR, response.statuscode()); } @@ -289,18 +293,17 @@ TEST_F(MetaOperatorTest, OnApplyFromLogErrorTest) { mock::MockMetaStore* mockMetaStore = new mock::MockMetaStore(); node.SetMetaStore(mockMetaStore); - ON_CALL(*mockMetaStore, Clear()) - .WillByDefault(Return(true)); + 
ON_CALL(*mockMetaStore, Clear()).WillByDefault(Return(true)); brpc::Controller cntl; #define OPERATOR_ON_APPLY_FROM_LOG_TEST(TYPE) \ { \ - EXPECT_CALL(*mockMetaStore, TYPE(_, _)) \ + EXPECT_CALL(*mockMetaStore, TYPE(_, _, _)) \ .WillOnce(Return(MetaStatusCode::UNKNOWN_ERROR)); \ TYPE##Request request; \ auto op = absl::make_unique(&node, &request, false); \ - op->OnApplyFromLog(TimeUtility::GetTimeofDayUs()); \ + op->OnApplyFromLog(logIndex_++, TimeUtility::GetTimeofDayUs()); \ op.release(); \ } @@ -319,21 +322,21 @@ TEST_F(MetaOperatorTest, OnApplyFromLogErrorTest) { // its only for GetOrModifyS3ChunkInfo() { - EXPECT_CALL(*mockMetaStore, GetOrModifyS3ChunkInfo(_, _, _)) + EXPECT_CALL(*mockMetaStore, GetOrModifyS3ChunkInfo(_, _, _, _)) .WillOnce(Return(MetaStatusCode::UNKNOWN_ERROR)); GetOrModifyS3ChunkInfoRequest request; auto op = absl::make_unique( &node, &request, false); - op->OnApplyFromLog(TimeUtility::GetTimeofDayUs()); + op->OnApplyFromLog(logIndex_++, TimeUtility::GetTimeofDayUs()); op.release(); } #define OPERATOR_ON_APPLY_FROM_LOG_DO_NOTHING_TEST(TYPE) \ { \ - EXPECT_CALL(*mockMetaStore, TYPE(_, _)).Times(0); \ + EXPECT_CALL(*mockMetaStore, TYPE(_, _, _)).Times(0); \ TYPE##Request request; \ auto op = absl::make_unique(&node, &request, false); \ - op->OnApplyFromLog(TimeUtility::GetTimeofDayUs()); \ + op->OnApplyFromLog(logIndex_++, TimeUtility::GetTimeofDayUs()); \ op.release(); \ } @@ -371,26 +374,30 @@ TEST_F(MetaOperatorTest, OnApplyFromLogErrorTest) { "/vars | grep " "op_apply_from_log_pool_100_copyset_100_delete_inode_total_error", 1)); - EXPECT_TRUE(CheckMetric( - "curl -s 0.0.0.0:" + std::to_string(kDummyServerPort) + - "/vars | grep " - "op_apply_from_log_pool_100_copyset_100_create_root_inode_total_error", - 1)); - EXPECT_TRUE(CheckMetric( - "curl -s 0.0.0.0:" + std::to_string(kDummyServerPort) + - "/vars | grep " - "op_apply_from_log_pool_100_copyset_100_create_partition_total_error", - 1)); - EXPECT_TRUE(CheckMetric( - "curl -s 0.0.0.0:" + 
std::to_string(kDummyServerPort) + - "/vars | grep " - "op_apply_from_log_pool_100_copyset_100_delete_partition_total_error", - 1)); - EXPECT_TRUE(CheckMetric( - "curl -s 0.0.0.0:" + std::to_string(kDummyServerPort) + - "/vars | grep " - "op_apply_from_log_pool_100_copyset_100_prepare_rename_tx_total_error", - 1)); + EXPECT_TRUE( + CheckMetric("curl -s 0.0.0.0:" + std::to_string(kDummyServerPort) + + "/vars | grep " + "op_apply_from_log_pool_100_copyset_100_create_root_" + "inode_total_error", + 1)); + EXPECT_TRUE( + CheckMetric("curl -s 0.0.0.0:" + std::to_string(kDummyServerPort) + + "/vars | grep " + "op_apply_from_log_pool_100_copyset_100_create_" + "partition_total_error", + 1)); + EXPECT_TRUE( + CheckMetric("curl -s 0.0.0.0:" + std::to_string(kDummyServerPort) + + "/vars | grep " + "op_apply_from_log_pool_100_copyset_100_delete_" + "partition_total_error", + 1)); + EXPECT_TRUE( + CheckMetric("curl -s 0.0.0.0:" + std::to_string(kDummyServerPort) + + "/vars | grep " + "op_apply_from_log_pool_100_copyset_100_prepare_rename_" + "tx_total_error", + 1)); } TEST_F(MetaOperatorTest, PropostTest_IsNotLeader) { @@ -425,8 +432,7 @@ TEST_F(MetaOperatorTest, PropostTest_RequestCanBypassProcess) { options.localFileSystem = &localFs; options.storageOptions.type = "memory"; - EXPECT_CALL(localFs, Mkdir(_)) - .WillOnce(Return(0)); + EXPECT_CALL(localFs, Mkdir(_)).WillOnce(Return(0)); EXPECT_TRUE(node.Init(options)); auto* mockMetaStore = new mock::MockMetaStore(); @@ -434,15 +440,11 @@ TEST_F(MetaOperatorTest, PropostTest_RequestCanBypassProcess) { auto* mockRaftNode = new MockRaftNode(); node.SetRaftNode(mockRaftNode); - ON_CALL(*mockMetaStore, Clear()) - .WillByDefault(Return(true)); - EXPECT_CALL(*mockRaftNode, apply(_)) - .Times(0); - EXPECT_CALL(*mockRaftNode, shutdown(_)) - .Times(AtLeast(1)); - EXPECT_CALL(*mockRaftNode, join()) - .Times(AtLeast(1)); - EXPECT_CALL(*mockMetaStore, GetDentry(_, _)) + ON_CALL(*mockMetaStore, Clear()).WillByDefault(Return(true)); + 
EXPECT_CALL(*mockRaftNode, apply(_)).Times(0); + EXPECT_CALL(*mockRaftNode, shutdown(_)).Times(AtLeast(1)); + EXPECT_CALL(*mockRaftNode, join()).Times(AtLeast(1)); + EXPECT_CALL(*mockMetaStore, GetDentry(_, _, _)) .WillOnce(Return(MetaStatusCode::OK)); braft::LeaderLeaseStatus status; @@ -483,8 +485,7 @@ TEST_F(MetaOperatorTest, PropostTest_IsNotLeaseLeader) { options.localFileSystem = &localFs; options.storageOptions.type = "memory"; - EXPECT_CALL(localFs, Mkdir(_)) - .WillOnce(Return(0)); + EXPECT_CALL(localFs, Mkdir(_)).WillOnce(Return(0)); EXPECT_TRUE(node.Init(options)); auto* mockRaftNode = new MockRaftNode(); @@ -519,8 +520,7 @@ TEST_F(MetaOperatorTest, PropostTest_PropostTaskFailed) { options.localFileSystem = &localFs; options.storageOptions.type = "memory"; - EXPECT_CALL(localFs, Mkdir(_)) - .WillOnce(Return(0)); + EXPECT_CALL(localFs, Mkdir(_)).WillOnce(Return(0)); EXPECT_TRUE(node.Init(options)); auto* mockRaftNode = new MockRaftNode(); diff --git a/curvefs/test/metaserver/dentry_manager_test.cpp b/curvefs/test/metaserver/dentry_manager_test.cpp index 5abbb4939a..f113eca4dc 100644 --- a/curvefs/test/metaserver/dentry_manager_test.cpp +++ b/curvefs/test/metaserver/dentry_manager_test.cpp @@ -36,9 +36,9 @@ namespace curvefs { namespace metaserver { using ::curvefs::metaserver::storage::KVStorage; -using ::curvefs::metaserver::storage::StorageOptions; -using ::curvefs::metaserver::storage::RocksDBStorage; using ::curvefs::metaserver::storage::RandomStoragePath; +using ::curvefs::metaserver::storage::RocksDBStorage; +using ::curvefs::metaserver::storage::StorageOptions; namespace { auto localfs = curve::fs::Ext4FileSystemImpl::getInstance(); @@ -58,11 +58,16 @@ class DentryManagerTest : public ::testing::Test { options.localFileSystem = localfs.get(); kvStorage_ = std::make_shared(options); ASSERT_TRUE(kvStorage_->Open()); - dentryStorage_ = std::make_shared( - kvStorage_, nameGenerator_, 0); - txManager_ = std::make_shared(dentryStorage_); - dentryManager_ 
= std::make_shared( - dentryStorage_, txManager_); + dentryStorage_ = + std::make_shared(kvStorage_, nameGenerator_, 0); + common::PartitionInfo partitionInfo; + partitionInfo.set_partitionid(1); + txManager_ = std::make_shared(dentryStorage_, partitionInfo); + dentryManager_ = + std::make_shared(dentryStorage_, txManager_); + ASSERT_TRUE(dentryManager_->Init()); + ASSERT_TRUE(txManager_->Init()); + logIndex_ = 0; } void TearDown() override { @@ -84,12 +89,8 @@ class DentryManagerTest : public ::testing::Test { return result; } - Dentry GenDentry(uint32_t fsId, - uint64_t parentId, - const std::string& name, - uint64_t txId, - uint64_t inodeId, - bool deleteMarkFlag) { + Dentry GenDentry(uint32_t fsId, uint64_t parentId, const std::string& name, + uint64_t txId, uint64_t inodeId, bool deleteMarkFlag) { Dentry dentry; dentry.set_fsid(fsId); dentry.set_parentinodeid(parentId); @@ -108,17 +109,19 @@ class DentryManagerTest : public ::testing::Test { std::shared_ptr dentryStorage_; std::shared_ptr dentryManager_; std::shared_ptr txManager_; + int64_t logIndex_; }; TEST_F(DentryManagerTest, CreateDentry) { // CASE 1: CreateDentry: success auto dentry = GenDentry(1, 0, "A", 0, 1, false); - ASSERT_EQ(dentryManager_->CreateDentry(dentry), MetaStatusCode::OK); + ASSERT_EQ(dentryManager_->CreateDentry(dentry, logIndex_++), + MetaStatusCode::OK); ASSERT_EQ(dentryStorage_->Size(), 1); // CASE 2: CreateDentry: dentry exist auto dentry2 = GenDentry(1, 0, "A", 0, 2, false); - ASSERT_EQ(dentryManager_->CreateDentry(dentry2), + ASSERT_EQ(dentryManager_->CreateDentry(dentry2, logIndex_++), MetaStatusCode::DENTRY_EXIST); ASSERT_EQ(dentryStorage_->Size(), 1); } @@ -126,18 +129,22 @@ TEST_F(DentryManagerTest, CreateDentry) { TEST_F(DentryManagerTest, DeleteDentry) { // CASE 1: DeleteDentry: not found auto dentry = GenDentry(1, 0, "A", 0, 1, false); - ASSERT_EQ(dentryManager_->DeleteDentry(dentry), MetaStatusCode::NOT_FOUND); + ASSERT_EQ(dentryManager_->DeleteDentry(dentry, 
logIndex_++), + MetaStatusCode::NOT_FOUND); // CASE 2: DeleteDentry: sucess - ASSERT_EQ(dentryManager_->CreateDentry(dentry), MetaStatusCode::OK); + ASSERT_EQ(dentryManager_->CreateDentry(dentry, logIndex_++), + MetaStatusCode::OK); ASSERT_EQ(dentryStorage_->Size(), 1); - ASSERT_EQ(dentryManager_->DeleteDentry(dentry), MetaStatusCode::OK); + ASSERT_EQ(dentryManager_->DeleteDentry(dentry, logIndex_++), + MetaStatusCode::OK); ASSERT_EQ(dentryStorage_->Size(), 0); } TEST_F(DentryManagerTest, ClearDentry) { auto dentry = GenDentry(1, 0, "A", 0, 1, false); - ASSERT_EQ(dentryManager_->CreateDentry(dentry), MetaStatusCode::OK); + ASSERT_EQ(dentryManager_->CreateDentry(dentry, logIndex_++), + MetaStatusCode::OK); ASSERT_EQ(dentryStorage_->Size(), 1); dentryManager_->ClearDentry(); ASSERT_EQ(dentryStorage_->Size(), 0); @@ -149,7 +156,8 @@ TEST_F(DentryManagerTest, GetDentry) { ASSERT_EQ(dentryManager_->GetDentry(&dentry), MetaStatusCode::NOT_FOUND); // CASE 2: GetDentry: success - ASSERT_EQ(dentryManager_->CreateDentry(dentry), MetaStatusCode::OK); + ASSERT_EQ(dentryManager_->CreateDentry(dentry, logIndex_++), + MetaStatusCode::OK); ASSERT_EQ(dentryStorage_->Size(), 1); dentry = GenDentry(1, 0, "A", 0, 0, false); ASSERT_EQ(dentryManager_->GetDentry(&dentry), MetaStatusCode::OK); @@ -159,8 +167,10 @@ TEST_F(DentryManagerTest, GetDentry) { TEST_F(DentryManagerTest, ListDentry) { auto dentry1 = GenDentry(1, 0, "A", 0, 1, false); auto dentry2 = GenDentry(1, 0, "B", 0, 2, false); - ASSERT_EQ(dentryManager_->CreateDentry(dentry1), MetaStatusCode::OK); - ASSERT_EQ(dentryManager_->CreateDentry(dentry2), MetaStatusCode::OK); + ASSERT_EQ(dentryManager_->CreateDentry(dentry1, logIndex_++), + MetaStatusCode::OK); + ASSERT_EQ(dentryManager_->CreateDentry(dentry2, logIndex_++), + MetaStatusCode::OK); ASSERT_EQ(dentryStorage_->Size(), 2); std::vector dentrys; @@ -175,15 +185,15 @@ TEST_F(DentryManagerTest, ListDentry) { TEST_F(DentryManagerTest, HandleRenameTx) { // CASE 1: 
HandleRenameTx: param error auto dentrys = std::vector(); - auto rc = txManager_->HandleRenameTx(dentrys); + auto rc = txManager_->HandleRenameTx(dentrys, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::PARAM_ERROR); // CASE 2: HandleRenameTx success - dentrys = std::vector { + dentrys = std::vector{ // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } GenDentry(1, 0, "A", 1, 1, false), }; - rc = txManager_->HandleRenameTx(dentrys); + rc = txManager_->HandleRenameTx(dentrys, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(dentryStorage_->Size(), 1); } diff --git a/curvefs/test/metaserver/dentry_storage_test.cpp b/curvefs/test/metaserver/dentry_storage_test.cpp index 9bb46d6ef5..4464f20c76 100644 --- a/curvefs/test/metaserver/dentry_storage_test.cpp +++ b/curvefs/test/metaserver/dentry_storage_test.cpp @@ -34,11 +34,10 @@ namespace curvefs { namespace metaserver { using ::curvefs::metaserver::storage::KVStorage; -using ::curvefs::metaserver::storage::StorageOptions; -using ::curvefs::metaserver::storage::RocksDBStorage; using ::curvefs::metaserver::storage::NameGenerator; using ::curvefs::metaserver::storage::RandomStoragePath; -using TX_OP_TYPE = DentryStorage::TX_OP_TYPE; +using ::curvefs::metaserver::storage::RocksDBStorage; +using ::curvefs::metaserver::storage::StorageOptions; namespace { auto localfs = curve::fs::Ext4FileSystemImpl::getInstance(); @@ -48,12 +47,14 @@ class DentryStorageTest : public ::testing::Test { protected: void SetUp() override { nameGenerator_ = std::make_shared(1); - dataDir_ = RandomStoragePath();; + dataDir_ = RandomStoragePath(); + StorageOptions options; options.dataDir = dataDir_; options.localFileSystem = localfs.get(); kvStorage_ = std::make_shared(options); ASSERT_TRUE(kvStorage_->Open()); + logIndex_ = 0; } void TearDown() override { @@ -76,12 +77,8 @@ class DentryStorageTest : public ::testing::Test { return result; } - Dentry GenDentry(uint32_t fsId, - uint64_t parentId, - const std::string& name, - uint64_t 
txId, - uint64_t inodeId, - bool deleteMarkFlag, + Dentry GenDentry(uint32_t fsId, uint64_t parentId, const std::string& name, + uint64_t txId, uint64_t inodeId, bool deleteMarkFlag, FsFileType type = FsFileType::TYPE_FILE) { Dentry dentry; dentry.set_fsid(fsId); @@ -96,10 +93,13 @@ class DentryStorageTest : public ::testing::Test { void InsertDentrys(DentryStorage* storage, const std::vector&& dentrys) { - for (const auto& dentry : dentrys) { - auto rc = storage->HandleTx(TX_OP_TYPE::PREPARE, dentry); - ASSERT_EQ(rc, MetaStatusCode::OK); - } + // NOTE: store real transaction is unnecessary + metaserver::TransactionRequest request; + request.set_type(metaserver::TransactionRequest::None); + request.set_rawpayload(""); + + auto rc = storage->PrepareTx(dentrys, request, logIndex_++); + ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(storage->Size(), dentrys.size()); } @@ -112,11 +112,12 @@ class DentryStorageTest : public ::testing::Test { std::string dataDir_; std::shared_ptr nameGenerator_; std::shared_ptr kvStorage_; + int64_t logIndex_; }; TEST_F(DentryStorageTest, Insert) { DentryStorage storage(kvStorage_, nameGenerator_, 0); - + ASSERT_TRUE(storage.Init()); Dentry dentry; dentry.set_fsid(1); dentry.set_parentinodeid(1); @@ -132,30 +133,42 @@ TEST_F(DentryStorageTest, Insert) { dentry2.set_txid(0); // CASE 1: insert success - ASSERT_EQ(storage.Insert(dentry), MetaStatusCode::OK); + ASSERT_EQ(storage.Insert(dentry, logIndex_++), MetaStatusCode::OK); // CASE 2: insert with dentry exist - ASSERT_EQ(storage.Insert(dentry2), MetaStatusCode::DENTRY_EXIST); + ASSERT_EQ(storage.Insert(dentry2, logIndex_++), + MetaStatusCode::DENTRY_EXIST); ASSERT_EQ(storage.Size(), 1); // CASE 3: insert dentry failed with higher txid dentry.set_txid(1); - ASSERT_EQ(storage.Insert(dentry), MetaStatusCode::IDEMPOTENCE_OK); + ASSERT_EQ(storage.Insert(dentry, logIndex_++), + MetaStatusCode::IDEMPOTENCE_OK); ASSERT_EQ(storage.Size(), 1); // CASE 4: direct insert success by handle tx - auto rc 
= storage.HandleTx(TX_OP_TYPE::PREPARE, dentry); + // NOTE: store real transaction is unnecessary + metaserver::TransactionRequest request; + request.set_type(metaserver::TransactionRequest::None); + request.set_rawpayload(""); + auto rc = storage.PrepareTx({dentry}, request, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(storage.Size(), 2); // CASE 5: insert idempotence - ASSERT_EQ(storage.Insert(dentry), MetaStatusCode::IDEMPOTENCE_OK); + ASSERT_EQ(storage.Insert(dentry, logIndex_++), + MetaStatusCode::IDEMPOTENCE_OK); ASSERT_EQ(storage.Size(), 1); } TEST_F(DentryStorageTest, Delete) { DentryStorage storage(kvStorage_, nameGenerator_, 0); + ASSERT_TRUE(storage.Init()); + // NOTE: store real transaction is unnecessary + metaserver::TransactionRequest request; + request.set_type(metaserver::TransactionRequest::None); + request.set_rawpayload(""); Dentry dentry; dentry.set_fsid(1); dentry.set_parentinodeid(1); @@ -164,63 +177,66 @@ TEST_F(DentryStorageTest, Delete) { dentry.set_txid(0); // CASE 1: dentry not found - ASSERT_EQ(storage.Delete(dentry), MetaStatusCode::NOT_FOUND); + ASSERT_EQ(storage.Delete(dentry, logIndex_++), MetaStatusCode::NOT_FOUND); ASSERT_EQ(storage.Size(), 0); // CASE 2: delete success - ASSERT_EQ(storage.Insert(dentry), MetaStatusCode::OK); + ASSERT_EQ(storage.Insert(dentry, logIndex_++), MetaStatusCode::OK); ASSERT_EQ(storage.Size(), 1); - ASSERT_EQ(storage.Delete(dentry), MetaStatusCode::OK); + ASSERT_EQ(storage.Delete(dentry, logIndex_++), MetaStatusCode::OK); ASSERT_EQ(storage.Size(), 0); // CASE 3: delete multi-dentrys with different txid - ASSERT_EQ(storage.Insert(dentry), MetaStatusCode::OK); + ASSERT_EQ(storage.Insert(dentry, logIndex_++), MetaStatusCode::OK); dentry.set_txid(1); - auto rc = storage.HandleTx(TX_OP_TYPE::PREPARE, dentry); + // NOTE: store real transaction is unnecessary + auto rc = storage.PrepareTx({dentry}, request, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(storage.Size(), 2); 
dentry.set_txid(2); - ASSERT_EQ(storage.Delete(dentry), MetaStatusCode::OK); + ASSERT_EQ(storage.Delete(dentry, logIndex_++), MetaStatusCode::OK); ASSERT_EQ(storage.Size(), 0); // CASE 4: delete by higher txid dentry.set_txid(2); - ASSERT_EQ(storage.Insert(dentry), MetaStatusCode::OK); + ASSERT_EQ(storage.Insert(dentry, logIndex_++), MetaStatusCode::OK); ASSERT_EQ(storage.Size(), 1); dentry.set_txid(1); - ASSERT_EQ(storage.Delete(dentry), MetaStatusCode::NOT_FOUND); + ASSERT_EQ(storage.Delete(dentry, logIndex_++), MetaStatusCode::NOT_FOUND); ASSERT_EQ(storage.Size(), 1); dentry.set_txid(2); - ASSERT_EQ(storage.Delete(dentry), MetaStatusCode::OK); + ASSERT_EQ(storage.Delete(dentry, logIndex_++), MetaStatusCode::OK); ASSERT_EQ(storage.Size(), 0); // CASE 5: dentry deleted with DELETE_MARK_FLAG flag dentry.set_flag(DentryFlag::DELETE_MARK_FLAG); - ASSERT_EQ(storage.Insert(dentry), MetaStatusCode::OK); + ASSERT_EQ(storage.Insert(dentry, logIndex_++), MetaStatusCode::OK); ASSERT_EQ(storage.Size(), 1); - ASSERT_EQ(storage.Delete(dentry), MetaStatusCode::NOT_FOUND); + ASSERT_EQ(storage.Delete(dentry, logIndex_++), MetaStatusCode::NOT_FOUND); ASSERT_EQ(storage.Size(), 0); // CASE 6: delete by last dentry with DELETE_MARK_FLAG flag dentry.set_txid(0); - ASSERT_EQ(storage.Insert(dentry), MetaStatusCode::OK); + ASSERT_EQ(storage.Insert(dentry, logIndex_++), MetaStatusCode::OK); dentry.set_txid(1); dentry.set_flag(DentryFlag::DELETE_MARK_FLAG); - rc = storage.HandleTx(TX_OP_TYPE::PREPARE, dentry); + // NOTE: store real transaction is unnecessary + rc = storage.PrepareTx({dentry}, request, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(storage.Size(), 2); - ASSERT_EQ(storage.Delete(dentry), MetaStatusCode::NOT_FOUND); + ASSERT_EQ(storage.Delete(dentry, logIndex_++), MetaStatusCode::NOT_FOUND); ASSERT_EQ(storage.Size(), 0); } TEST_F(DentryStorageTest, Get) { DentryStorage storage(kvStorage_, nameGenerator_, 0); + ASSERT_TRUE(storage.Init()); Dentry dentry; // CASE 1: 
dentry not found @@ -228,11 +244,12 @@ TEST_F(DentryStorageTest, Get) { ASSERT_EQ(storage.Get(&dentry), MetaStatusCode::NOT_FOUND); // CASE 2: get success - InsertDentrys(&storage, std::vector{ - // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } - GenDentry(1, 0, "A", 0, 1, false), - GenDentry(1, 0, "B", 0, 2, false), - }); + InsertDentrys(&storage, + std::vector{ + // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } + GenDentry(1, 0, "A", 0, 1, false), + GenDentry(1, 0, "B", 0, 2, false), + }); dentry = GenDentry(1, 0, "A", 0, 0, false); ASSERT_EQ(storage.Get(&dentry), MetaStatusCode::OK); @@ -246,11 +263,12 @@ TEST_F(DentryStorageTest, Get) { // CASE 3: get multi-dentrys with different txid storage.Clear(); - InsertDentrys(&storage, std::vector{ - // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } - GenDentry(1, 0, "A", 0, 1, false), - GenDentry(1, 0, "A", 1, 2, false), - }); + InsertDentrys(&storage, + std::vector{ + // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } + GenDentry(1, 0, "A", 0, 1, false), + GenDentry(1, 0, "A", 1, 2, false), + }); dentry = GenDentry(1, 0, "A", 1, 0, false); ASSERT_EQ(storage.Get(&dentry), MetaStatusCode::OK); @@ -259,11 +277,12 @@ TEST_F(DentryStorageTest, Get) { // CASE 4: get dentry with DELETE_MARK_FLAG flag storage.Clear(); - InsertDentrys(&storage, std::vector{ - // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } - GenDentry(1, 0, "A", 0, 1, false), - GenDentry(1, 0, "A", 1, 1, true), - }); + InsertDentrys(&storage, + std::vector{ + // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } + GenDentry(1, 0, "A", 0, 1, false), + GenDentry(1, 0, "A", 1, 1, true), + }); dentry = GenDentry(1, 0, "A", 1, 0, false); ASSERT_EQ(storage.Get(&dentry), MetaStatusCode::NOT_FOUND); @@ -273,29 +292,31 @@ TEST_F(DentryStorageTest, Get) { TEST_F(DentryStorageTest, List) { DentryStorage storage(kvStorage_, nameGenerator_, 0); + ASSERT_TRUE(storage.Init()); std::vector dentrys; Dentry dentry; // CASE 1: 
basic list - InsertDentrys(&storage, std::vector{ - // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } - GenDentry(1, 0, "A1", 0, 1, false), - GenDentry(1, 0, "A2", 0, 2, false), - GenDentry(1, 0, "A3", 0, 3, false), - GenDentry(1, 0, "A4", 0, 4, false), - GenDentry(1, 0, "A5", 0, 5, false), - }); + InsertDentrys(&storage, + std::vector{ + // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } + GenDentry(1, 0, "A1", 0, 1, false), + GenDentry(1, 0, "A2", 0, 2, false), + GenDentry(1, 0, "A3", 0, 3, false), + GenDentry(1, 0, "A4", 0, 4, false), + GenDentry(1, 0, "A5", 0, 5, false), + }); dentry = GenDentry(1, 0, "", 0, 0, false); ASSERT_EQ(storage.List(dentry, &dentrys, 0), MetaStatusCode::OK); ASSERT_EQ(dentrys.size(), 5); ASSERT_DENTRYS_EQ(dentrys, std::vector{ - GenDentry(1, 0, "A1", 0, 1, false), - GenDentry(1, 0, "A2", 0, 2, false), - GenDentry(1, 0, "A3", 0, 3, false), - GenDentry(1, 0, "A4", 0, 4, false), - GenDentry(1, 0, "A5", 0, 5, false), - }); + GenDentry(1, 0, "A1", 0, 1, false), + GenDentry(1, 0, "A2", 0, 2, false), + GenDentry(1, 0, "A3", 0, 3, false), + GenDentry(1, 0, "A4", 0, 4, false), + GenDentry(1, 0, "A5", 0, 5, false), + }); // CASE 2: list by specify name dentrys.clear(); @@ -303,27 +324,28 @@ TEST_F(DentryStorageTest, List) { ASSERT_EQ(storage.List(dentry, &dentrys, 0), MetaStatusCode::OK); ASSERT_EQ(dentrys.size(), 2); ASSERT_DENTRYS_EQ(dentrys, std::vector{ - GenDentry(1, 0, "A4", 0, 4, false), - GenDentry(1, 0, "A5", 0, 5, false), - }); + GenDentry(1, 0, "A4", 0, 4, false), + GenDentry(1, 0, "A5", 0, 5, false), + }); // CASE 3: list by lower txid storage.Clear(); - InsertDentrys(&storage, std::vector{ - // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } - GenDentry(1, 0, "A1", 1, 1, false), - GenDentry(1, 0, "A2", 2, 2, false), - GenDentry(1, 0, "A3", 3, 3, false), - }); + InsertDentrys(&storage, + std::vector{ + // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } + GenDentry(1, 0, "A1", 1, 1, false), + 
GenDentry(1, 0, "A2", 2, 2, false), + GenDentry(1, 0, "A3", 3, 3, false), + }); dentrys.clear(); dentry = GenDentry(1, 0, "", 2, 0, false); ASSERT_EQ(storage.List(dentry, &dentrys, 0), MetaStatusCode::OK); ASSERT_EQ(dentrys.size(), 2); ASSERT_DENTRYS_EQ(dentrys, std::vector{ - GenDentry(1, 0, "A1", 1, 1, false), - GenDentry(1, 0, "A2", 2, 2, false), - }); + GenDentry(1, 0, "A1", 1, 1, false), + GenDentry(1, 0, "A2", 2, 2, false), + }); // CASE 4: list by higher txid dentrys.clear(); @@ -331,86 +353,90 @@ TEST_F(DentryStorageTest, List) { ASSERT_EQ(storage.List(dentry, &dentrys, 0), MetaStatusCode::OK); ASSERT_EQ(dentrys.size(), 3); ASSERT_DENTRYS_EQ(dentrys, std::vector{ - GenDentry(1, 0, "A1", 1, 1, false), - GenDentry(1, 0, "A2", 2, 2, false), - GenDentry(1, 0, "A3", 3, 3, false), - }); + GenDentry(1, 0, "A1", 1, 1, false), + GenDentry(1, 0, "A2", 2, 2, false), + GenDentry(1, 0, "A3", 3, 3, false), + }); // CASE 5: list dentrys which has DELETE_MARK_FLAG flag storage.Clear(); - InsertDentrys(&storage, std::vector{ - // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } - GenDentry(1, 0, "A1", 1, 1, false), - GenDentry(1, 0, "A2", 2, 2, true), - GenDentry(1, 0, "A3", 3, 3, false), - }); + InsertDentrys(&storage, + std::vector{ + // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } + GenDentry(1, 0, "A1", 1, 1, false), + GenDentry(1, 0, "A2", 2, 2, true), + GenDentry(1, 0, "A3", 3, 3, false), + }); dentrys.clear(); dentry = GenDentry(1, 0, "", 3, 0, false); ASSERT_EQ(storage.List(dentry, &dentrys, 0), MetaStatusCode::OK); ASSERT_EQ(dentrys.size(), 2); ASSERT_DENTRYS_EQ(dentrys, std::vector{ - GenDentry(1, 0, "A1", 1, 1, false), - GenDentry(1, 0, "A3", 3, 3, false), - }); + GenDentry(1, 0, "A1", 1, 1, false), + GenDentry(1, 0, "A3", 3, 3, false), + }); // CASE 6: list same dentrys with different txid storage.Clear(); - InsertDentrys(&storage, std::vector{ - // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } - GenDentry(1, 0, "A", 0, 1, false), - 
GenDentry(1, 0, "A", 1, 1, false), - GenDentry(1, 0, "A", 2, 1, false), - }); + InsertDentrys(&storage, + std::vector{ + // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } + GenDentry(1, 0, "A", 0, 1, false), + GenDentry(1, 0, "A", 1, 1, false), + GenDentry(1, 0, "A", 2, 1, false), + }); dentrys.clear(); dentry = GenDentry(1, 0, "", 2, 0, false); ASSERT_EQ(storage.List(dentry, &dentrys, 0), MetaStatusCode::OK); ASSERT_EQ(dentrys.size(), 1); ASSERT_DENTRYS_EQ(dentrys, std::vector{ - GenDentry(1, 0, "A", 2, 1, false), - }); + GenDentry(1, 0, "A", 2, 1, false), + }); // CASE 7: list by dentry tree storage.Clear(); - InsertDentrys(&storage, std::vector{ - // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } - GenDentry(1, 0, "A", 0, 1, false), - GenDentry(1, 0, "B", 0, 2, false), - GenDentry(1, 2, "C", 0, 3, false), - GenDentry(1, 2, "D", 0, 4, false), - GenDentry(1, 2, "E", 0, 5, false), - GenDentry(1, 4, "F", 0, 6, true), - GenDentry(1, 4, "G", 0, 7, false), - }); + InsertDentrys(&storage, + std::vector{ + // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } + GenDentry(1, 0, "A", 0, 1, false), + GenDentry(1, 0, "B", 0, 2, false), + GenDentry(1, 2, "C", 0, 3, false), + GenDentry(1, 2, "D", 0, 4, false), + GenDentry(1, 2, "E", 0, 5, false), + GenDentry(1, 4, "F", 0, 6, true), + GenDentry(1, 4, "G", 0, 7, false), + }); dentrys.clear(); dentry = GenDentry(1, 2, "", 0, 0, false); ASSERT_EQ(storage.List(dentry, &dentrys, 0), MetaStatusCode::OK); ASSERT_EQ(dentrys.size(), 3); ASSERT_DENTRYS_EQ(dentrys, std::vector{ - GenDentry(1, 2, "C", 0, 3, false), - GenDentry(1, 2, "D", 0, 4, false), - GenDentry(1, 2, "E", 0, 5, false), - }); + GenDentry(1, 2, "C", 0, 3, false), + GenDentry(1, 2, "D", 0, 4, false), + GenDentry(1, 2, "E", 0, 5, false), + }); dentrys.clear(); dentry = GenDentry(1, 4, "", 0, 0, false); ASSERT_EQ(storage.List(dentry, &dentrys, 0), MetaStatusCode::OK); ASSERT_EQ(dentrys.size(), 1); ASSERT_DENTRYS_EQ(dentrys, std::vector{ - GenDentry(1, 
4, "G", 0, 7, false), - }); + GenDentry(1, 4, "G", 0, 7, false), + }); // CASE 8: list empty directory storage.Clear(); - InsertDentrys(&storage, std::vector{ - // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } - GenDentry(1, 0, "A", 0, 1, false), - GenDentry(1, 0, "B", 0, 2, false), - GenDentry(1, 2, "D", 0, 4, true), - GenDentry(1, 2, "E", 0, 5, true), - }); + InsertDentrys(&storage, + std::vector{ + // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } + GenDentry(1, 0, "A", 0, 1, false), + GenDentry(1, 0, "B", 0, 2, false), + GenDentry(1, 2, "D", 0, 4, true), + GenDentry(1, 2, "E", 0, 5, true), + }); dentrys.clear(); dentry = GenDentry(1, 2, "", 0, 0, false); @@ -429,13 +455,15 @@ TEST_F(DentryStorageTest, List) { // CASE 9: list directory only storage.Clear(); - InsertDentrys(&storage, std::vector{ - // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } - GenDentry(1, 0, "A", 0, 1, false, FsFileType::TYPE_DIRECTORY), - GenDentry(1, 0, "B", 0, 2, true, FsFileType::TYPE_DIRECTORY), - GenDentry(1, 0, "D", 0, 3, false), - GenDentry(1, 0, "E", 0, 4, false), - }); + InsertDentrys( + &storage, + std::vector{ + // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } + GenDentry(1, 0, "A", 0, 1, false, FsFileType::TYPE_DIRECTORY), + GenDentry(1, 0, "B", 0, 2, true, FsFileType::TYPE_DIRECTORY), + GenDentry(1, 0, "D", 0, 3, false), + GenDentry(1, 0, "E", 0, 4, false), + }); dentrys.clear(); dentry = GenDentry(1, 0, "", 0, 0, false); @@ -444,12 +472,13 @@ TEST_F(DentryStorageTest, List) { // CASE 10: list directory only with limit storage.Clear(); - InsertDentrys(&storage, std::vector{ - // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } - GenDentry(1, 0, "A", 0, 1, false), - GenDentry(1, 0, "B", 0, 2, false), - GenDentry(1, 0, "D", 0, 3, false), - }); + InsertDentrys(&storage, + std::vector{ + // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } + GenDentry(1, 0, "A", 0, 1, false), + GenDentry(1, 0, "B", 0, 2, false), + GenDentry(1, 0, 
"D", 0, 3, false), + }); dentrys.clear(); dentry = GenDentry(1, 0, "", 0, 0, false); @@ -457,12 +486,14 @@ TEST_F(DentryStorageTest, List) { ASSERT_EQ(dentrys.size(), 1); storage.Clear(); - InsertDentrys(&storage, std::vector{ - // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } - GenDentry(1, 0, "A", 0, 1, false, FsFileType::TYPE_DIRECTORY), - GenDentry(1, 0, "B", 0, 2, false), - GenDentry(1, 0, "D", 0, 3, false), - }); + InsertDentrys( + &storage, + std::vector{ + // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } + GenDentry(1, 0, "A", 0, 1, false, FsFileType::TYPE_DIRECTORY), + GenDentry(1, 0, "B", 0, 2, false), + GenDentry(1, 0, "D", 0, 3, false), + }); dentrys.clear(); dentry = GenDentry(1, 0, "", 0, 0, false); @@ -472,28 +503,35 @@ TEST_F(DentryStorageTest, List) { TEST_F(DentryStorageTest, HandleTx) { DentryStorage storage(kvStorage_, nameGenerator_, 0); + ASSERT_TRUE(storage.Init()); std::vector dentrys; Dentry dentry; - + // NOTE: store real transaction is unnecessary + metaserver::TransactionRequest request; + request.set_type(metaserver::TransactionRequest::None); + request.set_rawpayload(""); // CASE 1: prepare success - InsertDentrys(&storage, std::vector{ - // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } - GenDentry(1, 0, "A", 0, 1, false), - }); + InsertDentrys(&storage, + std::vector{ + // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } + GenDentry(1, 0, "A", 0, 1, false), + }); dentry = GenDentry(1, 0, "A", 1, 2, false); - auto rc = storage.HandleTx(TX_OP_TYPE::PREPARE, dentry); + // NOTE: store real transaction is unnecessary + auto rc = storage.PrepareTx({dentry}, request, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(storage.Size(), 2); // CASE 2: prepare with dentry exist dentry = GenDentry(1, 0, "A", 1, 2, false); - rc = storage.HandleTx(TX_OP_TYPE::PREPARE, dentry); + /// NOTE: store real transaction is unnecessary + rc = storage.PrepareTx({dentry}, request, logIndex_++); ASSERT_EQ(rc, 
MetaStatusCode::OK); ASSERT_EQ(storage.Size(), 2); // CASE 3: commit success - rc = storage.HandleTx(TX_OP_TYPE::COMMIT, dentry); + rc = storage.CommitTx({dentry}, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(storage.Size(), 1); @@ -503,28 +541,30 @@ TEST_F(DentryStorageTest, HandleTx) { // CASE 3: commit dentry with DELETE_MARK_FLAG flag storage.Clear(); - InsertDentrys(&storage, std::vector{ - // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } - GenDentry(1, 0, "A", 0, 1, false), - GenDentry(1, 0, "A", 1, 1, true), - }); + InsertDentrys(&storage, + std::vector{ + // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } + GenDentry(1, 0, "A", 0, 1, false), + GenDentry(1, 0, "A", 1, 1, true), + }); dentry = GenDentry(1, 0, "A", 1, 0, false); - rc = storage.HandleTx(TX_OP_TYPE::COMMIT, dentry); + rc = storage.CommitTx({dentry}, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(storage.Size(), 0); // CASE 4: Rollback success storage.Clear(); - InsertDentrys(&storage, std::vector{ - // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } - GenDentry(1, 0, "A", 0, 1, false), - GenDentry(1, 0, "A", 1, 2, false), - }); + InsertDentrys(&storage, + std::vector{ + // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } + GenDentry(1, 0, "A", 0, 1, false), + GenDentry(1, 0, "A", 1, 2, false), + }); ASSERT_EQ(storage.Size(), 2); dentry = GenDentry(1, 0, "A", 1, 2, false); - rc = storage.HandleTx(TX_OP_TYPE::ROLLBACK, dentry); + rc = storage.RollbackTx({dentry}, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(storage.Size(), 1); diff --git a/curvefs/test/metaserver/inode_manager_test.cpp b/curvefs/test/metaserver/inode_manager_test.cpp index e1808c45bd..a407ccde1a 100644 --- a/curvefs/test/metaserver/inode_manager_test.cpp +++ b/curvefs/test/metaserver/inode_manager_test.cpp @@ -46,12 +46,12 @@ using ::testing::SaveArg; using ::testing::SetArgPointee; using ::testing::StrEq; +using 
::curvefs::metaserver::storage::Key4S3ChunkInfoList; using ::curvefs::metaserver::storage::KVStorage; -using ::curvefs::metaserver::storage::StorageOptions; -using ::curvefs::metaserver::storage::RocksDBStorage; using ::curvefs::metaserver::storage::NameGenerator; -using ::curvefs::metaserver::storage::Key4S3ChunkInfoList; using ::curvefs::metaserver::storage::RandomStoragePath; +using ::curvefs::metaserver::storage::RocksDBStorage; +using ::curvefs::metaserver::storage::StorageOptions; namespace curvefs { namespace metaserver { @@ -71,12 +71,13 @@ class InodeManagerTest : public ::testing::Test { ASSERT_TRUE(kvStorage_->Open()); auto nameGenerator = std::make_shared(1); - auto inodeStorage = std::make_shared( - kvStorage_, nameGenerator, 0); + auto inodeStorage = + std::make_shared(kvStorage_, nameGenerator, 0); auto trash = std::make_shared(inodeStorage); filetype2InodeNum_ = std::make_shared(); manager = std::make_shared(inodeStorage, trash, filetype2InodeNum_.get()); + ASSERT_TRUE(manager->Init()); param_.fsId = 1; param_.length = 100; @@ -88,6 +89,7 @@ class InodeManagerTest : public ::testing::Test { param_.rdev = 0; conv_ = std::make_shared(); + logIndex_ = 0; } void TearDown() override { @@ -110,7 +112,7 @@ class InodeManagerTest : public ::testing::Test { return result; } - bool CompareInode(const Inode &first, const Inode &second) { + bool CompareInode(const Inode& first, const Inode& second) { return first.fsid() == second.fsid() && first.atime() == second.atime() && first.inodeid() == second.inodeid() && @@ -125,11 +127,9 @@ class InodeManagerTest : public ::testing::Test { bool EqualS3ChunkInfo(const S3ChunkInfo& lhs, const S3ChunkInfo& rhs) { return lhs.chunkid() == rhs.chunkid() && - lhs.compaction() == rhs.compaction() && - lhs.offset() == rhs.offset() && - lhs.len() == rhs.len() && - lhs.size() == rhs.size() && - lhs.zero() == rhs.zero(); + lhs.compaction() == rhs.compaction() && + lhs.offset() == rhs.offset() && lhs.len() == rhs.len() && + 
lhs.size() == rhs.size() && lhs.zero() == rhs.zero(); } bool EqualS3ChunkInfoList(const S3ChunkInfoList& lhs, @@ -187,6 +187,7 @@ class InodeManagerTest : public ::testing::Test { std::string dataDir_; std::shared_ptr kvStorage_; std::shared_ptr filetype2InodeNum_; + int64_t logIndex_; }; TEST_F(InodeManagerTest, test1) { @@ -194,28 +195,28 @@ TEST_F(InodeManagerTest, test1) { uint32_t fsId = 1; Inode inode1; - ASSERT_EQ(manager->CreateInode(2, param_, &inode1), + ASSERT_EQ(manager->CreateInode(2, param_, &inode1, logIndex_++), MetaStatusCode::OK); ASSERT_EQ(inode1.inodeid(), 2); Inode inode2; - ASSERT_EQ(manager->CreateInode(3, param_, &inode2), + ASSERT_EQ(manager->CreateInode(3, param_, &inode2, logIndex_++), MetaStatusCode::OK); ASSERT_EQ(inode2.inodeid(), 3); Inode inode3; param_.type = FsFileType::TYPE_SYM_LINK; - ASSERT_EQ(manager->CreateInode(4, param_, &inode3), + ASSERT_EQ(manager->CreateInode(4, param_, &inode3, logIndex_++), MetaStatusCode::SYM_LINK_EMPTY); param_.symlink = "SYMLINK"; - ASSERT_EQ(manager->CreateInode(4, param_, &inode3), + ASSERT_EQ(manager->CreateInode(4, param_, &inode3, logIndex_++), MetaStatusCode::OK); ASSERT_EQ(inode3.inodeid(), 4); Inode inode4; param_.type = FsFileType::TYPE_S3; - ASSERT_EQ(manager->CreateInode(5, param_, &inode4), + ASSERT_EQ(manager->CreateInode(5, param_, &inode4, logIndex_++), MetaStatusCode::OK); ASSERT_EQ(inode4.inodeid(), 5); ASSERT_EQ(inode4.type(), FsFileType::TYPE_S3); @@ -225,7 +226,7 @@ TEST_F(InodeManagerTest, test1) { struct timespec now; clock_gettime(CLOCK_REALTIME, &now); param_.timestamp = absl::make_optional(now); - ASSERT_EQ(manager->CreateInode(6, param_, &inode5), + ASSERT_EQ(manager->CreateInode(6, param_, &inode5, logIndex_++), MetaStatusCode::OK); // GET @@ -250,18 +251,20 @@ TEST_F(InodeManagerTest, test1) { ASSERT_TRUE(CompareInode(inode4, temp4)); // DELETE - ASSERT_EQ(manager->DeleteInode(fsId, inode1.inodeid()), MetaStatusCode::OK); - ASSERT_EQ(manager->DeleteInode(fsId, 
inode1.inodeid()), + ASSERT_EQ(manager->DeleteInode(fsId, inode1.inodeid(), logIndex_++), + MetaStatusCode::OK); + ASSERT_EQ(manager->DeleteInode(fsId, inode1.inodeid(), logIndex_++), MetaStatusCode::OK); ASSERT_EQ(manager->GetInode(fsId, inode1.inodeid(), &temp1), MetaStatusCode::NOT_FOUND); // UPDATE UpdateInodeRequest request = MakeUpdateInodeRequestFromInode(inode1); - ASSERT_EQ(manager->UpdateInode(request), MetaStatusCode::NOT_FOUND); + ASSERT_EQ(manager->UpdateInode(request, logIndex_++), + MetaStatusCode::NOT_FOUND); temp2.set_atime(100); UpdateInodeRequest request2 = MakeUpdateInodeRequestFromInode(temp2); - ASSERT_EQ(manager->UpdateInode(request2), MetaStatusCode::OK); + ASSERT_EQ(manager->UpdateInode(request2, logIndex_++), MetaStatusCode::OK); Inode temp5; ASSERT_EQ(manager->GetInode(fsId, inode2.inodeid(), &temp5), MetaStatusCode::OK); @@ -290,29 +293,27 @@ TEST_F(InodeManagerTest, GetOrModifyS3ChunkInfo) { std::shared_ptr iterator; MetaStatusCode rc = manager->GetOrModifyS3ChunkInfo( - fsId, inodeId, map2add, map2del, true, &iterator); + fsId, inodeId, map2add, map2del, true, &iterator, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); - CHECK_ITERATOR_S3CHUNKINFOLIST(iterator, - std::vector{ 1, 2, 3 }, - std::vector{ - GenS3ChunkInfoList(1, 1), - GenS3ChunkInfoList(2, 2), - GenS3ChunkInfoList(3, 3), - }); + CHECK_ITERATOR_S3CHUNKINFOLIST(iterator, std::vector{1, 2, 3}, + std::vector{ + GenS3ChunkInfoList(1, 1), + GenS3ChunkInfoList(2, 2), + GenS3ChunkInfoList(3, 3), + }); LOG(INFO) << "CASE 1.1: check idempotent for GetOrModifyS3ChunkInfo()"; - rc = manager->GetOrModifyS3ChunkInfo( - fsId, inodeId, map2add, map2del, true, &iterator); + rc = manager->GetOrModifyS3ChunkInfo(fsId, inodeId, map2add, map2del, + true, &iterator, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); - CHECK_ITERATOR_S3CHUNKINFOLIST(iterator, - std::vector{ 1, 2, 3 }, - std::vector{ - GenS3ChunkInfoList(1, 1), - GenS3ChunkInfoList(2, 2), - GenS3ChunkInfoList(3, 3), - }); + 
CHECK_ITERATOR_S3CHUNKINFOLIST(iterator, std::vector{1, 2, 3}, + std::vector{ + GenS3ChunkInfoList(1, 1), + GenS3ChunkInfoList(2, 2), + GenS3ChunkInfoList(3, 3), + }); } // CASE 2: GetOrModifyS3ChunkInfo() with delete @@ -327,12 +328,11 @@ TEST_F(InodeManagerTest, GetOrModifyS3ChunkInfo) { std::shared_ptr iterator; MetaStatusCode rc = manager->GetOrModifyS3ChunkInfo( - fsId, inodeId, map2add, map2del, true, &iterator); + fsId, inodeId, map2add, map2del, true, &iterator, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); - CHECK_ITERATOR_S3CHUNKINFOLIST(iterator, - std::vector{}, - std::vector{}); + CHECK_ITERATOR_S3CHUNKINFOLIST(iterator, std::vector{}, + std::vector{}); } // CASE 3: GetOrModifyS3ChunkInfo() with add and delete @@ -351,7 +351,7 @@ TEST_F(InodeManagerTest, GetOrModifyS3ChunkInfo) { std::shared_ptr iterator; MetaStatusCode rc = manager->GetOrModifyS3ChunkInfo( - fsId, inodeId, map2add, map2del, true, &iterator); + fsId, inodeId, map2add, map2del, true, &iterator, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(iterator->Status(), 0); @@ -369,21 +369,21 @@ TEST_F(InodeManagerTest, GetOrModifyS3ChunkInfo) { map2del[8] = GenS3ChunkInfoList(1, 100); map2del[9] = GenS3ChunkInfoList(1, 100); - rc = manager->GetOrModifyS3ChunkInfo( - fsId, inodeId, map2add, map2del, true, &iterator); + rc = manager->GetOrModifyS3ChunkInfo(fsId, inodeId, map2add, map2del, + true, &iterator, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(iterator->Status(), 0); CHECK_ITERATOR_S3CHUNKINFOLIST(iterator, - std::vector{ 0, 1, 2, 7, 8, 9 }, - std::vector{ - GenS3ChunkInfoList(100, 100), - GenS3ChunkInfoList(1, 100), - GenS3ChunkInfoList(1, 100), - GenS3ChunkInfoList(100, 100), - GenS3ChunkInfoList(100, 100), - GenS3ChunkInfoList(100, 100), - }); + std::vector{0, 1, 2, 7, 8, 9}, + std::vector{ + GenS3ChunkInfoList(100, 100), + GenS3ChunkInfoList(1, 100), + GenS3ChunkInfoList(1, 100), + GenS3ChunkInfoList(100, 100), + GenS3ChunkInfoList(100, 100), + 
GenS3ChunkInfoList(100, 100), + }); // step3: delete all s3chunkinfo map2add.clear(); @@ -394,21 +394,21 @@ TEST_F(InodeManagerTest, GetOrModifyS3ChunkInfo) { map2del[1] = GenS3ChunkInfoList(1, 100); map2del[2] = GenS3ChunkInfoList(1, 100); - rc = manager->GetOrModifyS3ChunkInfo( - fsId, inodeId, map2add, map2del, true, &iterator); + rc = manager->GetOrModifyS3ChunkInfo(fsId, inodeId, map2add, map2del, + true, &iterator, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(iterator->Status(), 0); CHECK_ITERATOR_S3CHUNKINFOLIST(iterator, - std::vector{ 0, 1, 2, 7, 8, 9 }, - std::vector{ - GenS3ChunkInfoList(100, 100), - GenS3ChunkInfoList(100, 100), - GenS3ChunkInfoList(100, 100), - GenS3ChunkInfoList(100, 100), - GenS3ChunkInfoList(100, 100), - GenS3ChunkInfoList(100, 100), - }); + std::vector{0, 1, 2, 7, 8, 9}, + std::vector{ + GenS3ChunkInfoList(100, 100), + GenS3ChunkInfoList(100, 100), + GenS3ChunkInfoList(100, 100), + GenS3ChunkInfoList(100, 100), + GenS3ChunkInfoList(100, 100), + GenS3ChunkInfoList(100, 100), + }); } } @@ -417,16 +417,15 @@ TEST_F(InodeManagerTest, UpdateInode) { uint64_t ino = 2; Inode inode; - ASSERT_EQ(MetaStatusCode::OK, manager->CreateInode(ino, param_, &inode)); + ASSERT_EQ(MetaStatusCode::OK, + manager->CreateInode(ino, param_, &inode, logIndex_++)); // test update ok UpdateInodeRequest request = MakeUpdateInodeRequestFromInode(inode); - ASSERT_EQ(MetaStatusCode::OK, - manager->UpdateInode(request)); + ASSERT_EQ(MetaStatusCode::OK, manager->UpdateInode(request, logIndex_++)); // test update fail - ASSERT_EQ(MetaStatusCode::OK, - manager->UpdateInode(request)); + ASSERT_EQ(MetaStatusCode::OK, manager->UpdateInode(request, logIndex_++)); } @@ -434,8 +433,8 @@ TEST_F(InodeManagerTest, testGetAttr) { // CREATE uint32_t fsId = 1; Inode inode1; - ASSERT_EQ(manager->CreateInode(2, param_, &inode1), - MetaStatusCode::OK); + ASSERT_EQ(manager->CreateInode(2, param_, &inode1, logIndex_++), + MetaStatusCode::OK); ASSERT_EQ(inode1.inodeid(), 
2); InodeAttr attr; @@ -456,14 +455,14 @@ TEST_F(InodeManagerTest, testGetXAttr) { // CREATE uint32_t fsId = 1; Inode inode1; - ASSERT_EQ(manager->CreateInode(2, param_, &inode1), - MetaStatusCode::OK); + ASSERT_EQ(manager->CreateInode(2, param_, &inode1, logIndex_++), + MetaStatusCode::OK); ASSERT_EQ(inode1.inodeid(), 2); ASSERT_TRUE(inode1.xattr().empty()); Inode inode2; param_.type = FsFileType::TYPE_DIRECTORY; - ASSERT_EQ(manager->CreateInode(3, param_, &inode2), + ASSERT_EQ(manager->CreateInode(3, param_, &inode2, logIndex_++), MetaStatusCode::OK); ASSERT_FALSE(inode2.xattr().empty()); ASSERT_EQ(inode2.xattr().find(XATTRFILES)->second, "0"); @@ -489,7 +488,7 @@ TEST_F(InodeManagerTest, testGetXAttr) { inode2.mutable_xattr()->find(XATTRENTRIES)->second = "2"; inode2.mutable_xattr()->find(XATTRFBYTES)->second = "100"; UpdateInodeRequest request = MakeUpdateInodeRequestFromInode(inode2); - ASSERT_EQ(manager->UpdateInode(request), MetaStatusCode::OK); + ASSERT_EQ(manager->UpdateInode(request, logIndex_++), MetaStatusCode::OK); // GET XAttr xattr1; @@ -508,7 +507,7 @@ TEST_F(InodeManagerTest, testCreateManageInode) { ManageInodeType type = ManageInodeType::TYPE_RECYCLE; Inode inode; ASSERT_EQ(MetaStatusCode::OK, - manager->CreateManageInode(param_, type, &inode)); + manager->CreateManageInode(param_, type, &inode, logIndex_++)); ASSERT_EQ(inode.inodeid(), RECYCLEINODEID); ASSERT_EQ(inode.parent()[0], ROOTINODEID); diff --git a/curvefs/test/metaserver/inode_storage_test.cpp b/curvefs/test/metaserver/inode_storage_test.cpp index bae78b1fef..279831ce54 100644 --- a/curvefs/test/metaserver/inode_storage_test.cpp +++ b/curvefs/test/metaserver/inode_storage_test.cpp @@ -78,6 +78,7 @@ class InodeStorageTest : public ::testing::Test { kvStorage_ = std::make_shared(options); ASSERT_TRUE(kvStorage_->Open()); conv_ = std::make_shared(); + logIndex_ = 0; } void TearDown() override { @@ -86,7 +87,7 @@ class InodeStorageTest : public ::testing::Test { ASSERT_EQ(output.size(), 
0); } - std::string execShell(const std::string &cmd) { + std::string execShell(const std::string& cmd) { std::array buffer; std::string result; std::unique_ptr pipe(popen(cmd.c_str(), "r"), @@ -100,7 +101,7 @@ class InodeStorageTest : public ::testing::Test { return result; } - bool CompareInode(const Inode &first, const Inode &second) { + bool CompareInode(const Inode& first, const Inode& second) { return first.fsid() == second.fsid() && first.atime() == second.atime() && first.inodeid() == second.inodeid(); @@ -129,7 +130,7 @@ class InodeStorageTest : public ::testing::Test { uint64_t lastChunkId) { S3ChunkInfoList list; for (uint64_t id = firstChunkId; id <= lastChunkId; id++) { - S3ChunkInfo *info = list.add_s3chunks(); + S3ChunkInfo* info = list.add_s3chunks(); info->set_chunkid(id); info->set_compaction(0); info->set_offset(0); @@ -140,15 +141,15 @@ class InodeStorageTest : public ::testing::Test { return list; } - bool EqualS3ChunkInfo(const S3ChunkInfo &lhs, const S3ChunkInfo &rhs) { + bool EqualS3ChunkInfo(const S3ChunkInfo& lhs, const S3ChunkInfo& rhs) { return lhs.chunkid() == rhs.chunkid() && lhs.compaction() == rhs.compaction() && lhs.offset() == rhs.offset() && lhs.len() == rhs.len() && lhs.size() == rhs.size() && lhs.zero() == rhs.zero(); } - bool EqualS3ChunkInfoList(const S3ChunkInfoList &lhs, - const S3ChunkInfoList &rhs) { + bool EqualS3ChunkInfoList(const S3ChunkInfoList& lhs, + const S3ChunkInfoList& rhs) { size_t size = lhs.s3chunks_size(); if (size != rhs.s3chunks_size()) { return false; @@ -162,7 +163,7 @@ class InodeStorageTest : public ::testing::Test { return true; } - void CHECK_INODE_S3CHUNKINFOLIST(InodeStorage *storage, uint32_t fsId, + void CHECK_INODE_S3CHUNKINFOLIST(InodeStorage* storage, uint32_t fsId, uint64_t inodeId, const std::vector chunkIndexs, const std::vector lists) { @@ -189,21 +190,23 @@ class InodeStorageTest : public ::testing::Test { std::shared_ptr nameGenerator_; std::shared_ptr kvStorage_; std::shared_ptr conv_; 
+ int64_t logIndex_; }; TEST_F(InodeStorageTest, test1) { InodeStorage storage(kvStorage_, nameGenerator_, 0); + ASSERT_TRUE(storage.Init()); Inode inode1 = GenInode(1, 1); Inode inode2 = GenInode(2, 2); Inode inode3 = GenInode(3, 3); // insert - ASSERT_EQ(storage.Insert(inode1), MetaStatusCode::OK); - ASSERT_EQ(storage.Insert(inode2), MetaStatusCode::OK); - ASSERT_EQ(storage.Insert(inode3), MetaStatusCode::OK); - ASSERT_EQ(storage.Insert(inode1), MetaStatusCode::INODE_EXIST); - ASSERT_EQ(storage.Insert(inode2), MetaStatusCode::INODE_EXIST); - ASSERT_EQ(storage.Insert(inode3), MetaStatusCode::INODE_EXIST); + ASSERT_EQ(storage.Insert(inode1, logIndex_++), MetaStatusCode::OK); + ASSERT_EQ(storage.Insert(inode2, logIndex_++), MetaStatusCode::OK); + ASSERT_EQ(storage.Insert(inode3, logIndex_++), MetaStatusCode::OK); + ASSERT_EQ(storage.Insert(inode1, logIndex_++), MetaStatusCode::INODE_EXIST); + ASSERT_EQ(storage.Insert(inode2, logIndex_++), MetaStatusCode::INODE_EXIST); + ASSERT_EQ(storage.Insert(inode3, logIndex_++), MetaStatusCode::INODE_EXIST); ASSERT_EQ(storage.Size(), 3); // get @@ -216,16 +219,18 @@ TEST_F(InodeStorageTest, test1) { ASSERT_TRUE(CompareInode(inode3, temp)); // delete - ASSERT_EQ(storage.Delete(Key4Inode(inode1)), MetaStatusCode::OK); + ASSERT_EQ(storage.Delete(Key4Inode(inode1), logIndex_++), + MetaStatusCode::OK); ASSERT_EQ(storage.Size(), 2); ASSERT_EQ(storage.Get(Key4Inode(inode1), &temp), MetaStatusCode::NOT_FOUND); - ASSERT_EQ(storage.Delete(Key4Inode(inode1)), MetaStatusCode::OK); + ASSERT_EQ(storage.Delete(Key4Inode(inode1), logIndex_++), + MetaStatusCode::OK); // update Inode oldInode; ASSERT_EQ(storage.Get(Key4Inode(inode2), &oldInode), MetaStatusCode::OK); inode2.set_atime(400); - ASSERT_EQ(storage.Update(inode2), MetaStatusCode::OK); + ASSERT_EQ(storage.Update(inode2, logIndex_++), MetaStatusCode::OK); Inode newInode; ASSERT_EQ(storage.Get(Key4Inode(inode2), &newInode), MetaStatusCode::OK); ASSERT_FALSE(CompareInode(oldInode, 
newInode)); @@ -240,6 +245,7 @@ TEST_F(InodeStorageTest, test1) { TEST_F(InodeStorageTest, testGetAttrNotFound) { InodeStorage storage(kvStorage_, nameGenerator_, 0); + ASSERT_TRUE(storage.Init()); Inode inode; inode.set_fsid(1); inode.set_inodeid(1); @@ -256,7 +262,7 @@ TEST_F(InodeStorageTest, testGetAttrNotFound) { inode.set_nlink(2); inode.set_type(FsFileType::TYPE_DIRECTORY); - ASSERT_EQ(storage.Insert(inode), MetaStatusCode::OK); + ASSERT_EQ(storage.Insert(inode, logIndex_++), MetaStatusCode::OK); InodeAttr attr; ASSERT_EQ(storage.GetAttr(Key4Inode(1, 2), &attr), MetaStatusCode::NOT_FOUND); @@ -264,6 +270,7 @@ TEST_F(InodeStorageTest, testGetAttrNotFound) { TEST_F(InodeStorageTest, testGetAttr) { InodeStorage storage(kvStorage_, nameGenerator_, 0); + ASSERT_TRUE(storage.Init()); Inode inode; inode.set_fsid(1); inode.set_inodeid(1); @@ -280,7 +287,7 @@ TEST_F(InodeStorageTest, testGetAttr) { inode.set_nlink(2); inode.set_type(FsFileType::TYPE_DIRECTORY); - ASSERT_EQ(storage.Insert(inode), MetaStatusCode::OK); + ASSERT_EQ(storage.Insert(inode, logIndex_++), MetaStatusCode::OK); InodeAttr attr; ASSERT_EQ(storage.GetAttr(Key4Inode(1, 1), &attr), MetaStatusCode::OK); ASSERT_EQ(attr.inodeid(), 1); @@ -291,6 +298,7 @@ TEST_F(InodeStorageTest, testGetAttr) { TEST_F(InodeStorageTest, testGetXAttr) { InodeStorage storage(kvStorage_, nameGenerator_, 0); + ASSERT_TRUE(storage.Init()); Inode inode; inode.set_fsid(1); inode.set_inodeid(1); @@ -316,7 +324,7 @@ TEST_F(InodeStorageTest, testGetXAttr) { inode.mutable_xattr()->insert({XATTRRENTRIES, "200"}); inode.mutable_xattr()->insert({XATTRRFBYTES, "1000"}); - ASSERT_EQ(storage.Insert(inode), MetaStatusCode::OK); + ASSERT_EQ(storage.Insert(inode, logIndex_++), MetaStatusCode::OK); XAttr xattr; ASSERT_EQ(storage.GetXAttr(Key4Inode(1, 1), &xattr), MetaStatusCode::OK); ASSERT_FALSE(xattr.xattrinfos().empty()); @@ -336,12 +344,13 @@ TEST_F(InodeStorageTest, ModifyInodeS3ChunkInfoList) { uint32_t fsId = 1; uint64_t inodeId = 1; 
InodeStorage storage(kvStorage_, nameGenerator_, 0); + ASSERT_TRUE(storage.Init()); // CASE 1: get empty s3chunkinfo { LOG(INFO) << "CASE 1: get empty s3chukninfo"; Inode inode = GenInode(fsId, inodeId); - ASSERT_EQ(storage.Insert(inode), MetaStatusCode::OK); + ASSERT_EQ(storage.Insert(inode, logIndex_++), MetaStatusCode::OK); S3ChunkInfoList list2del; size_t size = 0; @@ -362,7 +371,8 @@ TEST_F(InodeStorageTest, ModifyInodeS3ChunkInfoList) { for (size_t i = 0; i < chunkIndexs.size(); i++) { MetaStatusCode rc = storage.ModifyInodeS3ChunkInfoList( - fsId, inodeId, chunkIndexs[i], &lists2add[i], nullptr); + fsId, inodeId, chunkIndexs[i], &lists2add[i], nullptr, + logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); } @@ -383,7 +393,8 @@ TEST_F(InodeStorageTest, ModifyInodeS3ChunkInfoList) { for (size_t i = 0; i < chunkIndexs.size(); i++) { MetaStatusCode rc = storage.ModifyInodeS3ChunkInfoList( - fsId, inodeId, chunkIndexs[i], &lists2add[i], nullptr); + fsId, inodeId, chunkIndexs[i], &lists2add[i], nullptr, + logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); } @@ -404,7 +415,8 @@ TEST_F(InodeStorageTest, ModifyInodeS3ChunkInfoList) { for (size_t i = 0; i < chunkIndexs.size(); i++) { MetaStatusCode rc = storage.ModifyInodeS3ChunkInfoList( - fsId, inodeId, chunkIndexs[i], &lists2add[i], nullptr); + fsId, inodeId, chunkIndexs[i], &lists2add[i], nullptr, + logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); } @@ -430,7 +442,8 @@ TEST_F(InodeStorageTest, ModifyInodeS3ChunkInfoList) { for (size_t i = 0; i < chunkIndexs.size(); i++) { MetaStatusCode rc = storage.ModifyInodeS3ChunkInfoList( - fsId, inodeId, chunkIndexs[i], &lists2add[i], nullptr); + fsId, inodeId, chunkIndexs[i], &lists2add[i], nullptr, + logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); } @@ -462,7 +475,8 @@ TEST_F(InodeStorageTest, ModifyInodeS3ChunkInfoList) { for (size_t i = 0; i < chunkIndexs.size(); i++) { MetaStatusCode rc = storage.ModifyInodeS3ChunkInfoList( - fsId, inodeId, chunkIndexs[i], &lists2add[i], 
nullptr); + fsId, inodeId, chunkIndexs[i], &lists2add[i], nullptr, + logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); } } @@ -484,8 +498,8 @@ TEST_F(InodeStorageTest, ModifyInodeS3ChunkInfoList) { for (size_t i = 0; i < chunkIndexs.size(); i++) { MetaStatusCode rc = storage.ModifyInodeS3ChunkInfoList( - fsId, inodeId, chunkIndexs[i], &lists2add[i], - &lists2del[i]); + fsId, inodeId, chunkIndexs[i], &lists2add[i], &lists2del[i], + logIndex_++); ASSERT_EQ(rc, MetaStatusCode::STORAGE_INTERNAL_ERROR); } } @@ -507,7 +521,8 @@ TEST_F(InodeStorageTest, ModifyInodeS3ChunkInfoList) { for (size_t i = 0; i < chunkIndexs.size(); i++) { MetaStatusCode rc = storage.ModifyInodeS3ChunkInfoList( - fsId, inodeId, chunkIndexs[i], &lists2add[i], nullptr); + fsId, inodeId, chunkIndexs[i], &lists2add[i], nullptr, + logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); } } @@ -529,8 +544,8 @@ TEST_F(InodeStorageTest, ModifyInodeS3ChunkInfoList) { for (size_t i = 0; i < chunkIndexs.size(); i++) { MetaStatusCode rc = storage.ModifyInodeS3ChunkInfoList( - fsId, inodeId, chunkIndexs[i], &lists2add[i], - &lists2del[i]); + fsId, inodeId, chunkIndexs[i], &lists2add[i], &lists2del[i], + logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); } } @@ -562,7 +577,8 @@ TEST_F(InodeStorageTest, ModifyInodeS3ChunkInfoList) { for (size_t i = 0; i < chunkIndexs.size(); i++) { MetaStatusCode rc = storage.ModifyInodeS3ChunkInfoList( - fsId, inodeId, chunkIndexs[i], &lists2add[i], nullptr); + fsId, inodeId, chunkIndexs[i], &lists2add[i], nullptr, + logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); } } @@ -578,7 +594,8 @@ TEST_F(InodeStorageTest, ModifyInodeS3ChunkInfoList) { for (size_t i = 0; i < chunkIndexs.size(); i++) { MetaStatusCode rc = storage.ModifyInodeS3ChunkInfoList( - fsId, inodeId, chunkIndexs[i], &lists2add[i], nullptr); + fsId, inodeId, chunkIndexs[i], &lists2add[i], nullptr, + logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); } } @@ -600,8 +617,8 @@ TEST_F(InodeStorageTest, ModifyInodeS3ChunkInfoList) { 
for (size_t i = 0; i < chunkIndexs.size(); i++) { MetaStatusCode rc = storage.ModifyInodeS3ChunkInfoList( - fsId, inodeId, chunkIndexs[i], &lists2add[i], - &lists2del[i]); + fsId, inodeId, chunkIndexs[i], &lists2add[i], &lists2del[i], + logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); } } @@ -623,11 +640,12 @@ TEST_F(InodeStorageTest, PaddingInodeS3ChunkInfo) { uint32_t fsId = 1; uint64_t inodeId = 1; InodeStorage storage(kvStorage_, nameGenerator_, 0); + ASSERT_TRUE(storage.Init()); S3ChunkInfoList list2del; // step1: insert inode Inode inode = GenInode(fsId, inodeId); - ASSERT_EQ(storage.Insert(inode), MetaStatusCode::OK); + ASSERT_EQ(storage.Insert(inode, logIndex_++), MetaStatusCode::OK); // step2: append s3chunkinfo std::vector chunkIndexs{1, 3, 2, 1, 2}; @@ -639,7 +657,7 @@ TEST_F(InodeStorageTest, PaddingInodeS3ChunkInfo) { for (size_t i = 0; i < chunkIndexs.size(); i++) { MetaStatusCode rc = storage.ModifyInodeS3ChunkInfoList( - fsId, inodeId, chunkIndexs[i], &lists2add[i], nullptr); + fsId, inodeId, chunkIndexs[i], &lists2add[i], nullptr, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); } ASSERT_EQ(inode.mutable_s3chunkinfomap()->size(), 0); @@ -689,16 +707,17 @@ TEST_F(InodeStorageTest, PaddingInodeS3ChunkInfo) { TEST_F(InodeStorageTest, GetAllS3ChunkInfoList) { InodeStorage storage(kvStorage_, nameGenerator_, 0); + ASSERT_TRUE(storage.Init()); uint64_t chunkIndex = 1; S3ChunkInfoList list2add = GenS3ChunkInfoList(1, 10); // step1: prepare inode and its s3chunkinfo auto prepareInode = [&](uint32_t fsId, uint64_t inodeId) { Inode inode = GenInode(fsId, inodeId); - ASSERT_EQ(storage.Insert(inode), MetaStatusCode::OK); + ASSERT_EQ(storage.Insert(inode, logIndex_++), MetaStatusCode::OK); MetaStatusCode rc = storage.ModifyInodeS3ChunkInfoList( - fsId, inodeId, chunkIndex, &list2add, nullptr); + fsId, inodeId, chunkIndex, &list2add, nullptr, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); }; @@ -731,7 +750,7 @@ TEST_F(InodeStorageTest, 
TestUpdateVolumeExtentSlice) { {MetaStatusCode::OK, Status::OK()}, {MetaStatusCode::STORAGE_INTERNAL_ERROR, Status::InternalError()}}; - for (const auto &test : cases) { + for (const auto& test : cases) { auto kvStorage = std::make_shared(); InodeStorage storage(kvStorage, nameGenerator_, 0); @@ -743,15 +762,31 @@ TEST_F(InodeStorageTest, TestUpdateVolumeExtentSlice) { const std::string expectTableName = nameGenerator_->GetVolumeExtentTableName(); const std::string expectKey = "4:1:1:" + std::to_string(slice.offset()); + const std::string appliedTableName = + nameGenerator_->GetAppliedIndexTableName(); + const std::string appliedKey = "inode"; + + if (test.second.ok()) { + EXPECT_CALL(*kvStorage, SSet(appliedTableName, appliedKey, _)) + .Times(1) + .WillOnce(Return(Status::OK())); + } EXPECT_CALL(*kvStorage, SSet(expectTableName, expectKey, _)) + .Times(1) .WillOnce(Return(test.second)); - ASSERT_EQ(test.first, - storage.UpdateVolumeExtentSlice(fsId, inodeId, slice)); + EXPECT_CALL(*kvStorage, BeginTransaction()).WillOnce(Return(kvStorage)); + if (test.second.ok()) { + EXPECT_CALL(*kvStorage, Commit).WillOnce(Return(Status::OK())); + } else { + EXPECT_CALL(*kvStorage, Rollback).WillOnce(Return(Status::OK())); + } + ASSERT_EQ(test.first, storage.UpdateVolumeExtentSlice( + fsId, inodeId, slice, logIndex_++)); } } -static void RandomSetExtent(VolumeExtent *ext) { +static void RandomSetExtent(VolumeExtent* ext) { std::random_device rd; ext->set_fsoffset(rd()); @@ -760,16 +795,17 @@ static void RandomSetExtent(VolumeExtent *ext) { ext->set_isused(rd() & 1); } -static bool PrepareGetAllVolumeExtentTest(InodeStorage *storage, uint32_t fsId, +static bool PrepareGetAllVolumeExtentTest(InodeStorage* storage, uint32_t fsId, uint64_t inodeId, - std::vector *out) { + std::vector* out, + int64_t logIndex) { VolumeExtentSlice slice1; slice1.set_offset(0); RandomSetExtent(slice1.add_extents()); RandomSetExtent(slice1.add_extents()); - auto st = 
storage->UpdateVolumeExtentSlice(fsId, inodeId, slice1); + auto st = storage->UpdateVolumeExtentSlice(fsId, inodeId, slice1, logIndex); if (st != MetaStatusCode::OK) { return false; } @@ -780,7 +816,7 @@ static bool PrepareGetAllVolumeExtentTest(InodeStorage *storage, uint32_t fsId, RandomSetExtent(slice2.add_extents()); RandomSetExtent(slice2.add_extents()); - st = storage->UpdateVolumeExtentSlice(fsId, inodeId, slice2); + st = storage->UpdateVolumeExtentSlice(fsId, inodeId, slice2, logIndex); if (st != MetaStatusCode::OK) { return false; } @@ -791,29 +827,29 @@ static bool PrepareGetAllVolumeExtentTest(InodeStorage *storage, uint32_t fsId, RandomSetExtent(slice3.add_extents()); RandomSetExtent(slice3.add_extents()); - st = storage->UpdateVolumeExtentSlice(fsId, inodeId * 10, slice3); + st = storage->UpdateVolumeExtentSlice(fsId, inodeId * 10, slice3, logIndex); out->push_back(slice1); out->push_back(slice2); return true; } -static bool operator==(const VolumeExtentSliceList &list, - const std::vector &slices) { +static bool operator==(const VolumeExtentSliceList& list, + const std::vector& slices) { std::vector clist(list.slices().begin(), list.slices().end()); auto copy = slices; std::sort(copy.begin(), copy.end(), - [](const VolumeExtentSlice &s1, const VolumeExtentSlice &s2) { + [](const VolumeExtentSlice& s1, const VolumeExtentSlice& s2) { return s1.offset() < s2.offset(); }); return true; } -static bool operator==(const VolumeExtentSlice &s1, - const VolumeExtentSlice &s2) { +static bool operator==(const VolumeExtentSlice& s1, + const VolumeExtentSlice& s2) { return google::protobuf::util::MessageDifferencer::Equals(s1, s2); } @@ -825,14 +861,14 @@ TEST_F(InodeStorageTest, TestGetAllVolumeExtent) { std::make_shared(opts); std::shared_ptr kvStore = kvStorage_; - for (auto &store : {memStore, kvStore}) { + for (auto& store : {memStore, kvStore}) { InodeStorage storage(store, nameGenerator_, 0); const uint32_t fsId = 1; const uint64_t inodeId = 2; std::vector 
slices; - ASSERT_TRUE( - PrepareGetAllVolumeExtentTest(&storage, fsId, inodeId, &slices)); + ASSERT_TRUE(PrepareGetAllVolumeExtentTest(&storage, fsId, inodeId, + &slices, logIndex_++)); VolumeExtentSliceList list; ASSERT_EQ(MetaStatusCode::OK, @@ -869,14 +905,15 @@ TEST_F(InodeStorageTest, TestGetVolumeExtentByOffset) { std::make_shared(opts); std::shared_ptr kvStore = kvStorage_; - for (auto &store : {kvStorage_, memStore}) { + for (auto& store : {kvStorage_, memStore}) { InodeStorage storage(store, nameGenerator_, 0); + ASSERT_TRUE(storage.Init()); const uint32_t fsId = 1; const uint64_t inodeId = 2; std::vector slices; - ASSERT_TRUE( - PrepareGetAllVolumeExtentTest(&storage, fsId, inodeId, &slices)) + ASSERT_TRUE(PrepareGetAllVolumeExtentTest(&storage, fsId, inodeId, + &slices, logIndex_++)) << ToString(store->Type()); VolumeExtentSlice slice; @@ -899,9 +936,9 @@ TEST_F(InodeStorageTest, TestGetVolumeExtentByOffset) { TEST_F(InodeStorageTest, Test_UpdateInodeWithDeallocate) { auto inode = GenInode(1, 1); InodeStorage storage(kvStorage_, nameGenerator_, 0); - ASSERT_EQ(storage.Insert(inode), MetaStatusCode::OK); + ASSERT_EQ(storage.Insert(inode, logIndex_++), MetaStatusCode::OK); - ASSERT_EQ(MetaStatusCode::OK, storage.Update(inode, true)); + ASSERT_EQ(MetaStatusCode::OK, storage.Update(inode, logIndex_++, true)); auto tableName = nameGenerator_->GetDeallocatableInodeTableName(); Key4Inode key(1, 1); @@ -916,6 +953,7 @@ TEST_F(InodeStorageTest, Test_UpdateDeallocatableBlockGroup) { uint32_t fsId = 1; auto tableName = nameGenerator_->GetDeallocatableBlockGroupTableName(); InodeStorage storage(kvStorage_, nameGenerator_, 0); + ASSERT_TRUE(storage.Init()); uint64_t increaseSize = 100 * 1024; uint64_t decreaseSize = 4 * 1024; Key4DeallocatableBlockGroup key(fsId, blockGroupOffset); @@ -934,8 +972,8 @@ TEST_F(InodeStorageTest, Test_UpdateDeallocatableBlockGroup) { inputVec.Add()->CopyFrom(blockGroup); // increase first time - ASSERT_EQ(MetaStatusCode::OK, - 
storage.UpdateDeallocatableBlockGroup(fsId, inputVec)); + ASSERT_EQ(MetaStatusCode::OK, storage.UpdateDeallocatableBlockGroup( + fsId, inputVec, logIndex_++)); DeallocatableBlockGroup out; auto st = kvStorage_->HGet(tableName, key.SerializeToString(), &out); ASSERT_TRUE(st.ok()); @@ -945,8 +983,8 @@ TEST_F(InodeStorageTest, Test_UpdateDeallocatableBlockGroup) { ASSERT_EQ(0, out.inodeidunderdeallocate_size()); // increase second time - ASSERT_EQ(MetaStatusCode::OK, - storage.UpdateDeallocatableBlockGroup(fsId, inputVec)); + ASSERT_EQ(MetaStatusCode::OK, storage.UpdateDeallocatableBlockGroup( + fsId, inputVec, logIndex_++)); st = kvStorage_->HGet(tableName, key.SerializeToString(), &out); ASSERT_TRUE(st.ok()); ASSERT_EQ(increaseSize * 2, out.deallocatablesize()); @@ -964,8 +1002,8 @@ TEST_F(InodeStorageTest, Test_UpdateDeallocatableBlockGroup) { mark->add_inodeidunderdeallocate(1); inputVec.Add()->CopyFrom(blockGroup); - ASSERT_EQ(MetaStatusCode::OK, - storage.UpdateDeallocatableBlockGroup(fsId, inputVec)); + ASSERT_EQ(MetaStatusCode::OK, storage.UpdateDeallocatableBlockGroup( + fsId, inputVec, logIndex_++)); DeallocatableBlockGroup out; auto st = kvStorage_->HGet(tableName, key.SerializeToString(), &out); ASSERT_TRUE(st.ok()); @@ -982,8 +1020,8 @@ TEST_F(InodeStorageTest, Test_UpdateDeallocatableBlockGroup) { auto decrease = blockGroup.mutable_decrease(); decrease->set_decreasedeallocatablesize(decreaseSize); inputVec.Add()->CopyFrom(blockGroup); - ASSERT_EQ(MetaStatusCode::OK, - storage.UpdateDeallocatableBlockGroup(fsId, inputVec)); + ASSERT_EQ(MetaStatusCode::OK, storage.UpdateDeallocatableBlockGroup( + fsId, inputVec, logIndex_++)); DeallocatableBlockGroup out; auto st = kvStorage_->HGet(tableName, key.SerializeToString(), &out); ASSERT_TRUE(st.ok()); diff --git a/curvefs/test/metaserver/metastore_test.cpp b/curvefs/test/metaserver/metastore_test.cpp index 78e357d5c7..f8910d046f 100644 --- a/curvefs/test/metaserver/metastore_test.cpp +++ 
b/curvefs/test/metaserver/metastore_test.cpp @@ -51,13 +51,13 @@ using ::testing::StrEq; namespace curvefs { namespace metaserver { -using ::curvefs::metaserver::storage::KVStorage; -using ::curvefs::metaserver::storage::StorageOptions; -using ::curvefs::metaserver::storage::RocksDBStorage; +using ::curvefs::client::rpcclient::MockMdsClient; +using ::curvefs::metaserver::copyset::CopysetNode; using ::curvefs::metaserver::storage::Key4S3ChunkInfoList; +using ::curvefs::metaserver::storage::KVStorage; using ::curvefs::metaserver::storage::RandomStoragePath; -using ::curvefs::metaserver::copyset::CopysetNode; -using ::curvefs::client::rpcclient::MockMdsClient; +using ::curvefs::metaserver::storage::RocksDBStorage; +using ::curvefs::metaserver::storage::StorageOptions; namespace { @@ -68,7 +68,7 @@ class MockSnapshotWriter : public braft::SnapshotWriter { MOCK_METHOD1(save_meta, int(const braft::SnapshotMeta&)); MOCK_METHOD1(add_file, int(const std::string&)); MOCK_METHOD2(add_file, - int(const std::string &, const google::protobuf::Message *)); + int(const std::string&, const google::protobuf::Message*)); MOCK_METHOD1(remove_file, int(const std::string&)); }; @@ -102,6 +102,7 @@ class MetastoreTest : public ::testing::Test { // init fsinfo mdsCli_ = std::make_shared(); FsInfoManager::GetInstance().SetMdsClient(mdsCli_); + logIndex_ = 0; } void TearDown() override { @@ -123,16 +124,12 @@ class MetastoreTest : public ::testing::Test { return result; } - bool CompareInode(const Inode &first, const Inode &second) { - uint64_t firstMtime = first.mtime() * 1000000000u - + first.mtime_ns(); - uint64_t secondMtime = second.mtime() * 1000000000u - + second.mtime_ns(); + bool CompareInode(const Inode& first, const Inode& second) { + uint64_t firstMtime = first.mtime() * 1000000000u + first.mtime_ns(); + uint64_t secondMtime = second.mtime() * 1000000000u + second.mtime_ns(); - uint64_t firstCtime = first.ctime() * 1000000000u - + first.ctime_ns(); - uint64_t secondCtime = 
second.ctime() * 1000000000u - + second.ctime_ns(); + uint64_t firstCtime = first.ctime() * 1000000000u + first.ctime_ns(); + uint64_t secondCtime = second.ctime() * 1000000000u + second.ctime_ns(); return first.fsid() == second.fsid() && first.atime() == second.atime() && @@ -141,20 +138,19 @@ class MetastoreTest : public ::testing::Test { first.length() == second.length() && first.uid() == second.uid() && first.gid() == second.gid() && first.mode() == second.mode() && first.type() == second.type() && - firstMtime >= secondMtime && - firstCtime >= secondCtime && + firstMtime >= secondMtime && firstCtime >= secondCtime && first.symlink() == second.symlink() && first.nlink() >= second.nlink(); } - void PrintDentry(const Dentry &dentry) { + void PrintDentry(const Dentry& dentry) { LOG(INFO) << "dentry: fsid = " << dentry.fsid() << ", inodeid = " << dentry.inodeid() << ", name = " << dentry.name() << ", parentinodeid = " << dentry.parentinodeid(); } - bool CompareDentry(const Dentry &first, const Dentry &second) { + bool CompareDentry(const Dentry& first, const Dentry& second) { bool ret = first.fsid() == second.fsid() && first.inodeid() == second.inodeid() && first.parentinodeid() == second.parentinodeid() && @@ -166,8 +162,8 @@ class MetastoreTest : public ::testing::Test { return ret; } - bool ComparePartition(const PartitionInfo &first, - const PartitionInfo &second) { + bool ComparePartition(const PartitionInfo& first, + const PartitionInfo& second) { bool ret = first.fsid() == second.fsid() && first.poolid() == second.poolid() && first.copysetid() == second.copysetid() && @@ -187,11 +183,9 @@ class MetastoreTest : public ::testing::Test { bool EqualS3ChunkInfo(const S3ChunkInfo& lhs, const S3ChunkInfo& rhs) { return lhs.chunkid() == rhs.chunkid() && - lhs.compaction() == rhs.compaction() && - lhs.offset() == rhs.offset() && - lhs.len() == rhs.len() && - lhs.size() == rhs.size() && - lhs.zero() == rhs.zero(); + lhs.compaction() == rhs.compaction() && + 
lhs.offset() == rhs.offset() && lhs.len() == rhs.len() && + lhs.size() == rhs.size() && lhs.zero() == rhs.zero(); } bool EqualS3ChunkInfoList(const S3ChunkInfoList& lhs, @@ -244,15 +238,15 @@ class MetastoreTest : public ::testing::Test { class OnSnapshotSaveDoneImpl : public OnSnapshotSaveDoneClosure { public: - void SetSuccess() { + void SetSuccess() override { ret_ = true; LOG(INFO) << "OnSnapshotSaveDone success"; } - void SetError(MetaStatusCode code) { + void SetError(MetaStatusCode code) override { ret_ = false; LOG(INFO) << "OnSnapshotSaveDone error"; } - void Run() { + void Run() override { LOG(INFO) << "OnSnapshotSaveDone Run"; std::unique_lock lk(mtx_); finished_ = true; @@ -285,6 +279,7 @@ class MetastoreTest : public ::testing::Test { std::shared_ptr copyset_; std::shared_ptr mdsCli_; StorageOptions options_; + int64_t logIndex_; }; TEST_F(MetastoreTest, partition) { @@ -301,13 +296,13 @@ TEST_F(MetastoreTest, partition) { partitionInfo.set_start(100); partitionInfo.set_end(1000); createPartitionRequest.mutable_partition()->CopyFrom(partitionInfo); - MetaStatusCode ret = metastore.CreatePartition(&createPartitionRequest, - &createPartitionResponse); + MetaStatusCode ret = metastore.CreatePartition( + &createPartitionRequest, &createPartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::OK); ASSERT_EQ(createPartitionResponse.statuscode(), ret); ret = metastore.CreatePartition(&createPartitionRequest, - &createPartitionResponse); + &createPartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::OK); ASSERT_EQ(createPartitionResponse.statuscode(), ret); @@ -317,18 +312,18 @@ TEST_F(MetastoreTest, partition) { deletePartitionRequest.set_copysetid(partitionInfo.copysetid()); deletePartitionRequest.set_partitionid(partitionInfo.partitionid()); ret = metastore.DeletePartition(&deletePartitionRequest, - &deletePartitionResponse); + &deletePartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::OK); 
ASSERT_EQ(deletePartitionResponse.statuscode(), ret); ret = metastore.DeletePartition(&deletePartitionRequest, - &deletePartitionResponse); + &deletePartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::PARTITION_NOT_FOUND); ASSERT_EQ(deletePartitionResponse.statuscode(), ret); // after delete, create partiton1 again ret = metastore.CreatePartition(&createPartitionRequest, - &createPartitionResponse); + &createPartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::OK); ASSERT_EQ(createPartitionResponse.statuscode(), ret); @@ -337,7 +332,7 @@ TEST_F(MetastoreTest, partition) { partitionInfo2.set_partitionid(2); createPartitionRequest.mutable_partition()->CopyFrom(partitionInfo2); ret = metastore.CreatePartition(&createPartitionRequest, - &createPartitionResponse); + &createPartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::OK); ASSERT_EQ(createPartitionResponse.statuscode(), ret); @@ -365,18 +360,18 @@ TEST_F(MetastoreTest, partition) { createRequest.set_mode(mode); createRequest.set_type(type); - ret = metastore.CreateInode(&createRequest, &createResponse); + ret = metastore.CreateInode(&createRequest, &createResponse, logIndex_++); ASSERT_EQ(createResponse.statuscode(), ret); ASSERT_EQ(createResponse.statuscode(), MetaStatusCode::PARTITION_NOT_FOUND); createRequest.set_partitionid(partitionId); - ret = metastore.CreateInode(&createRequest, &createResponse); + ret = metastore.CreateInode(&createRequest, &createResponse, logIndex_++); ASSERT_EQ(createResponse.statuscode(), ret); ASSERT_EQ(createResponse.statuscode(), MetaStatusCode::OK); // partition has mata, delete fail ret = metastore.DeletePartition(&deletePartitionRequest, - &deletePartitionResponse); + &deletePartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::PARTITION_DELETING); ASSERT_EQ(deletePartitionResponse.statuscode(), ret); @@ -400,8 +395,8 @@ TEST_F(MetastoreTest, test_inode) { partitionInfo1.set_start(100); partitionInfo1.set_end(1000); 
createPartitionRequest.mutable_partition()->CopyFrom(partitionInfo1); - MetaStatusCode ret = metastore.CreatePartition(&createPartitionRequest, - &createPartitionResponse); + MetaStatusCode ret = metastore.CreatePartition( + &createPartitionRequest, &createPartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::OK); ASSERT_EQ(createPartitionResponse.statuscode(), ret); @@ -435,12 +430,12 @@ TEST_F(MetastoreTest, test_inode) { createRequest.set_type(type); createRequest.set_allocated_create(tm); // CreateInde wrong partitionid - ret = metastore.CreateInode(&createRequest, &createResponse); + ret = metastore.CreateInode(&createRequest, &createResponse, logIndex_++); ASSERT_EQ(createResponse.statuscode(), ret); ASSERT_EQ(createResponse.statuscode(), MetaStatusCode::PARTITION_NOT_FOUND); createRequest.set_partitionid(partitionId); - ret = metastore.CreateInode(&createRequest, &createResponse); + ret = metastore.CreateInode(&createRequest, &createResponse, logIndex_++); ASSERT_EQ(createResponse.statuscode(), ret); ASSERT_EQ(createResponse.statuscode(), MetaStatusCode::OK); ASSERT_TRUE(createResponse.has_inode()); @@ -453,7 +448,7 @@ TEST_F(MetastoreTest, test_inode) { createRequest.set_type(FsFileType::TYPE_S3); CreateInodeResponse createResponse2; - ret = metastore.CreateInode(&createRequest, &createResponse2); + ret = metastore.CreateInode(&createRequest, &createResponse2, logIndex_++); ASSERT_EQ(createResponse2.statuscode(), ret); ASSERT_EQ(createResponse2.statuscode(), MetaStatusCode::OK); ASSERT_TRUE(createResponse2.has_inode()); @@ -473,17 +468,17 @@ TEST_F(MetastoreTest, test_inode) { // type symlink createRequest.set_type(FsFileType::TYPE_SYM_LINK); CreateInodeResponse createResponse3; - ret = metastore.CreateInode(&createRequest, &createResponse3); + ret = metastore.CreateInode(&createRequest, &createResponse3, logIndex_++); ASSERT_EQ(createResponse3.statuscode(), MetaStatusCode::SYM_LINK_EMPTY); createRequest.set_type(FsFileType::TYPE_SYM_LINK); 
createRequest.set_symlink(""); - ret = metastore.CreateInode(&createRequest, &createResponse3); + ret = metastore.CreateInode(&createRequest, &createResponse3, logIndex_++); ASSERT_EQ(createResponse3.statuscode(), MetaStatusCode::SYM_LINK_EMPTY); createRequest.set_type(FsFileType::TYPE_SYM_LINK); createRequest.set_symlink("symlink"); - ret = metastore.CreateInode(&createRequest, &createResponse3); + ret = metastore.CreateInode(&createRequest, &createResponse3, logIndex_++); ASSERT_EQ(createResponse3.statuscode(), ret); ASSERT_EQ(createResponse3.statuscode(), MetaStatusCode::OK); ASSERT_TRUE(createResponse3.has_inode()); @@ -505,12 +500,12 @@ TEST_F(MetastoreTest, test_inode) { getRequest.set_inodeid(createResponse.inode().inodeid()); // GetInode wrong partitionid - ret = metastore.GetInode(&getRequest, &getResponse); + ret = metastore.GetInode(&getRequest, &getResponse, logIndex_++); ASSERT_EQ(getResponse.statuscode(), MetaStatusCode::PARTITION_NOT_FOUND); ASSERT_EQ(getResponse.statuscode(), ret); getRequest.set_partitionid(partitionId); - ret = metastore.GetInode(&getRequest, &getResponse); + ret = metastore.GetInode(&getRequest, &getResponse, logIndex_++); ASSERT_EQ(getResponse.statuscode(), MetaStatusCode::OK); ASSERT_EQ(getResponse.statuscode(), ret); ASSERT_TRUE(getResponse.has_inode()); @@ -529,7 +524,7 @@ TEST_F(MetastoreTest, test_inode) { getRequest2.set_partitionid(partitionId); getRequest2.set_fsid(fsId); getRequest2.set_inodeid(createResponse.inode().inodeid() + 100); - ret = metastore.GetInode(&getRequest2, &getResponse2); + ret = metastore.GetInode(&getRequest2, &getResponse2, logIndex_++); ASSERT_EQ(getResponse2.statuscode(), MetaStatusCode::NOT_FOUND); ASSERT_EQ(getResponse2.statuscode(), ret); @@ -544,12 +539,12 @@ TEST_F(MetastoreTest, test_inode) { updateRequest.set_inodeid(createResponse.inode().inodeid()); // UpdateInode wrong partitionid - ret = metastore.UpdateInode(&updateRequest, &updateResponse); + ret = 
metastore.UpdateInode(&updateRequest, &updateResponse, logIndex_++); ASSERT_EQ(updateResponse.statuscode(), MetaStatusCode::PARTITION_NOT_FOUND); ASSERT_EQ(updateResponse.statuscode(), ret); updateRequest.set_partitionid(partitionId); - ret = metastore.UpdateInode(&updateRequest, &updateResponse); + ret = metastore.UpdateInode(&updateRequest, &updateResponse, logIndex_++); ASSERT_EQ(updateResponse.statuscode(), MetaStatusCode::OK); ASSERT_EQ(updateResponse.statuscode(), ret); @@ -560,7 +555,7 @@ TEST_F(MetastoreTest, test_inode) { getRequest3.set_partitionid(partitionId); getRequest3.set_fsid(fsId); getRequest3.set_inodeid(createResponse.inode().inodeid()); - ret = metastore.GetInode(&getRequest3, &getResponse3); + ret = metastore.GetInode(&getRequest3, &getResponse3, logIndex_++); ASSERT_EQ(getResponse3.statuscode(), MetaStatusCode::OK); ASSERT_EQ(getResponse3.statuscode(), ret); ASSERT_TRUE(CompareInode(createResponse.inode(), getResponse3.inode())); @@ -574,7 +569,7 @@ TEST_F(MetastoreTest, test_inode) { updateRequest2.set_inodeid(createResponse.inode().inodeid()); updateRequest2.set_length(length + 1); updateRequest2.set_nlink(100); - ret = metastore.UpdateInode(&updateRequest2, &updateResponse2); + ret = metastore.UpdateInode(&updateRequest2, &updateResponse2, logIndex_++); ASSERT_EQ(updateResponse2.statuscode(), MetaStatusCode::OK); ASSERT_EQ(updateResponse2.statuscode(), ret); @@ -585,7 +580,7 @@ TEST_F(MetastoreTest, test_inode) { getRequest4.set_partitionid(partitionId); getRequest4.set_fsid(fsId); getRequest4.set_inodeid(createResponse.inode().inodeid()); - ret = metastore.GetInode(&getRequest4, &getResponse4); + ret = metastore.GetInode(&getRequest4, &getResponse4, logIndex_++); ASSERT_EQ(getResponse4.statuscode(), MetaStatusCode::OK); ASSERT_EQ(getResponse4.statuscode(), ret); ASSERT_FALSE(CompareInode(createResponse.inode(), getResponse4.inode())); @@ -601,7 +596,7 @@ TEST_F(MetastoreTest, test_inode) { 
updateRequest3.set_inodeid(createResponse.inode().inodeid()); S3ChunkInfoList s3ChunkInfoList; updateRequest3.mutable_s3chunkinfomap()->insert({0, s3ChunkInfoList}); - ret = metastore.UpdateInode(&updateRequest3, &updateResponse3); + ret = metastore.UpdateInode(&updateRequest3, &updateResponse3, logIndex_++); ASSERT_EQ(updateResponse3.statuscode(), MetaStatusCode::OK); ASSERT_EQ(updateResponse3.statuscode(), ret); @@ -615,16 +610,16 @@ TEST_F(MetastoreTest, test_inode) { deleteRequest.set_inodeid(createResponse.inode().inodeid()); // DeleteInode wrong partitionid - ret = metastore.DeleteInode(&deleteRequest, &deleteResponse); + ret = metastore.DeleteInode(&deleteRequest, &deleteResponse, logIndex_++); ASSERT_EQ(deleteResponse.statuscode(), MetaStatusCode::PARTITION_NOT_FOUND); ASSERT_EQ(deleteResponse.statuscode(), ret); deleteRequest.set_partitionid(partitionId); - ret = metastore.DeleteInode(&deleteRequest, &deleteResponse); + ret = metastore.DeleteInode(&deleteRequest, &deleteResponse, logIndex_++); ASSERT_EQ(deleteResponse.statuscode(), MetaStatusCode::OK); ASSERT_EQ(deleteResponse.statuscode(), ret); - ret = metastore.DeleteInode(&deleteRequest, &deleteResponse); + ret = metastore.DeleteInode(&deleteRequest, &deleteResponse, logIndex_++); ASSERT_EQ(deleteResponse.statuscode(), MetaStatusCode::OK); ASSERT_EQ(deleteResponse.statuscode(), ret); } @@ -644,8 +639,8 @@ TEST_F(MetastoreTest, test_dentry) { partitionInfo1.set_start(100); partitionInfo1.set_end(1000); createPartitionRequest.mutable_partition()->CopyFrom(partitionInfo1); - MetaStatusCode ret = metastore.CreatePartition(&createPartitionRequest, - &createPartitionResponse); + MetaStatusCode ret = metastore.CreatePartition( + &createPartitionRequest, &createPartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::OK); ASSERT_EQ(createPartitionResponse.statuscode(), ret); @@ -653,7 +648,7 @@ TEST_F(MetastoreTest, test_dentry) { partitionInfo2.set_partitionid(2); 
createPartitionRequest.mutable_partition()->CopyFrom(partitionInfo2); ret = metastore.CreatePartition(&createPartitionRequest, - &createPartitionResponse); + &createPartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::OK); ASSERT_EQ(createPartitionResponse.statuscode(), ret); @@ -681,7 +676,8 @@ TEST_F(MetastoreTest, test_dentry) { createInodeRequest.set_mode(mode); createInodeRequest.set_type(type); - ret = metastore.CreateInode(&createInodeRequest, &createInodeResponse); + ret = metastore.CreateInode(&createInodeRequest, &createInodeResponse, + logIndex_++); ASSERT_EQ(createInodeResponse.statuscode(), ret); ASSERT_EQ(createInodeResponse.statuscode(), MetaStatusCode::OK); @@ -708,17 +704,17 @@ TEST_F(MetastoreTest, test_dentry) { createRequest.mutable_dentry()->CopyFrom(dentry1); // CreateDentry wrong partitionid - ret = metastore.CreateDentry(&createRequest, &createResponse); + ret = metastore.CreateDentry(&createRequest, &createResponse, logIndex_++); ASSERT_EQ(createResponse.statuscode(), MetaStatusCode::PARTITION_NOT_FOUND); ASSERT_EQ(createResponse.statuscode(), ret); createRequest.set_partitionid(partitionId); - ret = metastore.CreateDentry(&createRequest, &createResponse); + ret = metastore.CreateDentry(&createRequest, &createResponse, logIndex_++); ASSERT_EQ(createResponse.statuscode(), MetaStatusCode::OK); ASSERT_EQ(createResponse.statuscode(), ret); - ret = metastore.CreateDentry(&createRequest, &createResponse); + ret = metastore.CreateDentry(&createRequest, &createResponse, logIndex_++); ASSERT_EQ(createResponse.statuscode(), MetaStatusCode::OK); ASSERT_EQ(createResponse.statuscode(), ret); @@ -731,7 +727,7 @@ TEST_F(MetastoreTest, test_dentry) { dentry2.set_type(FsFileType::TYPE_DIRECTORY); createRequest.mutable_dentry()->CopyFrom(dentry2); - ret = metastore.CreateDentry(&createRequest, &createResponse); + ret = metastore.CreateDentry(&createRequest, &createResponse, logIndex_++); ASSERT_EQ(createResponse.statuscode(), MetaStatusCode::OK); 
ASSERT_EQ(createResponse.statuscode(), ret); @@ -744,7 +740,7 @@ TEST_F(MetastoreTest, test_dentry) { dentry3.set_type(FsFileType::TYPE_DIRECTORY); createRequest.mutable_dentry()->CopyFrom(dentry3); - ret = metastore.CreateDentry(&createRequest, &createResponse); + ret = metastore.CreateDentry(&createRequest, &createResponse, logIndex_++); ASSERT_EQ(createResponse.statuscode(), MetaStatusCode::OK); ASSERT_EQ(createResponse.statuscode(), ret); @@ -759,12 +755,12 @@ TEST_F(MetastoreTest, test_dentry) { getRequest.set_name(name); // GetDentry wrong partitionid - ret = metastore.GetDentry(&getRequest, &getResponse); + ret = metastore.GetDentry(&getRequest, &getResponse, logIndex_++); ASSERT_EQ(getResponse.statuscode(), MetaStatusCode::PARTITION_NOT_FOUND); ASSERT_EQ(getResponse.statuscode(), ret); getRequest.set_partitionid(partitionId); - ret = metastore.GetDentry(&getRequest, &getResponse); + ret = metastore.GetDentry(&getRequest, &getResponse, logIndex_++); ASSERT_EQ(getResponse.statuscode(), MetaStatusCode::OK); ASSERT_EQ(getResponse.statuscode(), ret); ASSERT_TRUE(getResponse.has_dentry()); @@ -773,7 +769,7 @@ TEST_F(MetastoreTest, test_dentry) { getRequest.set_fsid(fsId + 1); getRequest.set_parentinodeid(parentId); getRequest.set_name(name); - ret = metastore.GetDentry(&getRequest, &getResponse); + ret = metastore.GetDentry(&getRequest, &getResponse, logIndex_++); ASSERT_EQ(getResponse.statuscode(), MetaStatusCode::PARTITION_ID_MISSMATCH); ASSERT_EQ(getResponse.statuscode(), ret); @@ -787,12 +783,12 @@ TEST_F(MetastoreTest, test_dentry) { listRequest.set_dirinodeid(parentId); // ListDentry wrong partitionid - ret = metastore.ListDentry(&listRequest, &listResponse); + ret = metastore.ListDentry(&listRequest, &listResponse, logIndex_++); ASSERT_EQ(listResponse.statuscode(), MetaStatusCode::PARTITION_NOT_FOUND); ASSERT_EQ(listResponse.statuscode(), ret); listRequest.set_partitionid(partitionId); - ret = metastore.ListDentry(&listRequest, &listResponse); + ret = 
metastore.ListDentry(&listRequest, &listResponse, logIndex_++); ASSERT_EQ(listResponse.statuscode(), MetaStatusCode::OK); ASSERT_EQ(listResponse.statuscode(), ret); ASSERT_EQ(listResponse.dentrys_size(), 3); @@ -807,7 +803,7 @@ TEST_F(MetastoreTest, test_dentry) { listRequest.set_count(100); listResponse.Clear(); - ret = metastore.ListDentry(&listRequest, &listResponse); + ret = metastore.ListDentry(&listRequest, &listResponse, logIndex_++); ASSERT_EQ(listResponse.statuscode(), MetaStatusCode::OK); ASSERT_EQ(listResponse.statuscode(), MetaStatusCode::OK); ASSERT_EQ(listResponse.dentrys_size(), 2); @@ -820,7 +816,7 @@ TEST_F(MetastoreTest, test_dentry) { listRequest.set_count(1); listResponse.Clear(); - ret = metastore.ListDentry(&listRequest, &listResponse); + ret = metastore.ListDentry(&listRequest, &listResponse, logIndex_++); ASSERT_EQ(listResponse.statuscode(), MetaStatusCode::OK); ASSERT_EQ(listResponse.statuscode(), ret); ASSERT_EQ(listResponse.dentrys_size(), 1); @@ -837,12 +833,12 @@ TEST_F(MetastoreTest, test_dentry) { deleteRequest.set_name("dentry2"); // DeleteDentry wrong partitionid - ret = metastore.DeleteDentry(&deleteRequest, &deleteResponse); + ret = metastore.DeleteDentry(&deleteRequest, &deleteResponse, logIndex_++); ASSERT_EQ(deleteResponse.statuscode(), MetaStatusCode::PARTITION_NOT_FOUND); ASSERT_EQ(deleteResponse.statuscode(), ret); deleteRequest.set_partitionid(partitionId); - ret = metastore.DeleteDentry(&deleteRequest, &deleteResponse); + ret = metastore.DeleteDentry(&deleteRequest, &deleteResponse, logIndex_++); ASSERT_EQ(deleteResponse.statuscode(), MetaStatusCode::OK); ASSERT_EQ(deleteResponse.statuscode(), ret); @@ -851,14 +847,14 @@ TEST_F(MetastoreTest, test_dentry) { listRequest.clear_last(); listRequest.clear_count(); listResponse.Clear(); - ret = metastore.ListDentry(&listRequest, &listResponse); + ret = metastore.ListDentry(&listRequest, &listResponse, logIndex_++); ASSERT_EQ(listResponse.statuscode(), MetaStatusCode::OK); 
ASSERT_EQ(listResponse.statuscode(), ret); ASSERT_EQ(listResponse.dentrys_size(), 2); ASSERT_TRUE(CompareDentry(listResponse.dentrys(0), dentry1)); ASSERT_TRUE(CompareDentry(listResponse.dentrys(1), dentry3)); - ret = metastore.DeleteDentry(&deleteRequest, &deleteResponse); + ret = metastore.DeleteDentry(&deleteRequest, &deleteResponse, logIndex_++); ASSERT_EQ(deleteResponse.statuscode(), MetaStatusCode::NOT_FOUND); } @@ -884,13 +880,13 @@ TEST_F(MetastoreTest, persist_success) { partitionInfo.set_txid(100); partitionInfo.set_status(PartitionStatus::READWRITE); createPartitionRequest.mutable_partition()->CopyFrom(partitionInfo); - MetaStatusCode ret = metastore.CreatePartition(&createPartitionRequest, - &createPartitionResponse); + MetaStatusCode ret = metastore.CreatePartition( + &createPartitionRequest, &createPartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::OK); ASSERT_EQ(createPartitionResponse.statuscode(), ret); ret = metastore.CreatePartition(&createPartitionRequest, - &createPartitionResponse); + &createPartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::OK); ASSERT_EQ(createPartitionResponse.statuscode(), ret); @@ -899,7 +895,7 @@ TEST_F(MetastoreTest, persist_success) { partitionInfo2.set_partitionid(partitionId2); createPartitionRequest.mutable_partition()->CopyFrom(partitionInfo2); ret = metastore.CreatePartition(&createPartitionRequest, - &createPartitionResponse); + &createPartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::OK); ASSERT_EQ(createPartitionResponse.statuscode(), ret); @@ -927,13 +923,15 @@ TEST_F(MetastoreTest, persist_success) { createInodeRequest.set_mode(mode); createInodeRequest.set_type(type); - ret = metastore.CreateInode(&createInodeRequest, &createInodeResponse1); + ret = metastore.CreateInode(&createInodeRequest, &createInodeResponse1, + logIndex_++); ASSERT_EQ(createInodeResponse1.statuscode(), ret); ASSERT_EQ(createInodeResponse1.statuscode(), MetaStatusCode::OK); 
ASSERT_EQ(createInodeResponse1.inode().inodeid(), 100); createInodeRequest.set_partitionid(partitionId); - ret = metastore.CreateInode(&createInodeRequest, &createInodeResponse2); + ret = metastore.CreateInode(&createInodeRequest, &createInodeResponse2, + logIndex_++); ASSERT_EQ(createInodeResponse2.statuscode(), ret); ASSERT_EQ(createInodeResponse2.statuscode(), MetaStatusCode::OK); @@ -955,7 +953,8 @@ TEST_F(MetastoreTest, persist_success) { createDentryRequest.set_partitionid(partitionId); createDentryRequest.mutable_dentry()->CopyFrom(dentry1); - ret = metastore.CreateDentry(&createDentryRequest, &createDentryResponse1); + ret = metastore.CreateDentry(&createDentryRequest, &createDentryResponse1, + logIndex_++); ASSERT_EQ(createDentryResponse1.statuscode(), MetaStatusCode::OK); ASSERT_EQ(createDentryResponse1.statuscode(), ret); @@ -963,7 +962,8 @@ TEST_F(MetastoreTest, persist_success) { dentry2.set_inodeid(2); dentry2.set_name("dentry2"); createDentryRequest.mutable_dentry()->CopyFrom(dentry2); - ret = metastore.CreateDentry(&createDentryRequest, &createDentryResponse2); + ret = metastore.CreateDentry(&createDentryRequest, &createDentryResponse2, + logIndex_++); ASSERT_EQ(createDentryResponse2.statuscode(), MetaStatusCode::OK); ASSERT_EQ(createDentryResponse2.statuscode(), ret); @@ -976,7 +976,25 @@ TEST_F(MetastoreTest, persist_success) { // dump MetaStoreImpl to file OnSnapshotSaveDoneImpl done; LOG(INFO) << "MetastoreTest test Save"; - ASSERT_TRUE(metastore.Save(test_path_, &done)); + do { + brpc::ClosureGuard doneGuard(&done); + auto* writer = done.GetSnapshotWriter(); + std::vector files; + if (!metastore.SaveMeta(test_path_, &files)) { + LOG(ERROR) << "Save metadata failed"; + done.SetError(MetaStatusCode::SAVE_META_FAIL); + break; + } + if (!metastore.SaveData(test_path_, &files)) { + LOG(ERROR) << "Save data failed"; + done.SetError(MetaStatusCode::SAVE_META_FAIL); + break; + } + for (const auto& f : files) { + writer->add_file(f); + } + 
done.SetSuccess(); + } while (false); // wait meta save to file done.Wait(); @@ -1033,13 +1051,13 @@ TEST_F(MetastoreTest, persist_deleting_partition_success) { partitionInfo.set_txid(100); partitionInfo.set_status(PartitionStatus::READWRITE); createPartitionRequest.mutable_partition()->CopyFrom(partitionInfo); - MetaStatusCode ret = metastore.CreatePartition(&createPartitionRequest, - &createPartitionResponse); + MetaStatusCode ret = metastore.CreatePartition( + &createPartitionRequest, &createPartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::OK); ASSERT_EQ(createPartitionResponse.statuscode(), ret); ret = metastore.CreatePartition(&createPartitionRequest, - &createPartitionResponse); + &createPartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::OK); ASSERT_EQ(createPartitionResponse.statuscode(), ret); @@ -1048,7 +1066,7 @@ TEST_F(MetastoreTest, persist_deleting_partition_success) { partitionInfo2.set_partitionid(partitionId2); createPartitionRequest.mutable_partition()->CopyFrom(partitionInfo2); ret = metastore.CreatePartition(&createPartitionRequest, - &createPartitionResponse); + &createPartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::OK); ASSERT_EQ(createPartitionResponse.statuscode(), ret); @@ -1076,13 +1094,15 @@ TEST_F(MetastoreTest, persist_deleting_partition_success) { createInodeRequest.set_mode(mode); createInodeRequest.set_type(type); - ret = metastore.CreateInode(&createInodeRequest, &createInodeResponse1); + ret = metastore.CreateInode(&createInodeRequest, &createInodeResponse1, + logIndex_++); ASSERT_EQ(createInodeResponse1.statuscode(), ret); ASSERT_EQ(createInodeResponse1.statuscode(), MetaStatusCode::OK); ASSERT_EQ(createInodeResponse1.inode().inodeid(), 100); createInodeRequest.set_partitionid(partitionId); - ret = metastore.CreateInode(&createInodeRequest, &createInodeResponse2); + ret = metastore.CreateInode(&createInodeRequest, &createInodeResponse2, + logIndex_++); 
ASSERT_EQ(createInodeResponse2.statuscode(), ret); ASSERT_EQ(createInodeResponse2.statuscode(), MetaStatusCode::OK); @@ -1104,7 +1124,8 @@ TEST_F(MetastoreTest, persist_deleting_partition_success) { createDentryRequest.set_partitionid(partitionId); createDentryRequest.mutable_dentry()->CopyFrom(dentry1); - ret = metastore.CreateDentry(&createDentryRequest, &createDentryResponse1); + ret = metastore.CreateDentry(&createDentryRequest, &createDentryResponse1, + logIndex_++); ASSERT_EQ(createDentryResponse1.statuscode(), MetaStatusCode::OK); ASSERT_EQ(createDentryResponse1.statuscode(), ret); @@ -1112,7 +1133,8 @@ TEST_F(MetastoreTest, persist_deleting_partition_success) { dentry2.set_inodeid(2); dentry2.set_name("dentry2"); createDentryRequest.mutable_dentry()->CopyFrom(dentry2); - ret = metastore.CreateDentry(&createDentryRequest, &createDentryResponse2); + ret = metastore.CreateDentry(&createDentryRequest, &createDentryResponse2, + logIndex_++); ASSERT_EQ(createDentryResponse2.statuscode(), MetaStatusCode::OK); ASSERT_EQ(createDentryResponse2.statuscode(), ret); @@ -1129,7 +1151,7 @@ TEST_F(MetastoreTest, persist_deleting_partition_success) { deletePartitionRequest.set_partitionid(partitionId); ret = metastore.DeletePartition(&deletePartitionRequest, - &deletePartitionResponse); + &deletePartitionResponse, logIndex_++); ASSERT_EQ(deletePartitionResponse.statuscode(), MetaStatusCode::PARTITION_DELETING); ASSERT_EQ(deletePartitionResponse.statuscode(), ret); @@ -1139,8 +1161,25 @@ TEST_F(MetastoreTest, persist_deleting_partition_success) { // dump MetaStoreImpl to file OnSnapshotSaveDoneImpl done; LOG(INFO) << "MetastoreTest test Save"; - ASSERT_TRUE(metastore.Save(test_path_, &done)); - + do { + brpc::ClosureGuard doneGuard(&done); + auto* writer = done.GetSnapshotWriter(); + std::vector files; + if (!metastore.SaveMeta(test_path_, &files)) { + LOG(ERROR) << "Save metadata failed"; + done.SetError(MetaStatusCode::SAVE_META_FAIL); + break; + } + if 
(!metastore.SaveData(test_path_, &files)) { + LOG(ERROR) << "Save data failed"; + done.SetError(MetaStatusCode::SAVE_META_FAIL); + break; + } + for (const auto& f : files) { + writer->add_file(f); + } + done.SetSuccess(); + } while (false); // wait meta save to file done.Wait(); ASSERT_TRUE(done.IsSuccess()); @@ -1185,15 +1224,33 @@ TEST_F(MetastoreTest, persist_partition_fail) { PartitionInfo partitionInfo; partitionInfo.set_partitionid(partitionId); createPartitionRequest.mutable_partition()->CopyFrom(partitionInfo); - MetaStatusCode ret = metastore.CreatePartition(&createPartitionRequest, - &createPartitionResponse); + MetaStatusCode ret = metastore.CreatePartition( + &createPartitionRequest, &createPartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::OK); ASSERT_EQ(createPartitionResponse.statuscode(), ret); // dump MetaStoreImpl to file OnSnapshotSaveDoneImpl done; LOG(INFO) << "MetastoreTest test Save"; - ASSERT_FALSE(metastore.Save(test_path_, &done)); + do { + brpc::ClosureGuard doneGuard(&done); + auto* writer = done.GetSnapshotWriter(); + std::vector files; + if (!metastore.SaveMeta(test_path_, &files)) { + LOG(ERROR) << "Save metadata failed"; + done.SetError(MetaStatusCode::SAVE_META_FAIL); + break; + } + if (!metastore.SaveData(test_path_, &files)) { + LOG(ERROR) << "Save data failed"; + done.SetError(MetaStatusCode::SAVE_META_FAIL); + break; + } + for (const auto& f : files) { + writer->add_file(f); + } + done.SetSuccess(); + } while (false); // wait meta save to file done.Wait(); @@ -1218,8 +1275,8 @@ TEST_F(MetastoreTest, persist_dentry_fail) { partitionInfo.set_end(1000); partitionInfo.set_status(common::PartitionStatus::READWRITE); createPartitionRequest.mutable_partition()->CopyFrom(partitionInfo); - MetaStatusCode ret = metastore.CreatePartition(&createPartitionRequest, - &createPartitionResponse); + MetaStatusCode ret = metastore.CreatePartition( + &createPartitionRequest, &createPartitionResponse, logIndex_++); ASSERT_EQ(ret, 
MetaStatusCode::OK); ASSERT_EQ(createPartitionResponse.statuscode(), ret); @@ -1246,7 +1303,8 @@ TEST_F(MetastoreTest, persist_dentry_fail) { createInodeRequest.set_mode(mode); createInodeRequest.set_type(type); - ret = metastore.CreateInode(&createInodeRequest, &createInodeResponse); + ret = metastore.CreateInode(&createInodeRequest, &createInodeResponse, + logIndex_++); ASSERT_EQ(createInodeResponse.statuscode(), ret); ASSERT_EQ(createInodeResponse.statuscode(), MetaStatusCode::OK); uint64_t parentId = createInodeResponse.inode().inodeid(); @@ -1268,14 +1326,33 @@ TEST_F(MetastoreTest, persist_dentry_fail) { createDentryRequest.set_partitionid(partitionId); createDentryRequest.mutable_dentry()->CopyFrom(dentry1); - ret = metastore.CreateDentry(&createDentryRequest, &createDentryResponse1); + ret = metastore.CreateDentry(&createDentryRequest, &createDentryResponse1, + logIndex_++); ASSERT_EQ(createDentryResponse1.statuscode(), MetaStatusCode::OK); ASSERT_EQ(createDentryResponse1.statuscode(), ret); // dump MetaStoreImpl to file OnSnapshotSaveDoneImpl done; LOG(INFO) << "MetastoreTest test Save"; - ASSERT_FALSE(metastore.Save(test_path_, &done)); + do { + brpc::ClosureGuard doneGuard(&done); + auto* writer = done.GetSnapshotWriter(); + std::vector files; + if (!metastore.SaveMeta(test_path_, &files)) { + LOG(ERROR) << "Save metadata failed"; + done.SetError(MetaStatusCode::SAVE_META_FAIL); + break; + } + if (!metastore.SaveData(test_path_, &files)) { + LOG(ERROR) << "Save data failed"; + done.SetError(MetaStatusCode::SAVE_META_FAIL); + break; + } + for (const auto& f : files) { + writer->add_file(f); + } + done.SetSuccess(); + } while (false); // wait meta save to file done.Wait(); @@ -1301,8 +1378,8 @@ TEST_F(MetastoreTest, testBatchGetInodeAttr) { partitionInfo1.set_start(100); partitionInfo1.set_end(1000); createPartitionRequest.mutable_partition()->CopyFrom(partitionInfo1); - MetaStatusCode ret = metastore.CreatePartition(&createPartitionRequest, - 
&createPartitionResponse); + MetaStatusCode ret = metastore.CreatePartition( + &createPartitionRequest, &createPartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::OK); ASSERT_EQ(createPartitionResponse.statuscode(), ret); @@ -1330,12 +1407,12 @@ TEST_F(MetastoreTest, testBatchGetInodeAttr) { createRequest.set_mode(mode); createRequest.set_type(type); - ret = metastore.CreateInode(&createRequest, &createResponse); + ret = metastore.CreateInode(&createRequest, &createResponse, logIndex_++); ASSERT_EQ(createResponse.statuscode(), MetaStatusCode::OK); uint64_t inodeId1 = createResponse.inode().inodeid(); createRequest.set_length(3); - ret = metastore.CreateInode(&createRequest, &createResponse); + ret = metastore.CreateInode(&createRequest, &createResponse, logIndex_++); ASSERT_EQ(createResponse.statuscode(), MetaStatusCode::OK); uint64_t inodeId2 = createResponse.inode().inodeid(); @@ -1348,7 +1425,8 @@ TEST_F(MetastoreTest, testBatchGetInodeAttr) { batchRequest.add_inodeid(inodeId1); batchRequest.add_inodeid(inodeId2); - ret = metastore.BatchGetInodeAttr(&batchRequest, &batchResponse); + ret = + metastore.BatchGetInodeAttr(&batchRequest, &batchResponse, logIndex_++); ASSERT_EQ(batchResponse.statuscode(), MetaStatusCode::OK); ASSERT_EQ(batchResponse.attr_size(), 2); if (batchResponse.attr(0).inodeid() == inodeId1) { @@ -1375,8 +1453,8 @@ TEST_F(MetastoreTest, testBatchGetXAttr) { partitionInfo1.set_start(100); partitionInfo1.set_end(1000); createPartitionRequest.mutable_partition()->CopyFrom(partitionInfo1); - MetaStatusCode ret = metastore.CreatePartition(&createPartitionRequest, - &createPartitionResponse); + MetaStatusCode ret = metastore.CreatePartition( + &createPartitionRequest, &createPartitionResponse, logIndex_++); ASSERT_EQ(ret, MetaStatusCode::OK); ASSERT_EQ(createPartitionResponse.statuscode(), ret); @@ -1404,11 +1482,11 @@ TEST_F(MetastoreTest, testBatchGetXAttr) { createRequest.set_mode(mode); createRequest.set_type(type); - ret = 
metastore.CreateInode(&createRequest, &createResponse); + ret = metastore.CreateInode(&createRequest, &createResponse, logIndex_++); ASSERT_EQ(createResponse.statuscode(), MetaStatusCode::OK); uint64_t inodeId1 = createResponse.inode().inodeid(); - ret = metastore.CreateInode(&createRequest, &createResponse); + ret = metastore.CreateInode(&createRequest, &createResponse, logIndex_++); ASSERT_EQ(createResponse.statuscode(), MetaStatusCode::OK); uint64_t inodeId2 = createResponse.inode().inodeid(); @@ -1424,7 +1502,7 @@ TEST_F(MetastoreTest, testBatchGetXAttr) { updateRequest.mutable_xattr()->insert({XATTRSUBDIRS, "2"}); updateRequest.mutable_xattr()->insert({XATTRENTRIES, "3"}); updateRequest.mutable_xattr()->insert({XATTRFBYTES, "100"}); - ret = metastore.UpdateInode(&updateRequest, &updateResponse); + ret = metastore.UpdateInode(&updateRequest, &updateResponse, logIndex_++); ASSERT_EQ(updateResponse.statuscode(), MetaStatusCode::OK); BatchGetXAttrRequest batchRequest; @@ -1436,44 +1514,52 @@ TEST_F(MetastoreTest, testBatchGetXAttr) { batchRequest.add_inodeid(inodeId1); batchRequest.add_inodeid(inodeId2); - ret = metastore.BatchGetXAttr(&batchRequest, &batchResponse); + ret = metastore.BatchGetXAttr(&batchRequest, &batchResponse, logIndex_++); ASSERT_EQ(batchResponse.statuscode(), MetaStatusCode::OK); ASSERT_EQ(batchResponse.xattr_size(), 2); if (batchResponse.xattr(0).inodeid() == inodeId1) { - ASSERT_EQ(batchResponse.xattr(0).xattrinfos() - .find(XATTRFILES)->second, "1"); - ASSERT_EQ(batchResponse.xattr(0).xattrinfos() - .find(XATTRSUBDIRS)->second, "2"); - ASSERT_EQ(batchResponse.xattr(0).xattrinfos() - .find(XATTRENTRIES)->second, "3"); - ASSERT_EQ(batchResponse.xattr(0).xattrinfos() - .find(XATTRFBYTES)->second, "100"); - ASSERT_EQ(batchResponse.xattr(1).xattrinfos() - .find(XATTRFILES)->second, "0"); - ASSERT_EQ(batchResponse.xattr(1).xattrinfos() - .find(XATTRSUBDIRS)->second, "0"); - ASSERT_EQ(batchResponse.xattr(1).xattrinfos() - 
.find(XATTRENTRIES)->second, "0"); - ASSERT_EQ(batchResponse.xattr(1).xattrinfos() - .find(XATTRFBYTES)->second, "0"); + ASSERT_EQ(batchResponse.xattr(0).xattrinfos().find(XATTRFILES)->second, + "1"); + ASSERT_EQ( + batchResponse.xattr(0).xattrinfos().find(XATTRSUBDIRS)->second, + "2"); + ASSERT_EQ( + batchResponse.xattr(0).xattrinfos().find(XATTRENTRIES)->second, + "3"); + ASSERT_EQ(batchResponse.xattr(0).xattrinfos().find(XATTRFBYTES)->second, + "100"); + ASSERT_EQ(batchResponse.xattr(1).xattrinfos().find(XATTRFILES)->second, + "0"); + ASSERT_EQ( + batchResponse.xattr(1).xattrinfos().find(XATTRSUBDIRS)->second, + "0"); + ASSERT_EQ( + batchResponse.xattr(1).xattrinfos().find(XATTRENTRIES)->second, + "0"); + ASSERT_EQ(batchResponse.xattr(1).xattrinfos().find(XATTRFBYTES)->second, + "0"); } else { - ASSERT_EQ(batchResponse.xattr(1).xattrinfos() - .find(XATTRFILES)->second, "1"); - ASSERT_EQ(batchResponse.xattr(1).xattrinfos() - .find(XATTRSUBDIRS)->second, "2"); - ASSERT_EQ(batchResponse.xattr(1).xattrinfos() - .find(XATTRENTRIES)->second, "3"); - ASSERT_EQ(batchResponse.xattr(1).xattrinfos() - .find(XATTRFBYTES)->second, "100"); - ASSERT_EQ(batchResponse.xattr(0).xattrinfos() - .find(XATTRFILES)->second, "0"); - ASSERT_EQ(batchResponse.xattr(0).xattrinfos() - .find(XATTRSUBDIRS)->second, "0"); - ASSERT_EQ(batchResponse.xattr(0).xattrinfos() - .find(XATTRENTRIES)->second, "0"); - ASSERT_EQ(batchResponse.xattr(0).xattrinfos() - .find(XATTRFBYTES)->second, "0"); + ASSERT_EQ(batchResponse.xattr(1).xattrinfos().find(XATTRFILES)->second, + "1"); + ASSERT_EQ( + batchResponse.xattr(1).xattrinfos().find(XATTRSUBDIRS)->second, + "2"); + ASSERT_EQ( + batchResponse.xattr(1).xattrinfos().find(XATTRENTRIES)->second, + "3"); + ASSERT_EQ(batchResponse.xattr(1).xattrinfos().find(XATTRFBYTES)->second, + "100"); + ASSERT_EQ(batchResponse.xattr(0).xattrinfos().find(XATTRFILES)->second, + "0"); + ASSERT_EQ( + batchResponse.xattr(0).xattrinfos().find(XATTRSUBDIRS)->second, + "0"); + 
ASSERT_EQ( + batchResponse.xattr(0).xattrinfos().find(XATTRENTRIES)->second, + "0"); + ASSERT_EQ(batchResponse.xattr(0).xattrinfos().find(XATTRFBYTES)->second, + "0"); } } @@ -1499,7 +1585,8 @@ TEST_F(MetastoreTest, GetOrModifyS3ChunkInfo) { partitionInfo.set_start(1); partitionInfo.set_end(100); request.mutable_partition()->CopyFrom(partitionInfo); - MetaStatusCode rc = metastore.CreatePartition(&request, &response); + MetaStatusCode rc = + metastore.CreatePartition(&request, &response, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(response.statuscode(), rc); } @@ -1512,7 +1599,7 @@ TEST_F(MetastoreTest, GetOrModifyS3ChunkInfo) { request.set_partitionid(100); MetaStatusCode rc = metastore.GetOrModifyS3ChunkInfo( - &request, &response, nullptr); + &request, &response, nullptr, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::PARTITION_NOT_FOUND); ASSERT_EQ(response.statuscode(), rc); } @@ -1522,7 +1609,7 @@ TEST_F(MetastoreTest, GetOrModifyS3ChunkInfo) { LOG(INFO) << "CASE 2: GetOrModifyS3ChunkInfo success"; GetOrModifyS3ChunkInfoRequest request; GetOrModifyS3ChunkInfoResponse response; - std::vector chunkIndexs{ 1, 2 }; + std::vector chunkIndexs{1, 2}; std::vector lists2add{ GenS3ChunkInfoList(100, 200), GenS3ChunkInfoList(300, 400), @@ -1535,12 +1622,12 @@ TEST_F(MetastoreTest, GetOrModifyS3ChunkInfo) { request.set_returns3chunkinfomap(true); for (size_t i = 0; i < chunkIndexs.size(); i++) { request.mutable_s3chunkinfoadd()->insert( - { chunkIndexs[i], lists2add[i] }); + {chunkIndexs[i], lists2add[i]}); } std::shared_ptr iterator; MetaStatusCode rc = metastore.GetOrModifyS3ChunkInfo( - &request, &response, &iterator); + &request, &response, &iterator, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(response.statuscode(), rc); ASSERT_EQ(response.mutable_s3chunkinfomap()->size(), 0); @@ -1554,7 +1641,7 @@ TEST_F(MetastoreTest, GetOrModifyS3ChunkInfo) { << " with unsupport streaming"; GetOrModifyS3ChunkInfoRequest request; 
GetOrModifyS3ChunkInfoResponse response; - std::vector chunkIndexs{ 1, 2 }; + std::vector chunkIndexs{1, 2}; std::vector lists2add{ GenS3ChunkInfoList(100, 200), GenS3ChunkInfoList(300, 400), @@ -1567,12 +1654,12 @@ TEST_F(MetastoreTest, GetOrModifyS3ChunkInfo) { request.set_returns3chunkinfomap(true); for (size_t i = 0; i < chunkIndexs.size(); i++) { request.mutable_s3chunkinfoadd()->insert( - { chunkIndexs[i], lists2add[i] }); + {chunkIndexs[i], lists2add[i]}); } std::shared_ptr iterator; MetaStatusCode rc = metastore.GetOrModifyS3ChunkInfo( - &request, &response, &iterator); + &request, &response, &iterator, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(response.statuscode(), rc); ASSERT_EQ(response.mutable_s3chunkinfomap()->size(), 2); @@ -1585,7 +1672,7 @@ TEST_F(MetastoreTest, GetOrModifyS3ChunkInfo) { << " with unsupport streaming"; GetOrModifyS3ChunkInfoRequest request; GetOrModifyS3ChunkInfoResponse response; - std::vector chunkIndexs{ 1, 2 }; + std::vector chunkIndexs{1, 2}; std::vector lists2add{ GenS3ChunkInfoList(100, 200), GenS3ChunkInfoList(300, 400), @@ -1598,12 +1685,12 @@ TEST_F(MetastoreTest, GetOrModifyS3ChunkInfo) { request.set_returns3chunkinfomap(false); for (size_t i = 0; i < chunkIndexs.size(); i++) { request.mutable_s3chunkinfoadd()->insert( - { chunkIndexs[i], lists2add[i] }); + {chunkIndexs[i], lists2add[i]}); } std::shared_ptr iterator; MetaStatusCode rc = metastore.GetOrModifyS3ChunkInfo( - &request, &response, &iterator); + &request, &response, &iterator, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(response.statuscode(), rc); ASSERT_EQ(response.mutable_s3chunkinfomap()->size(), 0); @@ -1633,7 +1720,8 @@ TEST_F(MetastoreTest, GetInodeWithPaddingS3Meta) { partitionInfo.set_start(1); partitionInfo.set_end(100); request.mutable_partition()->CopyFrom(partitionInfo); - MetaStatusCode rc = metastore.CreatePartition(&request, &response); + MetaStatusCode rc = + metastore.CreatePartition(&request, &response, 
logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(response.statuscode(), rc); } @@ -1653,7 +1741,7 @@ TEST_F(MetastoreTest, GetInodeWithPaddingS3Meta) { request.set_mode(777); request.set_type(FsFileType::TYPE_FILE); - (void)metastore.CreateInode(&request, &response); + (void)metastore.CreateInode(&request, &response, logIndex_++); ASSERT_EQ(response.statuscode(), MetaStatusCode::OK); inodeId = response.inode().inodeid(); } @@ -1663,7 +1751,7 @@ TEST_F(MetastoreTest, GetInodeWithPaddingS3Meta) { GetOrModifyS3ChunkInfoRequest request; GetOrModifyS3ChunkInfoResponse response; - std::vector chunkIndexs{ 1, 2 }; + std::vector chunkIndexs{1, 2}; std::vector list2add{ GenS3ChunkInfoList(100, 149), GenS3ChunkInfoList(200, 249), @@ -1675,13 +1763,13 @@ TEST_F(MetastoreTest, GetInodeWithPaddingS3Meta) { request.set_returns3chunkinfomap(false); for (size_t i = 0; i < chunkIndexs.size(); i++) { request.mutable_s3chunkinfoadd()->insert( - { chunkIndexs[i], list2add[i] }); + {chunkIndexs[i], list2add[i]}); } // ModifyS3ChunkInfo() std::shared_ptr iterator; MetaStatusCode rc = metastore.GetOrModifyS3ChunkInfo( - &request, &response, &iterator); + &request, &response, &iterator, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(response.statuscode(), rc); } @@ -1698,7 +1786,7 @@ TEST_F(MetastoreTest, GetInodeWithPaddingS3Meta) { request.set_inodeid(inodeId); request.set_supportstreaming(true); - auto rc = metastore.GetInode(&request, &response); + auto rc = metastore.GetInode(&request, &response, logIndex_++); ASSERT_EQ(response.statuscode(), MetaStatusCode::OK); ASSERT_EQ(rc, MetaStatusCode::OK); auto inode = response.mutable_inode(); @@ -1711,7 +1799,7 @@ TEST_F(MetastoreTest, GetInodeWithPaddingS3Meta) { GetOrModifyS3ChunkInfoRequest request; GetOrModifyS3ChunkInfoResponse response; - std::vector chunkIndexs{ 3 }; + std::vector chunkIndexs{3}; std::vector list2add{ GenS3ChunkInfoList(1, 1), }; @@ -1722,13 +1810,13 @@ TEST_F(MetastoreTest, 
GetInodeWithPaddingS3Meta) { request.set_returns3chunkinfomap(false); for (size_t i = 0; i < chunkIndexs.size(); i++) { request.mutable_s3chunkinfoadd()->insert( - { chunkIndexs[i], list2add[i] }); + {chunkIndexs[i], list2add[i]}); } // ModifyS3ChunkInfo() std::shared_ptr iterator; MetaStatusCode rc = metastore.GetOrModifyS3ChunkInfo( - &request, &response, &iterator); + &request, &response, &iterator, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(response.statuscode(), rc); } @@ -1745,7 +1833,7 @@ TEST_F(MetastoreTest, GetInodeWithPaddingS3Meta) { request.set_inodeid(inodeId); request.set_supportstreaming(true); - auto rc = metastore.GetInode(&request, &response); + auto rc = metastore.GetInode(&request, &response, logIndex_++); ASSERT_EQ(response.statuscode(), MetaStatusCode::OK); ASSERT_EQ(rc, MetaStatusCode::OK); auto inode = response.mutable_inode(); @@ -1765,7 +1853,7 @@ TEST_F(MetastoreTest, GetInodeWithPaddingS3Meta) { request.set_inodeid(inodeId); request.set_supportstreaming(false); - auto rc = metastore.GetInode(&request, &response); + auto rc = metastore.GetInode(&request, &response, logIndex_++); ASSERT_EQ(response.statuscode(), MetaStatusCode::OK); ASSERT_EQ(rc, MetaStatusCode::OK); auto inode = response.mutable_inode(); @@ -1783,7 +1871,7 @@ TEST_F(MetastoreTest, TestUpdateVolumeExtent_PartitionNotFound) { request.set_partitionid(100); - auto st = metastore.UpdateVolumeExtent(&request, &response); + auto st = metastore.UpdateVolumeExtent(&request, &response, logIndex_++); ASSERT_EQ(st, MetaStatusCode::PARTITION_NOT_FOUND); ASSERT_EQ(MetaStatusCode::PARTITION_NOT_FOUND, response.statuscode()); } @@ -1791,7 +1879,7 @@ TEST_F(MetastoreTest, TestUpdateVolumeExtent_PartitionNotFound) { } // namespace metaserver } // namespace curvefs -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); ::curvefs::common::Process::InitSetProcTitle(argc, argv); diff 
--git a/curvefs/test/metaserver/mock/mock_kv_storage.h b/curvefs/test/metaserver/mock/mock_kv_storage.h index efc968def3..2923b42d02 100644 --- a/curvefs/test/metaserver/mock/mock_kv_storage.h +++ b/curvefs/test/metaserver/mock/mock_kv_storage.h @@ -36,15 +36,13 @@ namespace curvefs { namespace metaserver { namespace storage { -class MockKVStorage : public KVStorage { +class MockKVStorage : public KVStorage, public StorageTransaction { public: MOCK_METHOD3(HGet, Status(const std::string&, const std::string&, ValueType*)); - MOCK_METHOD3(HSet, - Status(const std::string&, - const std::string&, - const ValueType&)); + MOCK_METHOD3(HSet, Status(const std::string&, const std::string&, + const ValueType&)); MOCK_METHOD2(HDel, Status(const std::string&, const std::string&)); @@ -57,16 +55,13 @@ class MockKVStorage : public KVStorage { MOCK_METHOD3(SGet, Status(const std::string&, const std::string&, ValueType*)); - MOCK_METHOD3(SSet, - Status(const std::string&, - const std::string&, - const ValueType&)); + MOCK_METHOD3(SSet, Status(const std::string&, const std::string&, + const ValueType&)); MOCK_METHOD2(SDel, Status(const std::string&, const std::string&)); - MOCK_METHOD2(SSeek, - std::shared_ptr(const std::string&, - const std::string&)); + MOCK_METHOD2(SSeek, std::shared_ptr(const std::string&, + const std::string&)); MOCK_METHOD1(SGetAll, std::shared_ptr(const std::string&)); @@ -88,6 +83,10 @@ class MockKVStorage : public KVStorage { bool(const std::string&, std::vector*)); MOCK_METHOD1(Recover, bool(const std::string&)); + + MOCK_METHOD0(Commit, Status()); + + MOCK_METHOD0(Rollback, Status()); }; } // namespace storage diff --git a/curvefs/test/metaserver/mock/mock_metastore.h b/curvefs/test/metaserver/mock/mock_metastore.h index da5a7b7c56..a8c923d3aa 100644 --- a/curvefs/test/metaserver/mock/mock_metastore.h +++ b/curvefs/test/metaserver/mock/mock_metastore.h @@ -25,10 +25,11 @@ #include -#include #include -#include #include +#include +#include +#include 
#include "curvefs/src/metaserver/metastore.h" @@ -39,72 +40,88 @@ namespace mock { class MockMetaStore : public curvefs::metaserver::MetaStore { public: MOCK_METHOD1(Load, bool(const std::string&)); - MOCK_METHOD2(Save, bool(const std::string&, OnSnapshotSaveDoneClosure*)); + MOCK_METHOD2(SaveMeta, + bool(const std::string&, std::vector* files)); + MOCK_METHOD2(SaveData, + bool(const std::string&, std::vector* files)); MOCK_METHOD0(Clear, bool()); MOCK_METHOD0(Destroy, bool()); - MOCK_METHOD2(CreatePartition, MetaStatusCode(const CreatePartitionRequest*, - CreatePartitionResponse*)); - MOCK_METHOD2(DeletePartition, MetaStatusCode(const DeletePartitionRequest*, - DeletePartitionResponse*)); - MOCK_METHOD1(GetPartitionInfoList, bool(std::list *)); + MOCK_METHOD3(CreatePartition, + MetaStatusCode(const CreatePartitionRequest*, + CreatePartitionResponse*, int64_t logIndex)); + MOCK_METHOD3(DeletePartition, + MetaStatusCode(const DeletePartitionRequest*, + DeletePartitionResponse*, int64_t logIndex)); + MOCK_METHOD1(GetPartitionInfoList, bool(std::list*)); MOCK_METHOD1(GetPartitionSnap, - bool(std::map> *)); - - MOCK_METHOD2(CreateDentry, MetaStatusCode(const CreateDentryRequest*, - CreateDentryResponse*)); - MOCK_METHOD2(DeleteDentry, MetaStatusCode(const DeleteDentryRequest*, - DeleteDentryResponse*)); - MOCK_METHOD2(GetDentry, - MetaStatusCode(const GetDentryRequest*, GetDentryResponse*)); - MOCK_METHOD2(ListDentry, - MetaStatusCode(const ListDentryRequest*, ListDentryResponse*)); - - MOCK_METHOD2(CreateInode, MetaStatusCode(const CreateInodeRequest*, - CreateInodeResponse*)); - MOCK_METHOD2(CreateRootInode, MetaStatusCode(const CreateRootInodeRequest*, - CreateRootInodeResponse*)); - MOCK_METHOD2(CreateManageInode, - MetaStatusCode(const CreateManageInodeRequest*, - CreateManageInodeResponse*)); - MOCK_METHOD2(GetInode, - MetaStatusCode(const GetInodeRequest*, GetInodeResponse*)); - MOCK_METHOD2(BatchGetInodeAttr, + bool(std::map>*)); + + MOCK_METHOD3(CreateDentry, 
+ MetaStatusCode(const CreateDentryRequest*, + CreateDentryResponse*, int64_t logIndex)); + MOCK_METHOD3(DeleteDentry, + MetaStatusCode(const DeleteDentryRequest*, + DeleteDentryResponse*, int64_t logIndex)); + MOCK_METHOD3(GetDentry, + MetaStatusCode(const GetDentryRequest*, GetDentryResponse*, + int64_t logIndex)); + MOCK_METHOD3(ListDentry, + MetaStatusCode(const ListDentryRequest*, ListDentryResponse*, + int64_t logIndex)); + + MOCK_METHOD3(CreateInode, + MetaStatusCode(const CreateInodeRequest*, CreateInodeResponse*, + int64_t logIndex)); + MOCK_METHOD3(CreateRootInode, + MetaStatusCode(const CreateRootInodeRequest*, + CreateRootInodeResponse*, int64_t logIndex)); + MOCK_METHOD3(CreateManageInode, + MetaStatusCode(const CreateManageInodeRequest*, + CreateManageInodeResponse*, int64_t logIndex)); + MOCK_METHOD3(GetInode, MetaStatusCode(const GetInodeRequest*, + GetInodeResponse*, int64_t logIndex)); + MOCK_METHOD3(BatchGetInodeAttr, MetaStatusCode(const BatchGetInodeAttrRequest*, - BatchGetInodeAttrResponse*)); - MOCK_METHOD2(BatchGetXAttr, + BatchGetInodeAttrResponse*, int64_t logIndex)); + MOCK_METHOD3(BatchGetXAttr, MetaStatusCode(const BatchGetXAttrRequest*, - BatchGetXAttrResponse*)); - MOCK_METHOD2(DeleteInode, MetaStatusCode(const DeleteInodeRequest*, - DeleteInodeResponse*)); - MOCK_METHOD2(UpdateInode, MetaStatusCode(const UpdateInodeRequest*, - UpdateInodeResponse*)); - - MOCK_METHOD2(PrepareRenameTx, MetaStatusCode(const PrepareRenameTxRequest*, - PrepareRenameTxResponse*)); + BatchGetXAttrResponse*, int64_t logIndex)); + MOCK_METHOD3(DeleteInode, + MetaStatusCode(const DeleteInodeRequest*, DeleteInodeResponse*, + int64_t logIndex)); + MOCK_METHOD3(UpdateInode, + MetaStatusCode(const UpdateInodeRequest*, UpdateInodeResponse*, + int64_t logIndex)); + + MOCK_METHOD3(PrepareRenameTx, + MetaStatusCode(const PrepareRenameTxRequest*, + PrepareRenameTxResponse*, int64_t logIndex)); MOCK_METHOD0(GetStreamServer, std::shared_ptr()); - 
MOCK_METHOD3(GetOrModifyS3ChunkInfo, MetaStatusCode( - const GetOrModifyS3ChunkInfoRequest* request, - GetOrModifyS3ChunkInfoResponse* response, - std::shared_ptr* iterator)); + MOCK_METHOD4(GetOrModifyS3ChunkInfo, + MetaStatusCode(const GetOrModifyS3ChunkInfoRequest* request, + GetOrModifyS3ChunkInfoResponse* response, + std::shared_ptr* iterator, + int64_t logIndex)); - MOCK_METHOD2(SendS3ChunkInfoByStream, MetaStatusCode( - std::shared_ptr connection, - std::shared_ptr iterator)); + MOCK_METHOD2(SendS3ChunkInfoByStream, + MetaStatusCode(std::shared_ptr connection, + std::shared_ptr iterator)); - MOCK_METHOD2(GetVolumeExtent, + MOCK_METHOD3(GetVolumeExtent, MetaStatusCode(const GetVolumeExtentRequest*, - GetVolumeExtentResponse*)); + GetVolumeExtentResponse*, int64_t logIndex)); - MOCK_METHOD2(UpdateVolumeExtent, + MOCK_METHOD3(UpdateVolumeExtent, MetaStatusCode(const UpdateVolumeExtentRequest*, - UpdateVolumeExtentResponse*)); + UpdateVolumeExtentResponse*, int64_t logIndex)); - MOCK_METHOD2(UpdateDeallocatableBlockGroup, - MetaStatusCode(const UpdateDeallocatableBlockGroupRequest *, - UpdateDeallocatableBlockGroupResponse *)); + MOCK_METHOD3(UpdateDeallocatableBlockGroup, + MetaStatusCode(const UpdateDeallocatableBlockGroupRequest*, + UpdateDeallocatableBlockGroupResponse*, + int64_t logIndex)); }; } // namespace mock diff --git a/curvefs/test/metaserver/mock/mock_partition.h b/curvefs/test/metaserver/mock/mock_partition.h index 9dd48c0f12..c0fb2f9102 100644 --- a/curvefs/test/metaserver/mock/mock_partition.h +++ b/curvefs/test/metaserver/mock/mock_partition.h @@ -34,7 +34,7 @@ class MockPartition : public curvefs::metaserver::Partition { public: MockPartition() : Partition() {} MOCK_METHOD1(GetAllBlockGroup, - MetaStatusCode(std::vector *)); + MetaStatusCode(std::vector*)); MOCK_CONST_METHOD0(GetPartitionId, uint32_t()); MOCK_CONST_METHOD0(GetFsId, uint32_t()); }; diff --git a/curvefs/test/metaserver/partition_clean_test.cpp 
b/curvefs/test/metaserver/partition_clean_test.cpp index 628cdc284e..901b0bc5be 100644 --- a/curvefs/test/metaserver/partition_clean_test.cpp +++ b/curvefs/test/metaserver/partition_clean_test.cpp @@ -40,11 +40,11 @@ using ::testing::Invoke; using ::testing::Matcher; using ::testing::Return; +using ::curvefs::metaserver::copyset::MockCopysetNode; using ::curvefs::metaserver::storage::KVStorage; using ::curvefs::metaserver::storage::RandomStoragePath; using ::curvefs::metaserver::storage::RocksDBStorage; using ::curvefs::metaserver::storage::StorageOptions; -using ::curvefs::metaserver::copyset::MockCopysetNode; namespace curvefs { namespace metaserver { @@ -67,6 +67,7 @@ class PartitionCleanManagerTest : public testing::Test { s3Adaptor_ = std::make_shared(); copyset_ = new MockCopysetNode(); FsInfoManager::GetInstance().SetMdsClient(mdsCli_); + logIndex_ = 0; } void TearDown() override { @@ -97,6 +98,7 @@ class PartitionCleanManagerTest : public testing::Test { std::shared_ptr mdsCli_; std::shared_ptr s3Adaptor_; MockCopysetNode *copyset_; + int64_t logIndex_; }; TEST_F(PartitionCleanManagerTest, test1) { @@ -140,16 +142,18 @@ TEST_F(PartitionCleanManagerTest, test1) { Time tm; tm.set_sec(0); tm.set_nsec(0); - ASSERT_EQ(partition->CreateRootInode(param), MetaStatusCode::OK); - ASSERT_EQ(partition->CreateDentry(dentry, tm), + ASSERT_EQ(partition->CreateRootInode(param, logIndex_++), MetaStatusCode::OK); - ASSERT_EQ(partition->CreateDentry(dentry, tm), + ASSERT_EQ(partition->CreateDentry(dentry, tm, logIndex_++), + MetaStatusCode::OK); + ASSERT_EQ(partition->CreateDentry(dentry, tm, logIndex_++), MetaStatusCode::OK); Inode inode1; param.type = FsFileType::TYPE_S3; param.symlink = ""; - ASSERT_EQ(partition->CreateInode(param, &inode1), MetaStatusCode::OK); + ASSERT_EQ(partition->CreateInode(param, &inode1, logIndex_++), + MetaStatusCode::OK); ASSERT_EQ(partition->GetInodeNum(), 2); ASSERT_EQ(partition->GetDentryNum(), 1); Inode inode2; @@ -164,21 +168,23 @@ 
TEST_F(PartitionCleanManagerTest, test1) { .WillRepeatedly(Return(true)); EXPECT_CALL(*copyset_, Propose(_)) - .WillOnce(Invoke([partition, fsId](const braft::Task &task) { - ASSERT_EQ(partition->DeleteInode(fsId, ROOTINODEID), + .WillOnce(Invoke([partition, fsId, this](const braft::Task& task) { + ASSERT_EQ(partition->DeleteInode(fsId, ROOTINODEID, logIndex_++), MetaStatusCode::OK); LOG(INFO) << "Partition DeleteInode, fsId = " << fsId << ", inodeId = " << ROOTINODEID; task.done->Run(); })) - .WillOnce(Invoke([partition, fsId, inode1](const braft::Task &task) { - ASSERT_EQ(partition->DeleteInode(fsId, inode1.inodeid()), - MetaStatusCode::OK); - LOG(INFO) << "Partition DeleteInode, fsId = " << fsId - << ", inodeId = " << inode1.inodeid(); - task.done->Run(); - })) - .WillOnce(Invoke([partition](const braft::Task &task) { + .WillOnce( + Invoke([partition, fsId, inode1, this](const braft::Task& task) { + ASSERT_EQ( + partition->DeleteInode(fsId, inode1.inodeid(), logIndex_++), + MetaStatusCode::OK); + LOG(INFO) << "Partition DeleteInode, fsId = " << fsId + << ", inodeId = " << inode1.inodeid(); + task.done->Run(); + })) + .WillOnce(Invoke([partition](const braft::Task& task) { LOG(INFO) << "Partition deletePartition"; task.done->Run(); })); diff --git a/curvefs/test/metaserver/partition_test.cpp b/curvefs/test/metaserver/partition_test.cpp index 7c405e335c..7256a136ba 100644 --- a/curvefs/test/metaserver/partition_test.cpp +++ b/curvefs/test/metaserver/partition_test.cpp @@ -32,19 +32,19 @@ #include "curvefs/test/metaserver/storage/utils.h" #include "src/fs/ext4_filesystem_impl.h" -using ::testing::AtLeast; -using ::testing::StrEq; using ::testing::_; +using ::testing::AtLeast; +using ::testing::DoAll; using ::testing::Return; using ::testing::ReturnArg; -using ::testing::DoAll; -using ::testing::SetArgPointee; using ::testing::SaveArg; +using ::testing::SetArgPointee; +using ::testing::StrEq; using ::curvefs::metaserver::storage::KVStorage; -using 
::curvefs::metaserver::storage::StorageOptions; -using ::curvefs::metaserver::storage::RocksDBStorage; using ::curvefs::metaserver::storage::RandomStoragePath; +using ::curvefs::metaserver::storage::RocksDBStorage; +using ::curvefs::metaserver::storage::StorageOptions; namespace curvefs { namespace metaserver { @@ -65,12 +65,14 @@ class PartitionTest : public ::testing::Test { param_.symlink = ""; param_.rdev = 0; - dataDir_ = RandomStoragePath();; + dataDir_ = RandomStoragePath(); + StorageOptions options; options.dataDir = dataDir_; options.localFileSystem = localfs.get(); kvStorage_ = std::make_shared(options); ASSERT_TRUE(kvStorage_->Open()); + logIndex_ = 0; } void TearDown() override { @@ -99,6 +101,7 @@ class PartitionTest : public ::testing::Test { std::string dataDir_; StorageOptions options_; std::shared_ptr kvStorage_; + int64_t logIndex_; }; TEST_F(PartitionTest, testInodeIdGen1) { @@ -112,6 +115,8 @@ TEST_F(PartitionTest, testInodeIdGen1) { Partition partition1(partitionInfo1, kvStorage_); + ASSERT_TRUE(partition1.Init()); + ASSERT_TRUE(partition1.IsDeletable()); for (int i = 0; i < 100; i++) { ASSERT_EQ(partition1.GetNewInodeId(), partitionInfo1.start() + i); @@ -132,6 +137,8 @@ TEST_F(PartitionTest, testInodeIdGen2) { Partition partition1(partitionInfo1, kvStorage_); + ASSERT_TRUE(partition1.Init()); + ASSERT_TRUE(partition1.IsDeletable()); for (int i = 0; i < 50; i++) { ASSERT_EQ(partition1.GetNewInodeId(), partitionInfo1.nextid() + i); @@ -152,6 +159,8 @@ TEST_F(PartitionTest, testInodeIdGen3) { Partition partition1(partitionInfo1, kvStorage_); + ASSERT_TRUE(partition1.Init()); + ASSERT_EQ(partition1.GetNewInodeId(), UINT64_MAX); ASSERT_EQ(partition1.GetNewInodeId(), UINT64_MAX); } @@ -170,6 +179,9 @@ TEST_F(PartitionTest, testInodeIdGen4_NextId) { partitionInfo1.set_end(199); Partition p(partitionInfo1, kvStorage_); + + ASSERT_TRUE(p.Init()); + EXPECT_EQ(t.second, p.GetNewInodeId()); } } @@ -187,6 +199,8 @@ TEST_F(PartitionTest, 
testInodeIdGen5_paritionstatus) { Partition partition1(partitionInfo1, kvStorage_); + ASSERT_TRUE(partition1.Init()); + ASSERT_EQ(partition1.GetNewInodeId(), 198); ASSERT_EQ(partition1.GetPartitionInfo().status(), PartitionStatus::READWRITE); @@ -209,6 +223,8 @@ TEST_F(PartitionTest, test1) { Partition partition1(partitionInfo1, kvStorage_); + ASSERT_TRUE(partition1.Init()); + ASSERT_TRUE(partition1.IsDeletable()); ASSERT_TRUE(partition1.IsInodeBelongs(1, 100)); ASSERT_TRUE(partition1.IsInodeBelongs(1, 199)); @@ -231,14 +247,16 @@ TEST_F(PartitionTest, inodenum) { Partition partition1(partitionInfo1, kvStorage_); + ASSERT_TRUE(partition1.Init()); + ASSERT_EQ(partition1.GetInodeNum(), 0); Inode inode; - ASSERT_EQ(partition1.CreateInode(param_, &inode), + ASSERT_EQ(partition1.CreateInode(param_, &inode, logIndex_++), MetaStatusCode::OK); ASSERT_EQ(partition1.GetInodeNum(), 1); - ASSERT_EQ(partition1.DeleteInode(1, 100), MetaStatusCode::OK); + ASSERT_EQ(partition1.DeleteInode(1, 100, logIndex_++), MetaStatusCode::OK); ASSERT_EQ(partition1.GetInodeNum(), 0); } @@ -252,14 +270,17 @@ TEST_F(PartitionTest, dentrynum) { partitionInfo1.set_end(199); Partition partition1(partitionInfo1, kvStorage_); + + ASSERT_TRUE(partition1.Init()); + ASSERT_EQ(partition1.GetDentryNum(), 0); // create parent inode Inode inode; inode.set_inodeid(100); - ASSERT_EQ(partition1.CreateInode(param_, &inode), - MetaStatusCode::OK); + ASSERT_EQ(partition1.CreateInode(param_, &inode, logIndex_++), + MetaStatusCode::OK); Dentry dentry; dentry.set_fsid(1); @@ -271,11 +292,11 @@ TEST_F(PartitionTest, dentrynum) { Time tm; tm.set_sec(0); tm.set_nsec(0); - ASSERT_EQ(partition1.CreateDentry(dentry, tm), + ASSERT_EQ(partition1.CreateDentry(dentry, tm, logIndex_++), MetaStatusCode::OK); ASSERT_EQ(partition1.GetDentryNum(), 1); - ASSERT_EQ(partition1.DeleteDentry(dentry), MetaStatusCode::OK); + ASSERT_EQ(partition1.DeleteDentry(dentry, logIndex_++), MetaStatusCode::OK); ASSERT_EQ(partition1.GetDentryNum(), 
0); } @@ -290,6 +311,8 @@ TEST_F(PartitionTest, PARTITION_ID_MISSMATCH_ERROR) { Partition partition1(partitionInfo1, kvStorage_); + ASSERT_TRUE(partition1.Init()); + Dentry dentry1; dentry1.set_fsid(2); dentry1.set_parentinodeid(100); @@ -302,15 +325,15 @@ TEST_F(PartitionTest, PARTITION_ID_MISSMATCH_ERROR) { tm.set_sec(0); tm.set_nsec(0); // test CreateDentry - ASSERT_EQ(partition1.CreateDentry(dentry1, tm), + ASSERT_EQ(partition1.CreateDentry(dentry1, tm, logIndex_++), MetaStatusCode::PARTITION_ID_MISSMATCH); - ASSERT_EQ(partition1.CreateDentry(dentry2, tm), + ASSERT_EQ(partition1.CreateDentry(dentry2, tm, logIndex_++), MetaStatusCode::PARTITION_ID_MISSMATCH); // test DeleteDentry - ASSERT_EQ(partition1.DeleteDentry(dentry1), + ASSERT_EQ(partition1.DeleteDentry(dentry1, logIndex_++), MetaStatusCode::PARTITION_ID_MISSMATCH); - ASSERT_EQ(partition1.DeleteDentry(dentry2), + ASSERT_EQ(partition1.DeleteDentry(dentry2, logIndex_++), MetaStatusCode::PARTITION_ID_MISSMATCH); // test GetDentry @@ -330,9 +353,9 @@ TEST_F(PartitionTest, PARTITION_ID_MISSMATCH_ERROR) { // test HandleRenameTx std::vector dentrys1 = {dentry1}; std::vector dentrys2 = {dentry2}; - ASSERT_EQ(partition1.HandleRenameTx(dentrys1), + ASSERT_EQ(partition1.HandleRenameTx(dentrys1, logIndex_++), MetaStatusCode::PARTITION_ID_MISSMATCH); - ASSERT_EQ(partition1.HandleRenameTx(dentrys2), + ASSERT_EQ(partition1.HandleRenameTx(dentrys2, logIndex_++), MetaStatusCode::PARTITION_ID_MISSMATCH); // test InsertPendingTx @@ -345,11 +368,11 @@ TEST_F(PartitionTest, PARTITION_ID_MISSMATCH_ERROR) { param_.type = FsFileType::TYPE_DIRECTORY; param_.fsId = fsId + 1; Inode inode1; - ASSERT_EQ(partition1.CreateInode(param_, &inode1), + ASSERT_EQ(partition1.CreateInode(param_, &inode1, logIndex_++), MetaStatusCode::PARTITION_ID_MISSMATCH); // test CreateRootInode - ASSERT_EQ(partition1.CreateRootInode(param_), + ASSERT_EQ(partition1.CreateRootInode(param_, logIndex_++), MetaStatusCode::PARTITION_ID_MISSMATCH); // test 
GetInode @@ -361,9 +384,9 @@ TEST_F(PartitionTest, PARTITION_ID_MISSMATCH_ERROR) { MetaStatusCode::PARTITION_ID_MISSMATCH); // test DeleteInode - ASSERT_EQ(partition1.DeleteInode(fsId + 1, rightInodeId), + ASSERT_EQ(partition1.DeleteInode(fsId + 1, rightInodeId, logIndex_++), MetaStatusCode::PARTITION_ID_MISSMATCH); - ASSERT_EQ(partition1.DeleteInode(fsId, wrongInodeId), + ASSERT_EQ(partition1.DeleteInode(fsId, wrongInodeId, logIndex_++), MetaStatusCode::PARTITION_ID_MISSMATCH); Inode inode2; @@ -376,16 +399,16 @@ TEST_F(PartitionTest, PARTITION_ID_MISSMATCH_ERROR) { // test UpdateInode UpdateInodeRequest inode2Request = MakeUpdateInodeRequestFromInode(inode2); - ASSERT_EQ(partition1.UpdateInode(inode2Request), + ASSERT_EQ(partition1.UpdateInode(inode2Request, logIndex_++), MetaStatusCode::PARTITION_ID_MISSMATCH); UpdateInodeRequest inode3Request = MakeUpdateInodeRequestFromInode(inode3); - ASSERT_EQ(partition1.UpdateInode(inode3Request), + ASSERT_EQ(partition1.UpdateInode(inode3Request, logIndex_++), MetaStatusCode::PARTITION_ID_MISSMATCH); // test InsertInode - ASSERT_EQ(partition1.InsertInode(inode2), + ASSERT_EQ(partition1.InsertInode(inode2, logIndex_++), MetaStatusCode::PARTITION_ID_MISSMATCH); - ASSERT_EQ(partition1.InsertInode(inode3), + ASSERT_EQ(partition1.InsertInode(inode3, logIndex_++), MetaStatusCode::PARTITION_ID_MISSMATCH); } @@ -400,12 +423,15 @@ TEST_F(PartitionTest, testGetInodeAttr) { Partition partition1(partitionInfo1, kvStorage_); + ASSERT_TRUE(partition1.Init()); + // create parent inode Inode inode; inode.set_inodeid(100); param_.type = FsFileType::TYPE_FILE; param_.fsId = 1; - ASSERT_EQ(partition1.CreateInode(param_, &inode), MetaStatusCode::OK); + ASSERT_EQ(partition1.CreateInode(param_, &inode, logIndex_++), + MetaStatusCode::OK); InodeAttr attr; ASSERT_EQ(partition1.GetInodeAttr(1, 100, &attr), MetaStatusCode::OK); ASSERT_EQ(attr.inodeid(), 100); @@ -428,11 +454,14 @@ TEST_F(PartitionTest, testGetXAttr) { Partition 
partition1(partitionInfo1, kvStorage_); + ASSERT_TRUE(partition1.Init()); + // create parent inode Inode inode; inode.set_inodeid(100); param_.type = FsFileType::TYPE_DIRECTORY; - ASSERT_EQ(partition1.CreateInode(param_, &inode), MetaStatusCode::OK); + ASSERT_EQ(partition1.CreateInode(param_, &inode, logIndex_++), + MetaStatusCode::OK); XAttr xattr; ASSERT_EQ(partition1.GetXAttr(1, 100, &xattr), MetaStatusCode::OK); ASSERT_EQ(xattr.inodeid(), 100); diff --git a/curvefs/test/metaserver/recycle_cleaner_test.cpp b/curvefs/test/metaserver/recycle_cleaner_test.cpp index f2b85275a9..f7b6481ac4 100644 --- a/curvefs/test/metaserver/recycle_cleaner_test.cpp +++ b/curvefs/test/metaserver/recycle_cleaner_test.cpp @@ -63,6 +63,7 @@ class RecycleCleanerTest : public testing::Test { options.localFileSystem = localfs.get(); kvStorage_ = std::make_shared(options); ASSERT_TRUE(kvStorage_->Open()); + logIndex_ = 0; uint32_t partitionId1 = 1; uint32_t fsId = 2; @@ -88,15 +89,17 @@ class RecycleCleanerTest : public testing::Test { rootPram.fsId = fsId; rootPram.parent = 0; rootPram.type = FsFileType::TYPE_DIRECTORY; - ASSERT_EQ(partition_->CreateRootInode(rootPram), MetaStatusCode::OK); + ASSERT_EQ(partition_->CreateRootInode(rootPram, logIndex_++), + MetaStatusCode::OK); InodeParam managePram; managePram.fsId = fsId; managePram.parent = ROOTINODEID; managePram.type = FsFileType::TYPE_DIRECTORY; Inode inode; - ASSERT_EQ(partition_->CreateManageInode( - managePram, ManageInodeType::TYPE_RECYCLE, &inode), - MetaStatusCode::OK); + ASSERT_EQ( + partition_->CreateManageInode( + managePram, ManageInodeType::TYPE_RECYCLE, &inode, logIndex_++), + MetaStatusCode::OK); Dentry dentry; dentry.set_fsid(fsId); dentry.set_inodeid(RECYCLEINODEID); @@ -107,7 +110,7 @@ class RecycleCleanerTest : public testing::Test { Time tm; tm.set_sec(0); tm.set_nsec(0); - ASSERT_EQ(partition_->CreateDentry(dentry, tm), + ASSERT_EQ(partition_->CreateDentry(dentry, tm, logIndex_++), MetaStatusCode::OK); } @@ -149,6 
+152,7 @@ class RecycleCleanerTest : public testing::Test { std::shared_ptr metaClient_; std::shared_ptr partition_; copyset::MockCopysetNode copysetNode_; + int64_t logIndex_; }; TEST_F(RecycleCleanerTest, time_func_test) { @@ -259,7 +263,7 @@ TEST_F(RecycleCleanerTest, delete_node_test) { EXPECT_CALL(*metaClient_, GetInode(_, _, _, _)) .WillOnce(Return(MetaStatusCode::OK)) .WillOnce( - DoAll(SetArgPointee<2>(inode), Return(MetaStatusCode::OK))); + DoAll(SetArgPointee<2>(inode), Return(MetaStatusCode::OK))); EXPECT_CALL(*metaClient_, UpdateInodeAttrWithOutNlink(_, _, _, _, _)) .WillOnce(Return(MetaStatusCode::OK)); @@ -362,8 +366,8 @@ TEST_F(RecycleCleanerTest, scan_recycle_test5) { Time tm; tm.set_sec(0); tm.set_nsec(0); - partition_->CreateDentry(dentry1, tm); - partition_->CreateDentry(dentry2, tm); + partition_->CreateDentry(dentry1, tm, logIndex_++); + partition_->CreateDentry(dentry2, tm, logIndex_++); LOG(INFO) << "create dentry1 " << dentry1.ShortDebugString(); LOG(INFO) << "create dentry2 " << dentry2.ShortDebugString(); @@ -395,9 +399,9 @@ TEST_F(RecycleCleanerTest, scan_recycle_test6) { Time tm; tm.set_sec(0); tm.set_nsec(0); - ASSERT_EQ(partition_->CreateDentry(dentry1, tm), + ASSERT_EQ(partition_->CreateDentry(dentry1, tm, logIndex_++), MetaStatusCode::OK); - ASSERT_EQ(partition_->CreateDentry(dentry2, tm), + ASSERT_EQ(partition_->CreateDentry(dentry2, tm, logIndex_++), MetaStatusCode::OK); LOG(INFO) << "create dentry1 " << dentry1.ShortDebugString(); LOG(INFO) << "create dentry2 " << dentry2.ShortDebugString(); diff --git a/curvefs/test/metaserver/recycle_manager_test.cpp b/curvefs/test/metaserver/recycle_manager_test.cpp index 9d601f6ce9..f2017ff8ba 100644 --- a/curvefs/test/metaserver/recycle_manager_test.cpp +++ b/curvefs/test/metaserver/recycle_manager_test.cpp @@ -66,6 +66,7 @@ class RecycleManangeTest : public testing::Test { mdsclient_ = std::make_shared(); FsInfoManager::GetInstance().SetMdsClient(mdsclient_); + logIndex_ = 0; } void 
TearDown() override { @@ -102,6 +103,7 @@ class RecycleManangeTest : public testing::Test { std::string dataDir_; std::shared_ptr kvStorage_; std::shared_ptr mdsclient_; + int64_t logIndex_; }; TEST_F(RecycleManangeTest, test_empty_recycle) { @@ -156,15 +158,17 @@ TEST_F(RecycleManangeTest, test_empty_recycle) { rootPram.fsId = fsId; rootPram.parent = 0; rootPram.type = FsFileType::TYPE_DIRECTORY; - ASSERT_EQ(partition->CreateRootInode(rootPram), MetaStatusCode::OK); + ASSERT_EQ(partition->CreateRootInode(rootPram, logIndex_++), + MetaStatusCode::OK); InodeParam managePram; managePram.fsId = fsId; managePram.parent = ROOTINODEID; managePram.type = FsFileType::TYPE_DIRECTORY; Inode manageInode; - ASSERT_EQ(partition->CreateManageInode( - managePram, ManageInodeType::TYPE_RECYCLE, &manageInode), - MetaStatusCode::OK); + ASSERT_EQ( + partition->CreateManageInode(managePram, ManageInodeType::TYPE_RECYCLE, + &manageInode, logIndex_++), + MetaStatusCode::OK); Dentry dentry; dentry.set_fsid(fsId); dentry.set_inodeid(RECYCLEINODEID); @@ -175,7 +179,7 @@ TEST_F(RecycleManangeTest, test_empty_recycle) { Time tm; tm.set_sec(0); tm.set_nsec(0); - ASSERT_EQ(partition->CreateDentry(dentry, tm), + ASSERT_EQ(partition->CreateDentry(dentry, tm, logIndex_++), MetaStatusCode::OK); // create recycle time dir @@ -184,7 +188,8 @@ TEST_F(RecycleManangeTest, test_empty_recycle) { param.fsId = fsId; param.parent = RECYCLEINODEID; param.type = FsFileType::TYPE_DIRECTORY; - ASSERT_EQ(partition->CreateInode(param, &inode), MetaStatusCode::OK); + ASSERT_EQ(partition->CreateInode(param, &inode, logIndex_++), + MetaStatusCode::OK); Dentry dentry1; dentry1.set_name(GetRecycleTimeDirName()); @@ -193,7 +198,7 @@ TEST_F(RecycleManangeTest, test_empty_recycle) { dentry1.set_inodeid(2001); dentry1.set_txid(0); dentry1.set_type(FsFileType::TYPE_DIRECTORY); - ASSERT_EQ(partition->CreateDentry(dentry1, tm), + ASSERT_EQ(partition->CreateDentry(dentry1, tm, logIndex_++), MetaStatusCode::OK); // wait clean 
recycle diff --git a/curvefs/test/metaserver/s3compact/s3compact_test.cpp b/curvefs/test/metaserver/s3compact/s3compact_test.cpp index cd99958dc5..d8fed36dab 100644 --- a/curvefs/test/metaserver/s3compact/s3compact_test.cpp +++ b/curvefs/test/metaserver/s3compact/s3compact_test.cpp @@ -48,10 +48,10 @@ using ::testing::SetArgPointee; using ::testing::StrEq; using ::curvefs::metaserver::storage::KVStorage; +using ::curvefs::metaserver::storage::NameGenerator; using ::curvefs::metaserver::storage::RandomStoragePath; using ::curvefs::metaserver::storage::RocksDBStorage; using ::curvefs::metaserver::storage::StorageOptions; -using ::curvefs::metaserver::storage::NameGenerator; namespace curvefs { namespace metaserver { @@ -105,6 +105,7 @@ class S3CompactTest : public ::testing::Test { impl_ = std::make_shared(&workerOptions_); mockImpl_ = std::make_shared(&workerOptions_); mockCopysetNodeWrapper_ = absl::make_unique(); + logIndex_ = 0; } void TearDown() override { @@ -129,6 +130,7 @@ class S3CompactTest : public ::testing::Test { std::string dataDir_; std::shared_ptr kvStorage_; std::shared_ptr filetype2InodeNum_; + int64_t logIndex_; }; TEST_F(S3CompactTest, test_CopysetNodeWrapper) { @@ -636,7 +638,7 @@ TEST_F(S3CompactTest, test_CompactChunks) { inode1.set_type(FsFileType::TYPE_FILE); ::google::protobuf::Map s3chunkinfoMap; *inode1.mutable_s3chunkinfomap() = s3chunkinfoMap; - ASSERT_EQ(inodeStorage_->Insert(inode1), MetaStatusCode::OK); + ASSERT_EQ(inodeStorage_->Insert(inode1, logIndex_++), MetaStatusCode::OK); t.inodeKey = Key4Inode(1, 1); mockImpl_->CompactChunks(t); // normal @@ -662,9 +664,9 @@ TEST_F(S3CompactTest, test_CompactChunks) { ref->set_zero(false); } auto rc = inodeStorage_->ModifyInodeS3ChunkInfoList( - inode1.fsid(), inode1.inodeid(), 0, &l0, nullptr); + inode1.fsid(), inode1.inodeid(), 0, &l0, nullptr, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); - ASSERT_EQ(inodeStorage_->Update(inode1), MetaStatusCode::OK); + 
ASSERT_EQ(inodeStorage_->Update(inode1, logIndex_++), MetaStatusCode::OK); mockImpl_->CompactChunks(t); ASSERT_EQ(tmp.s3chunkinfomap().size(), 1); const auto& l = tmp.s3chunkinfomap().at(0); @@ -678,7 +680,7 @@ TEST_F(S3CompactTest, test_CompactChunks) { ASSERT_EQ(s3chunkinfo.zero(), false); // inode nlink = 0, deleted inode1.set_nlink(0); - ASSERT_EQ(inodeStorage_->Update(inode1), MetaStatusCode::OK); + ASSERT_EQ(inodeStorage_->Update(inode1, logIndex_++), MetaStatusCode::OK); mockImpl_->CompactChunks(t); EXPECT_CALL(*mockImpl_, UpdateInode_rvr(_, _, _, _, _)) .WillRepeatedly(Return(MetaStatusCode::UNKNOWN_ERROR)); diff --git a/curvefs/test/metaserver/space/utils.h b/curvefs/test/metaserver/space/utils.h index 55bfa4e864..49456c1c5c 100644 --- a/curvefs/test/metaserver/space/utils.h +++ b/curvefs/test/metaserver/space/utils.h @@ -23,7 +23,11 @@ #ifndef CURVEFS_TEST_METASERVER_SPACE_UTILS_H_ #define CURVEFS_TEST_METASERVER_SPACE_UTILS_H_ +#include +#include +#include #include +#include namespace curvefs { namespace metaserver { diff --git a/curvefs/test/metaserver/storage/iterator_test.cpp b/curvefs/test/metaserver/storage/iterator_test.cpp index 1e2a54a667..ca76d40c30 100644 --- a/curvefs/test/metaserver/storage/iterator_test.cpp +++ b/curvefs/test/metaserver/storage/iterator_test.cpp @@ -51,7 +51,7 @@ class HashIterator : public Iterator { void Next() override { iter_++; } std::string Key() override { return iter_->first; } std::string Value() override { return iter_->second; } - bool ParseFromValue(ValueType* value) { return true; } + bool ParseFromValue(ValueType* value) override { return true; } int Status() override { return 0; } private: diff --git a/curvefs/test/metaserver/transaction_test.cpp b/curvefs/test/metaserver/transaction_test.cpp index ddfa2776b8..c21cf91651 100644 --- a/curvefs/test/metaserver/transaction_test.cpp +++ b/curvefs/test/metaserver/transaction_test.cpp @@ -35,11 +35,10 @@ namespace curvefs { namespace metaserver { using 
::curvefs::metaserver::storage::KVStorage; -using ::curvefs::metaserver::storage::StorageOptions; -using ::curvefs::metaserver::storage::RocksDBStorage; using ::curvefs::metaserver::storage::NameGenerator; using ::curvefs::metaserver::storage::RandomStoragePath; -using TX_OP_TYPE = DentryStorage::TX_OP_TYPE; +using ::curvefs::metaserver::storage::RocksDBStorage; +using ::curvefs::metaserver::storage::StorageOptions; namespace { auto localfs = curve::fs::Ext4FileSystemImpl::getInstance(); @@ -48,7 +47,7 @@ auto localfs = curve::fs::Ext4FileSystemImpl::getInstance(); class TransactionTest : public ::testing::Test { protected: void SetUp() override { - dataDir_ = RandomStoragePath();; + dataDir_ = RandomStoragePath(); StorageOptions options; options.dataDir = dataDir_; @@ -57,11 +56,16 @@ class TransactionTest : public ::testing::Test { ASSERT_TRUE(kvStorage_->Open()); nameGenerator_ = std::make_shared(1); - dentryStorage_ = std::make_shared( - kvStorage_, nameGenerator_, 0); - txManager_ = std::make_shared(dentryStorage_); - dentryManager_ = std::make_shared( - dentryStorage_, txManager_); + dentryStorage_ = + std::make_shared(kvStorage_, nameGenerator_, 0); + common::PartitionInfo partitionInfo; + partitionInfo.set_partitionid(1); + txManager_ = std::make_shared(dentryStorage_, partitionInfo); + dentryManager_ = + std::make_shared(dentryStorage_, txManager_); + ASSERT_TRUE(dentryManager_->Init()); + ASSERT_TRUE(txManager_->Init()); + logIndex_ = 0; } void TearDown() override { @@ -84,12 +88,8 @@ class TransactionTest : public ::testing::Test { return result; } - Dentry GenDentry(uint32_t fsId, - uint64_t parentId, - const std::string& name, - uint64_t txId, - uint64_t inodeId, - uint32_t flag) { + Dentry GenDentry(uint32_t fsId, uint64_t parentId, const std::string& name, + uint64_t txId, uint64_t inodeId, uint32_t flag) { Dentry dentry; dentry.set_fsid(fsId); dentry.set_parentinodeid(parentId); @@ -102,10 +102,12 @@ class TransactionTest : public ::testing::Test { 
void InsertDentrys(std::shared_ptr storage, const std::vector&& dentrys) { - for (const auto& dentry : dentrys) { - auto rc = storage->HandleTx(TX_OP_TYPE::PREPARE, dentry); - ASSERT_EQ(rc, MetaStatusCode::OK); - } + // NOTE: store real transaction is unnecessary + metaserver::TransactionRequest request; + request.set_type(metaserver::TransactionRequest::None); + request.set_rawpayload(""); + auto rc = storage->PrepareTx(dentrys, request, logIndex_++); + ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(storage->Size(), dentrys.size()); } @@ -124,12 +126,13 @@ class TransactionTest : public ::testing::Test { std::shared_ptr dentryStorage_; std::shared_ptr dentryManager_; std::shared_ptr txManager_; + int64_t logIndex_; }; TEST_F(TransactionTest, PreCheck) { // CASE 1: empty dentrys auto dentrys = std::vector(); - auto rc = txManager_->HandleRenameTx(dentrys); + auto rc = txManager_->HandleRenameTx(dentrys, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::PARAM_ERROR); // CASE 2: sizeof(dentrys) > 2 @@ -139,7 +142,7 @@ TEST_F(TransactionTest, PreCheck) { GenDentry(1, 0, "B", 0, 2, 0), GenDentry(1, 0, "C", 0, 3, 0), }; - rc = txManager_->HandleRenameTx(dentrys); + rc = txManager_->HandleRenameTx(dentrys, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::PARAM_ERROR); // CASE 3: dentrys fsids are different @@ -148,7 +151,7 @@ TEST_F(TransactionTest, PreCheck) { GenDentry(1, 0, "A", 0, 1, 0), GenDentry(2, 0, "B", 0, 2, 0), }; - rc = txManager_->HandleRenameTx(dentrys); + rc = txManager_->HandleRenameTx(dentrys, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::PARAM_ERROR); // CASE 4: dentrys txids are different @@ -157,23 +160,24 @@ TEST_F(TransactionTest, PreCheck) { GenDentry(1, 0, "A", 0, 1, 0), GenDentry(1, 0, "B", 1, 2, 0), }; - rc = txManager_->HandleRenameTx(dentrys); + rc = txManager_->HandleRenameTx(dentrys, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::PARAM_ERROR); } TEST_F(TransactionTest, HandleTxWithCommit) { - InsertDentrys(dentryStorage_, std::vector{ - // { fsId, parentId, 
name, txId, inodeId, flag } - GenDentry(1, 0, "A", 0, 1, 0), - }); + InsertDentrys(dentryStorage_, + std::vector{ + // { fsId, parentId, name, txId, inodeId, flag } + GenDentry(1, 0, "A", 0, 1, 0), + }); // step-1: prepare tx success (rename A B) - auto dentrys = std::vector { + auto dentrys = std::vector{ // { fsId, parentId, name, txId, inodeId, flag } GenDentry(1, 0, "A", 1, 1, DELETE_FLAG), GenDentry(1, 0, "B", 1, 1, 0), }; - auto rc = txManager_->HandleRenameTx(dentrys); + auto rc = txManager_->HandleRenameTx(dentrys, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(dentryStorage_->Size(), 3); @@ -196,11 +200,11 @@ TEST_F(TransactionTest, HandleTxWithCommit) { ASSERT_EQ(dentryStorage_->Size(), 3); // step-4: prepare a new tx success with commit - dentrys = std::vector { + dentrys = std::vector{ // { fsId, parentId, name, txId, inodeId, flag } GenDentry(1, 0, "C", 2, 2, 0), }; - rc = txManager_->HandleRenameTx(dentrys); + rc = txManager_->HandleRenameTx(dentrys, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); // step-5: check dentrys @@ -210,25 +214,26 @@ TEST_F(TransactionTest, HandleTxWithCommit) { ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(dentrys.size(), 2); ASSERT_DENTRYS_EQ(dentrys, std::vector{ - GenDentry(1, 0, "B", 1, 1, 0), - GenDentry(1, 0, "C", 2, 2, 0), - }); + GenDentry(1, 0, "B", 1, 1, 0), + GenDentry(1, 0, "C", 2, 2, 0), + }); ASSERT_EQ(dentryStorage_->Size(), 2); } TEST_F(TransactionTest, HandleTxWithRollback) { - InsertDentrys(dentryStorage_, std::vector{ - // { fsId, parentId, name, txId, inodeId, flag } - GenDentry(1, 0, "A", 0, 1, 0), - }); + InsertDentrys(dentryStorage_, + std::vector{ + // { fsId, parentId, name, txId, inodeId, flag } + GenDentry(1, 0, "A", 0, 1, 0), + }); // step-1: prepare tx success (rename A B) - auto dentrys = std::vector { + auto dentrys = std::vector{ // { fsId, parentId, name, txId, inodeId, flag } GenDentry(1, 0, "A", 1, 1, DELETE_FLAG), GenDentry(1, 0, "B", 1, 1, 0), }; - auto rc = 
txManager_->HandleRenameTx(dentrys); + auto rc = txManager_->HandleRenameTx(dentrys, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(dentryStorage_->Size(), 3); @@ -251,11 +256,11 @@ TEST_F(TransactionTest, HandleTxWithRollback) { ASSERT_EQ(dentryStorage_->Size(), 3); // step-4: prepare a new tx success with rollback - dentrys = std::vector { + dentrys = std::vector{ // { fsId, parentId, name, txId, inodeId, flag } GenDentry(1, 0, "C", 1, 2, 0), }; - rc = txManager_->HandleRenameTx(dentrys); + rc = txManager_->HandleRenameTx(dentrys, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); // step-5: check dentrys @@ -265,9 +270,9 @@ TEST_F(TransactionTest, HandleTxWithRollback) { ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(dentrys.size(), 2); ASSERT_DENTRYS_EQ(dentrys, std::vector{ - GenDentry(1, 0, "A", 0, 1, 0), - GenDentry(1, 0, "C", 1, 2, 0), - }); + GenDentry(1, 0, "A", 0, 1, 0), + GenDentry(1, 0, "C", 1, 2, 0), + }); ASSERT_EQ(dentryStorage_->Size(), 2); } @@ -281,20 +286,21 @@ TEST_F(TransactionTest, HandleTxWithTargetExist) { * * rename /A /B/A */ - InsertDentrys(dentryStorage_, std::vector{ - // { fsId, parentId, name, txId, inodeId, flag } - GenDentry(1, 0, "A", 0, 1, 0), - GenDentry(1, 0, "B", 0, 2, 0), - GenDentry(1, 2, "A", 0, 3, 0), - }); + InsertDentrys(dentryStorage_, + std::vector{ + // { fsId, parentId, name, txId, inodeId, flag } + GenDentry(1, 0, "A", 0, 1, 0), + GenDentry(1, 0, "B", 0, 2, 0), + GenDentry(1, 2, "A", 0, 3, 0), + }); // step-1: prepare tx success (rename A B) - auto dentrys = std::vector { + auto dentrys = std::vector{ // { fsId, parentId, name, txId, inodeId, flag } GenDentry(1, 0, "A", 1, 1, FILE_FLAG | DELETE_FLAG), GenDentry(1, 2, "A", 1, 1, FILE_FLAG), }; - auto rc = txManager_->HandleRenameTx(dentrys); + auto rc = txManager_->HandleRenameTx(dentrys, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(dentryStorage_->Size(), 5); @@ -318,11 +324,11 @@ TEST_F(TransactionTest, HandleTxWithTargetExist) { 
ASSERT_EQ(dentryStorage_->Size(), 5); // step-4: prepare a new tx success with commit - dentrys = std::vector { + dentrys = std::vector{ // { fsId, parentId, name, txId, inodeId, deleteMarkFlag } GenDentry(1, 0, "C", 2, 4, 0), }; - rc = txManager_->HandleRenameTx(dentrys); + rc = txManager_->HandleRenameTx(dentrys, logIndex_++); ASSERT_EQ(rc, MetaStatusCode::OK); // step-5: check dentrys @@ -332,8 +338,8 @@ TEST_F(TransactionTest, HandleTxWithTargetExist) { ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(dentrys.size(), 1); ASSERT_DENTRYS_EQ(dentrys, std::vector{ - GenDentry(1, 0, "B", 0, 2, 0), - }); + GenDentry(1, 0, "B", 0, 2, 0), + }); dentrys.clear(); dentry = GenDentry(1, 2, "", 1, 0, 0); @@ -341,8 +347,8 @@ TEST_F(TransactionTest, HandleTxWithTargetExist) { ASSERT_EQ(rc, MetaStatusCode::OK); ASSERT_EQ(dentrys.size(), 1); ASSERT_DENTRYS_EQ(dentrys, std::vector{ - GenDentry(1, 2, "A", 1, 1, FILE_FLAG), - }); + GenDentry(1, 2, "A", 1, 1, FILE_FLAG), + }); ASSERT_EQ(dentryStorage_->Size(), 3); // /B /B/A /C(pending) } diff --git a/curvefs/test/metaserver/trash_test.cpp b/curvefs/test/metaserver/trash_test.cpp index 3e130d3e90..56edf05f7a 100644 --- a/curvefs/test/metaserver/trash_test.cpp +++ b/curvefs/test/metaserver/trash_test.cpp @@ -31,14 +31,14 @@ #include "curvefs/test/client/rpcclient/mock_mds_client.h" #include "curvefs/test/metaserver/mock_metaserver_s3_adaptor.h" -using ::testing::AtLeast; -using ::testing::StrEq; using ::testing::_; +using ::testing::AtLeast; +using ::testing::DoAll; using ::testing::Return; using ::testing::ReturnArg; -using ::testing::DoAll; -using ::testing::SetArgPointee; using ::testing::SaveArg; +using ::testing::SetArgPointee; +using ::testing::StrEq; namespace curvefs { namespace metaserver { @@ -47,11 +47,11 @@ namespace { auto localfs = curve::fs::Ext4FileSystemImpl::getInstance(); } +using ::curvefs::client::rpcclient::MockMdsClient; using ::curvefs::metaserver::storage::KVStorage; -using 
::curvefs::metaserver::storage::StorageOptions; -using ::curvefs::metaserver::storage::RocksDBStorage; using ::curvefs::metaserver::storage::RandomStoragePath; -using ::curvefs::client::rpcclient::MockMdsClient; +using ::curvefs::metaserver::storage::RocksDBStorage; +using ::curvefs::metaserver::storage::StorageOptions; class TestTrash : public ::testing::Test { protected: @@ -64,9 +64,10 @@ class TestTrash : public ::testing::Test { ASSERT_TRUE(kvStorage_->Open()); auto nameGenerator = std::make_shared(1); - inodeStorage_ = std::make_shared( - kvStorage_, nameGenerator, 0); + inodeStorage_ = + std::make_shared(kvStorage_, nameGenerator, 0); trashManager_ = std::make_shared(); + logIndex_ = 0; } void TearDown() override { @@ -137,6 +138,7 @@ class TestTrash : public ::testing::Test { std::shared_ptr kvStorage_; std::shared_ptr inodeStorage_; std::shared_ptr trashManager_; + int64_t logIndex_; }; TEST_F(TestTrash, testAdd3ItemAndDelete) { @@ -152,9 +154,9 @@ TEST_F(TestTrash, testAdd3ItemAndDelete) { trashManager_->Add(1, trash1); trashManager_->Add(2, trash2); - inodeStorage_->Insert(GenInodeHasChunks(1, 1)); - inodeStorage_->Insert(GenInodeHasChunks(1, 2)); - inodeStorage_->Insert(GenInodeHasChunks(2, 1)); + inodeStorage_->Insert(GenInodeHasChunks(1, 1), logIndex_++); + inodeStorage_->Insert(GenInodeHasChunks(1, 2), logIndex_++); + inodeStorage_->Insert(GenInodeHasChunks(2, 1), logIndex_++); ASSERT_EQ(inodeStorage_->Size(), 3); @@ -185,9 +187,9 @@ TEST_F(TestTrash, testAdd3ItemAndNoDelete) { auto trash1 = std::make_shared(inodeStorage_); trashManager_->Add(1, trash1); - inodeStorage_->Insert(GenInode(1, 1)); - inodeStorage_->Insert(GenInode(1, 2)); - inodeStorage_->Insert(GenInode(2, 1)); + inodeStorage_->Insert(GenInode(1, 1), logIndex_++); + inodeStorage_->Insert(GenInode(1, 2), logIndex_++); + inodeStorage_->Insert(GenInode(2, 1), logIndex_++); ASSERT_EQ(inodeStorage_->Size(), 3); trash1->Add(1, 1, 0); trash1->Add(1, 2, 0); diff --git 
a/curvefs_python/configure.sh b/curvefs_python/configure.sh index ce6f86b1b6..159f21eef4 100755 --- a/curvefs_python/configure.sh +++ b/curvefs_python/configure.sh @@ -35,6 +35,12 @@ if [ $# -ge 1 ]; then PYTHON_VER=$1 fi +bazelbin="bazel-bin" +if [ -n "$BAZEL_BIN" ]; then + bazelbin=$BAZEL_BIN +fi +echo "bazel-bin path is $bazelbin" + echo "configure for ${PYTHON_VER}" if [ "${PYTHON_VER}" = "python2" ] || [ "${PYTHON_VER}" = "python3" ]; then @@ -61,7 +67,7 @@ libs=`cat BUILD | tr -d "[:blank:]" | grep "^\"-l" | sed 's/[",]//g' | awk '{ pr rm -rf tmplib mkdir tmplib -for i in `find $curve_path/bazel-bin/|grep -w so|grep -v solib|grep -v params` +for i in `find $curve_path/$bazelbin/|grep -w so|grep -v solib|grep -v params` do basename=$(basename $i) linkname=`echo $basename | awk -F'.' '{ print $1 }' | awk '{ print substr($0, 4) }'` diff --git a/docker/openeuler/Dockerfile b/docker/openeuler/Dockerfile new file mode 100644 index 0000000000..75b5e634fb --- /dev/null +++ b/docker/openeuler/Dockerfile @@ -0,0 +1,21 @@ +FROM opencurvedocker/curve-base:openeuler +ENV TZ=Asia/Shanghai +COPY --from=opencurvedocker/curve-base:curve-tgt-openeuler /curve-tgt/ /curve-tgt/ +COPY --from=opencurvedocker/curve-base:curve-tgt-openeuler /curve/curve-sdk /curve-tgt/curve-sdk +RUN cd /curve-tgt/curve-sdk && \ + cp -f lib/* /usr/lib && \ + cp -f bin/* /usr/bin && \ + mkdir -p /usr/curvefs && \ + cp -f curvefs/* /usr/curvefs && \ + cp -f include/* /usr/include && \ + ldconfig && \ + cd /curve-tgt/ && \ + make install-programs && \ + rm -rf /curve-tgt +COPY curvebs /curvebs +RUN mkdir -p /etc/curve /etc/nebd /curve/init.d/ && \ + chmod a+x /entrypoint.sh && \ + cp /curvebs/nbd/sbin/curve-nbd /usr/bin/ && \ + cp /curvebs/tools/sbin/curve_ops_tool /usr/bin/ && \ + cp /curvebs/tools-v2/sbin/curve /usr/bin/ +ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/openeuler/base/Dockerfile b/docker/openeuler/base/Dockerfile new file mode 100644 index 0000000000..c4089ea7d9 --- /dev/null +++ 
b/docker/openeuler/base/Dockerfile @@ -0,0 +1,19 @@ +FROM openeuler/openeuler:22.03-lts-sp2 +ENV GITHUB_PROXY=https://ghproxy.com/ +ENV ETCD_VERSION=v3.4.0 \ + DOWNLOAD_ETCD_URL=${GITHUB_PROXY}https://github.com/etcd-io/etcd/releases/download \ + GOPROXY=https://goproxy.io,direct \ + PROTOC_VERSION=21.8 \ + GO_VERSION=1.19.6 \ + PROTOC_GEN_GO_VERSION=v1.28 \ + PROTOC_GEN_GO_GRPC_VERSION=v1.2 +RUN yum update -y\ + && yum install -y jemalloc libevent fuse3 snappy zlib openssl libnl3 libuuid libcurl boost libunwind musl-libc nginx wget make gcc gcc-c++ util-linux\ + && wget https://curve-build.nos-eastchina1.126.net/libfiu-1.00.tar.gz \ + && tar -zxvf libfiu-1.00.tar.gz && cd libfiu-1.00 && make libfiu \ + && cp libfiu/libfiu.so.1.00 /usr/lib && ln -s libfiu.so.1.00 libfiu.so\ + && cp libfiu/*.h /usr/include \ + && cd .. && rm -rf libfiu-1.00 \ + && ln -s /usr/lib64/libjemalloc.so.2 /usr/local/lib/libjemalloc.so +ONBUILD COPY entrypoint.sh / +ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/openeuler/base/Makefile b/docker/openeuler/base/Makefile new file mode 100644 index 0000000000..8ae42dc338 --- /dev/null +++ b/docker/openeuler/base/Makefile @@ -0,0 +1,4 @@ +.PHONY: build + +build: + docker build -t opencurvedocker/curve-base:openeuler . 
\ No newline at end of file diff --git a/docker/openeuler/compile/Dockerfile b/docker/openeuler/compile/Dockerfile new file mode 100644 index 0000000000..d9b9a23bc1 --- /dev/null +++ b/docker/openeuler/compile/Dockerfile @@ -0,0 +1,36 @@ +FROM openeuler/openeuler:22.03-lts-sp2 +ENV GITHUB_PROXY=https://ghproxy.com/ +ENV ETCD_VERSION=v3.4.0 \ + DOWNLOAD_ETCD_URL=${GITHUB_PROXY}https://github.com/etcd-io/etcd/releases/download \ + GOPROXY=https://goproxy.io,direct \ + PROTOC_VERSION=21.8 \ + GO_VERSION=1.19.6 \ + PROTOC_GEN_GO_VERSION=v1.28 \ + PROTOC_GEN_GO_GRPC_VERSION=v1.2 +RUN yum update -y\ + && yum install -y gcc gcc-c++ bison bison-devel python3-devel python3-pip flex flex-devel cmake make wget curl git protobuf-compiler patch libevent-devel musl-gcc\ + && yum install -y unzip which zlib zlib-devel openssl openssl-devel libnl3 libnl3-devel libuuid libuuid-devel libcurl-devel boost boost-devel wget cmake tar \ + && yum install -y libunwind libunwind-devel +RUN wget https://github.com/bazelbuild/bazel/releases/download/4.2.2/bazel-4.2.2-installer-linux-x86_64.sh && bash bazel-4.2.2-installer-linux-x86_64.sh && rm bazel-4.2.2-installer-linux-x86_64.sh -f && cd /usr/local/ && wget https://golang.google.cn/dl/go1.20.6.linux-amd64.tar.gz && tar xzvf go1.20.6.linux-amd64.tar.gz && rm go1.20.6.linux-amd64.tar.gz -rf + +RUN wget https://curve-build.nos-eastchina1.126.net/libfiu-1.00.tar.gz && tar -zxvf libfiu-1.00.tar.gz && cd libfiu-1.00 && make libfiu && cp libfiu/*.h /usr/include && cp libfiu/libfiu.so.1.00 /usr/lib && ln -s /usr/lib/libfiu.so.1.00 /usr/lib/libfiu.so && cd .. &&rm -rf libfiu-1.00* +RUN wget https://curve-build.nos-eastchina1.126.net/memcache/memcached-1.6.17.tar.gz \ + && tar xvf memcached-1.6.17.tar.gz && cd memcached-1.6.17/ \ + && ./configure && make -j&& make install \ + && cd .. 
&& rm -rf memcached-1.6.17 memcached-1.6.17.tar.gz \ + && yum install -y fuse3 fuse3-devel lz4-devel snappy-devel + + +RUN wget https://www.python.org/ftp/python/2.7.18/Python-2.7.18.tgz && tar xvf Python-2.7.18.tgz && \ + cd Python-2.7.18 &&\ + ./configure --prefix=/usr/ && \ + make all -j $(nproc)&&\ + make altinstall &&\ + ln -s /usr/bin/python2.7 /usr/bin/python && \ + ln -s /usr/bin/python2.7 /usr/bin/python2 && \ + python3 -m pip install -i https://pypi.tuna.tsinghua.edu.cn/simple wheel +RUN wget https://curve-build.nos-eastchina1.126.net/memcache/libmemcached-1.1.2.tar.gz && tar -zxf libmemcached-1.1.2.tar.gz && cd libmemcached-1.1.2 && mkdir build-libmemcached && cd build-libmemcached && cmake .. && make libmemcached -j && make install +RUN export GOPATH=/usr/local/go && export PATH=$PATH:$GOPATH/bin +RUN echo -e "export GOPATH=/usr/local/go\nexport PATH=\$PATH:\$GOPATH/bin" >> ~/.bashrc +ENV GOPATH=/usr/local/go \ + PATH=$PATH:$GOPATH/bin diff --git a/docker/openeuler/compile/Makefile b/docker/openeuler/compile/Makefile new file mode 100644 index 0000000000..45e2f85b07 --- /dev/null +++ b/docker/openeuler/compile/Makefile @@ -0,0 +1,4 @@ +.PHONY: build + +build: + docker build -t opencurvedocker/curve-base:build-openeuler . 
\ No newline at end of file diff --git a/docker/openeuler/curve-tgt/Dockerfile b/docker/openeuler/curve-tgt/Dockerfile new file mode 100644 index 0000000000..a3b9866e61 --- /dev/null +++ b/docker/openeuler/curve-tgt/Dockerfile @@ -0,0 +1,30 @@ +FROM opencurvedocker/curve-base:build-openeuler AS curve-sdk +ENV GITHUB_PROXY=https://ghproxy.com/ + +RUN git clone https://github.com/opencurve/curve && \ + cd /curve && git checkout a9db5df70898dad53c76369ed0f732477f3e4c4f \ + && cp thirdparties/brpc/fix-gcc11.patch / \ + && git checkout a14bcd3ecdfcedd9398cbe74edf17b8173e417a0 \ + && mv /fix-gcc11.patch thirdparties/brpc/fix-gcc11.patch \ + && sed -i "s;//:thirdparties/brpc/brpc.patch;//:thirdparties/brpc/brpc.patch\",\"//:thirdparties/brpc/fix-gcc11.patch;g" WORKSPACE +RUN cd /curve && sed -i "s;https://gitee.com/mirrors/etcd;https://github.com/etcd-io/etcd;g" thirdparties/etcdclient/Makefile && make dep stor=bs +RUN cd /curve && make dep stor=fs +RUN wget https://bootstrap.pypa.io/pip/2.7/get-pip.py && python2 get-pip.py && python2 -m pip install wheel setuptools +RUN cd /curve && bash -x mk-tar.sh +RUN cd /curve && mv curve_*.tar.gz curve_sdk.tar.gz +RUN cd /curve && ls ./curvefs_python/tmplib/ +FROM opencurvedocker/curve-base:build-openeuler AS curve-tgt +ENV GITHUB_PROXY=https://ghproxy.com/ +COPY --from=curve-sdk /curve/curve_sdk.tar.gz / +RUN tar -zxvf curve_sdk.tar.gz && \ + rm curve_sdk.tar.gz && \ + cd /curve/curve-sdk && \ + cp -f lib/* /usr/lib && \ + cp -f bin/* /usr/bin && \ + mkdir -p /usr/curvefs && \ + cp -f curvefs/* /usr/curvefs && \ + cp -f include/* /usr/include && \ + cd / && \ + git clone --branch curve ${GITHUB_PROXY}https://github.com/opencurve/curve-tgt && \ + cd curve-tgt && \ + make programs diff --git a/docker/openeuler/curve-tgt/Makefile b/docker/openeuler/curve-tgt/Makefile new file mode 100644 index 0000000000..d0303e696d --- /dev/null +++ b/docker/openeuler/curve-tgt/Makefile @@ -0,0 +1,7 @@ +.PHONY: build + +curve-sdk: + docker build 
--target curve-tgt -t opencurvedocker/curve-base:curve-tgt-openeuler . + +build: + docker build --target curve-tgt -t opencurvedocker/curve-base:curve-tgt-openeuler . \ No newline at end of file diff --git a/docker/openeuler/entrypoint.sh b/docker/openeuler/entrypoint.sh new file mode 100644 index 0000000000..476cc65f3c --- /dev/null +++ b/docker/openeuler/entrypoint.sh @@ -0,0 +1,137 @@ +#!/usr/bin/env bash + +# Copyright (C) 2021 Jingli Chen (Wine93), NetEase Inc. + +############################ GLOBAL VARIABLES +g_role="" +g_args="" +g_prefix="" +g_preexec="/curvebs/tools-v2/sbin/daemon" +g_binary="" +g_start_args="" + +############################ BASIC FUNCTIONS +function msg() { + printf '%b' "$1" >&2 +} + +function success() { + msg "\33[32m[✔]\33[0m ${1}${2}" +} + +function die() { + msg "\33[31m[✘]\33[0m ${1}${2}" + exit 1 +} + +############################ FUNCTIONS +function usage () { + cat << _EOC_ +Usage: + entrypoint.sh --role=ROLE + entrypoint.sh --role=ROLE --args=ARGS +Examples: + entrypoint.sh --role=etcd + entrypoint.sh --role=client --args="-o default_permissions" +_EOC_ +} + +function get_options() { + local long_opts="role:,args:,help" + local args=`getopt -o ra --long $long_opts -n "$0" -- "$@"` + eval set -- "${args}" + while true + do + case "$1" in + -r|--role) + g_role=$2 + shift 2 + ;; + -a|--args) + g_args=$2 + shift 2 + ;; + -h) + usage + exit 1 + ;; + --) + shift + break + ;; + *) + exit 1 + ;; + esac + done +} + +function prepare() { + g_prefix="/curvebs/$g_role" + conf_path="$g_prefix/conf/$g_role.conf" + + case $g_role in + etcd) + g_binary="$g_prefix/sbin/etcd" + g_start_args="--config-file $conf_path" + ;; + mds) + g_binary="$g_prefix/sbin/curvebs-mds" + g_start_args="--confPath $conf_path" + ;; + chunkserver) + g_binary="$g_prefix/sbin/curvebs-chunkserver" + g_start_args="--conf=$conf_path" + ;; + snapshotclone) + g_preexec="/usr/sbin/nginx -c $g_prefix/conf/nginx.conf" + g_binary="$g_prefix/sbin/curvebs-snapshotclone" + 
g_start_args="--conf=$conf_path" + ;; + nebd) + g_binary="$g_prefix/sbin/nebd-server" + g_start_args="-confPath=$g_prefix/conf/nebd-server.conf -log_dir=$g_prefix/logs" + ;; + monitor) + g_binary="python" + g_start_args="target_json.py" + ;; + *) + usage + exit 1 + ;; + esac + + if [ "$g_args" != "" ]; then + g_start_args=$g_args + fi +} + +function create_directory() { + chmod 700 "$g_prefix/data" + if [ "$g_role" == "etcd" ]; then + mkdir -p "$g_prefix/data/wal" + elif [ "$g_role" == "client" ]; then + mkdir -p "$g_prefix/mnt" + fi +} + +function main() { + get_options "$@" + + prepare + create_directory + [[ $(command -v crontab) ]] && cron + [[ ! -z $g_preexec ]] && $g_preexec & + if [ $g_role == "etcd" ]; then + exec $g_binary $g_start_args >>$g_prefix/logs/etcd.log 2>&1 + elif [ $g_role == "monitor" ]; then + cd $g_prefix + exec $g_binary $g_start_args + else + exec $g_binary $g_start_args + fi +} + +############################ MAIN() +main "$@" \ No newline at end of file diff --git a/docs/cn/build_and_run.md b/docs/cn/build_and_run.md index 128fa6ae90..11fdfcf47e 100644 --- a/docs/cn/build_and_run.md +++ b/docs/cn/build_and_run.md @@ -43,8 +43,8 @@ docker run --rm -v $(pwd):/curve -w /curve -v ${HOME}/.cache:${HOME}/.cache -v $ # (中国大陆可选)将外部依赖替换为国内下载点或镜像仓库,可以加快编译速度: bash replace-curve-repo.sh # curve v2.0 之前 -bash mk-tar.sh (编译 curvebs 并打tar包) -bash mk-deb.sh (编译 curvebs 并打debian包) +make tar dep=1 (编译 curvebs 并打tar包) +make deb dep=1 (编译 curvebs 并打debian包) # (当前)curve v2.0 及之后 # 编译 curvebs: @@ -57,7 +57,7 @@ make build stor=fs dep=1 make dep stor=fs && make build stor=fs ``` -**注意:** `mk-tar.sh` 和 `mk-deb.sh` 用于 curve v2.0 之前版本的编译打包,v2.0 版本之后不再维护。 +**注意:** `make tar` 和 `make deb` 用于 curve v2.0 之前版本的编译打包,v2.0 版本之后不再维护。 ## 在物理机上编译 @@ -83,8 +83,8 @@ Curve的其他依赖项,均由bazel去管理,不可单独安装。 git clone https://github.com/opencurve/curve.git 或者 git clone https://gitee.com/mirrors/curve.git # (中国大陆可选)将外部依赖替换为国内下载点或镜像仓库,可以加快下载速度: bash replace-curve-repo.sh # curve v2.0 之前 
-bash mk-tar.sh (编译 curvebs 并打tar包) -bash mk-deb.sh (编译 curvebs 并打debian包) +make tar dep=1 (编译 curvebs 并打tar包) +make deb dep=1 (编译 curvebs 并打debian包) # (当前)curve v2.0 及之后 # 编译 curvebs: diff --git a/docs/en/build_and_run_en.md b/docs/en/build_and_run_en.md index c2037fd777..95e1228236 100644 --- a/docs/en/build_and_run_en.md +++ b/docs/en/build_and_run_en.md @@ -42,8 +42,8 @@ docker run --rm -v $(pwd):/curve -w /curve -v ${HOME}:${HOME} --user $(id -u ${U # (Optional for Chinese mainland) Replace external dependencies with domestic download points or mirror warehouses, which can speed up compilation: bash replace-curve-repo.sh # before curve v2.0 -bash mk-tar.sh (compile curvebs and make tar package) -bash mk-deb.sh (compile curvebs and make debian package) +make tar dep=1 (compile curvebs and make tar package) +make deb dep=1 (compile curvebs and make debian package) # (current) after curve v2.0 # compile curvebs: @@ -56,7 +56,7 @@ make build stor=fs dep=1 make dep stor=fs && make build stor=fs ``` -**Note:** `mk-tar.sh` and `mk-deb.sh` are used for compiling and packaging curve v2.0. They are no longer maintained after v2.0. +**Note:** `make tar` and `make deb` are used for compiling and packaging curve v2.0. They are no longer maintained after v2.0. 
## Compile on a physical machine @@ -82,8 +82,8 @@ For dependencies, you can refer to the installation steps in [dockerfile](../../ git clone https://github.com/opencurve/curve.git or git clone https://gitee.com/mirrors/curve.git # (Mainland China optional) Replace external dependencies with domestic download points or mirror warehouses, which can speed up compilation: bash replace-curve-repo.sh # before curve v2.0 -bash mk-tar.sh (compile curvebs and make tar package) -bash mk-deb.sh (compile curvebs and make debian package) +make tar dep=1 (compile curvebs and make tar package) +make deb dep=1 (compile curvebs and make debian package) # (current) after curve v2.0 # compile curvebs: diff --git a/docs/practical/curvebs_csi.md b/docs/practical/curvebs_csi.md index 26029b1caa..9ec0cbe813 100644 --- a/docs/practical/curvebs_csi.md +++ b/docs/practical/curvebs_csi.md @@ -23,7 +23,7 @@ # 编译curve release2.5 $ cd /编译路径/curve $ bash replace-curve-repo.sh - $ bash mk-tar.sh + $ make tar dep=1 # 编译完成所需的二进制和ansile脚本,用于部署curvebs客户端 $ cd /编译路径/curve/build $ ls diff --git a/mk-deb.sh b/mk-deb.sh deleted file mode 100755 index 5e3e3a7935..0000000000 --- a/mk-deb.sh +++ /dev/null @@ -1,352 +0,0 @@ -#!/bin/bash - -# -# Copyright (c) 2020 NetEase Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -if ! 
grep -iq debian /etc/os-release; then - echo "$0 only support Debian" -fi - -set -o errexit - -dir=$(pwd) - -# step1 清除生成的目录和文件 -bazel clean - -cleandir=( - curvefs_python/BUILD - curvefs_python/tmplib/ - curvesnapshot_python/BUILD - curvesnapshot_python/tmplib/ - *.deb - *.whl - *.tar.gz - build -) - -rm -rf "${cleandir[@]}" - -git submodule update --init - -# step2 获取tag版本和git提交版本信息 -# 获取tag版本 -tag_version=$(git status | grep -Ew "HEAD detached at|On branch" | awk '{print $NF}' | awk -F"v" '{print $2}') -if [ -z ${tag_version} ]; then - echo "not found version info, set version to 9.9.9" - tag_version=9.9.9 -fi - -# 获取git提交版本信息 -commit_id=$(git rev-parse --short HEAD) -if [ "$1" = "debug" ]; then - debug="+debug" -else - debug="" -fi - -curve_version=${tag_version}+${commit_id}${debug} - -function create_python_wheel() { - local PYTHON_VER=$(basename $1) - local curdir=$(pwd) - local basedir="build/curvefs_${PYTHON_VER}/" - - mkdir -p ${basedir}/tmplib - mkdir -p ${basedir}/curvefs - - cp ./curvefs_python/tmplib/* ${basedir}/tmplib - cp ./curvefs_python/setup.py ${basedir}/setup.py - cp ./curvefs_python/__init__.py ${basedir}/curvefs - cp ./curvefs_python/curvefs.py ${basedir}/curvefs - cp ./bazel-bin/curvefs_python/libcurvefs.so ${basedir}/curvefs/_curvefs.so - - cd ${basedir} - sed -i "s/version-anchor/${curve_version}/g" setup.py - - deps=$(ldd curvefs/_curvefs.so | awk '{ print $1 }' | sed '/^$/d') - for i in $(find tmplib/ -name "lib*so"); do - basename=$(basename $i) - if [[ $deps =~ $basename ]]; then - echo $i - cp $i curvefs - fi - done - - ${1} setup.py bdist_wheel - cp dist/*whl ${curdir} - - cd ${curdir} -} - -function build_curvefs_python() { - for bin in "/usr/bin/python3" "/usr/bin/python2"; do - if [ ! -f ${bin} ]; then - echo "${bin} not exist" - continue - fi - - if ! 
bash ./curvefs_python/configure.sh $(basename ${bin}); then - echo "configure for ${bin} failed" - continue - fi - - # backup and recover python depends shared libraries - mkdir -p ./build/py_deps_libs - cp ./curvefs_python/tmplib/* ./build/py_deps_libs/ - cp ./build/py_deps_libs/* ./curvefs_python/tmplib/ - - rm -rf ./bazel-bin/curvefs_python - - if [ "$1" = "release" ]; then - bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --copt -O2 \ - --define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ - --copt -Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ - -L${dir}/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ - ${bazelflags} - else - bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --compilation_mode=dbg \ - --define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ - --copt -Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ - -L${dir}/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ - ${bazelflags} - fi - - create_python_wheel ${bin} - done -} - -# step3 执行编译 -bazel_version=$(bazel version | grep "Build label" | awk '{print $3}') -if [ -z ${bazel_version} ]; then - echo "please install bazel 4.2.2 first" - exit 1 -fi -if [ ${bazel_version} != "4.2.2" ]; then - echo "bazel version must be 4.2.2" - echo "current version is ${bazel_version}" - exit 1 -fi -echo "bazel version : ${bazel_version}" - -# check gcc version, gcc version must >= 4.8.5 -gcc_version_major=$(gcc -dumpversion | awk -F'.' '{print $1}') -gcc_version_minor=$(gcc -dumpversion | awk -F'.' '{print $2}') -gcc_version_pathlevel=$(gcc -dumpversion | awk -F'.' 
'{print $3}') -if [ ${gcc_version_major} -lt 4 ]; then - echo "gcc version must >= 4.8.5, current version is "$(gcc -dumpversion) - exit 1 -fi - -if [[ ${gcc_version_major} -eq 4 ]] && [[ ${gcc_version_minor} -lt 8 ]]; then - echo "gcc version must >= 4.8.5, current version is "$(gcc -dumpversion) - exit 1 -fi - -if [[ ${gcc_version_major} -eq 4 ]] && [[ ${gcc_version_minor} -eq 8 ]] && [[ ${gcc_version_pathlevel} -lt 5 ]]; then - echo "gcc version must >= 4.8.5, current version is "$(gcc -dumpversion) - exit 1 -fi -echo "gcc version : "$(gcc -dumpversion) - -echo "start compiling" - -cd ${dir}/thirdparties/etcdclient && - make clean && - make all && - cd $OLDPWD - -cp ${dir}/thirdparties/etcdclient/libetcdclient.h ${dir}/include/etcdclient/etcdclient.h - -if [ $(gcc -dumpversion | awk -F'.' '{print $1}') -le 6 ]; then - bazelflags='--copt -w' -else - bazelflags='--copt -w --cxxopt -faligned-new' -fi - -if [ "$1" = "debug" ]; then - make build stor=bs release=0 dep=1 only=src/* - - fail_count=0 - for python in "python2" "python3"; do - if ! bash ./curvefs_python/configure.sh ${python}; then - echo "configure ${python} failed" - let fail_count++ - fi - done - - if [[ $fail_count -ge 2 ]]; then - echo "configure python2/3 failed" - exit - fi - - bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --compilation_mode=dbg \ - --define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ - --copt \ - -Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ - -L${dir}/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ - --linkopt -L/usr/local/lib ${bazelflags} -else - make build stor=bs release=1 dep=1 only=src/* - - fail_count=0 - for python in "python2" "python3"; do - if ! 
bash ./curvefs_python/configure.sh ${python}; then - echo "configure ${python} failed" - let fail_count++ - fi - done - - if [[ $fail_count -ge 2 ]]; then - echo "configure python2/3 failed" - exit - fi - - bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --copt -O2 \ - --define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ - --copt \ - -Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ - -L${dir}/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ - --linkopt -L/usr/local/lib ${bazelflags} -fi -echo "end compile" - -#step4 创建临时目录,拷贝二进制、lib库和配置模板 -mkdir build -cp -r curve-mds build/ -cp -r curve-chunkserver build/ - -cp -r curve-sdk build/ -cp -r curve-tools build/ -cp -r curve-monitor build/ -cp -r curve-snapshotcloneserver build/ -cp -r curve-nginx build/ - -mkdir -p build/curve-mds/usr/bin - -mkdir -p build/curve-mds/etc/curve -mkdir -p build/curve-mds/usr/lib -mkdir -p build/curve-tools/usr/bin -cp ./bazel-bin/src/mds/main/curvemds build/curve-mds/usr/bin/curve-mds -cp thirdparties/etcdclient/libetcdclient.so \ - build/curve-mds/usr/lib/libetcdclient.so -cp ./bazel-bin/tools/curvefsTool build/curve-mds/usr/bin/curve-tool -cp -r tools/snaptool build/curve-tools/usr/bin/snaptool-lib -cp tools/snaptool/snaptool build/curve-tools/usr/bin/snaptool -chmod a+x build/curve-tools/usr/bin/snaptool -cp ./bazel-bin/src/tools/curve_tool \ - build/curve-tools/usr/bin/curve_ops_tool -mkdir -p build/curve-chunkserver/usr/bin -mkdir -p build/curve-chunkserver/etc/curve -cp ./bazel-bin/src/chunkserver/chunkserver \ - build/curve-chunkserver/usr/bin/curve-chunkserver -cp ./bazel-bin/src/tools/curve_chunkserver_tool \ - build/curve-chunkserver/usr/bin/curve_chunkserver_tool - -cp ./bazel-bin/src/tools/curve_format \ - build/curve-chunkserver/usr/bin/curve-format - -mkdir -p build/curve-sdk/usr/curvefs -mkdir -p build/curve-sdk/usr/bin -mkdir -p build/curve-sdk/etc/curve -mkdir -p build/curve-sdk/usr/lib -mkdir -p 
build/curve-sdk/usr/include -cp ./bazel-bin/curvefs_python/libcurvefs.so \ - build/curve-sdk/usr/curvefs/_curvefs.so -cp curvefs_python/curvefs.py build/curve-sdk/usr/curvefs/curvefs.py -cp curvefs_python/__init__.py build/curve-sdk/usr/curvefs/__init__.py -cp curvefs_python/curvefs_tool.py build/curve-sdk/usr/curvefs/curvefs_tool.py -cp curvefs_python/parser.py build/curve-sdk/usr/curvefs/parser.py -cp curvefs_python/curve build/curve-sdk/usr/bin/curve -chmod a+x build/curve-sdk/usr/bin/curve -cp curvefs_python/tmplib/* build/curve-sdk/usr/lib/ -cp include/client/libcurve.h build/curve-sdk/usr/include -cp include/client/libcbd.h build/curve-sdk/usr/include -cp include/client/libcurve_define.h build/curve-sdk/usr/include -mkdir -p build/curve-monitor/etc/curve/monitor -cp -r monitor/* build/curve-monitor/etc/curve/monitor -mkdir -p build/curve-snapshotcloneserver/usr/bin -cp ./bazel-bin/src/snapshotcloneserver/snapshotcloneserver \ - build/curve-snapshotcloneserver/usr/bin/curve-snapshotcloneserver - -mkdir -p build/curve-nginx/etc/curve/nginx/app/etc -mkdir -p build/curve-nginx/etc/curve/nginx/conf -# step 4.1 prepare for nebd-package -cp -r nebd/nebd-package build/ -mkdir -p build/nebd-package/usr/include/nebd -mkdir -p build/nebd-package/usr/bin -mkdir -p build/nebd-package/usr/lib/nebd - -mkdir -p k8s/nebd/nebd-package/usr/bin -cp nebd/nebd-package/usr/bin/nebd-daemon k8s/nebd/nebd-package/usr/bin -sed -i '/^baseLogPath=/cbaseLogPath=/var/log/nebd' k8s/nebd/nebd-package/usr/bin/nebd-daemon -cp -r k8s/nebd/nebd-package build/k8s-nebd-package -mkdir -p build/k8s-nebd-package/usr/bin -mkdir -p build/k8s-nebd-package/usr/lib/nebd - -for i in $(find bazel-bin/ | grep -w so | grep -v solib | grep -v params | grep -v test | grep -v fake); do - cp -f $i build/nebd-package/usr/lib/nebd - cp -f $i build/k8s-nebd-package/usr/lib/nebd -done - -cp nebd/src/part1/libnebd.h build/nebd-package/usr/include/nebd -cp bazel-bin/nebd/src/part2/nebd-server build/nebd-package/usr/bin 
-cp bazel-bin/nebd/src/part2/nebd-server build/k8s-nebd-package/usr/bin - -# step 4.2 prepare for curve-nbd package -cp -r nbd/nbd-package build -mkdir -p build/nbd-package/usr/bin -cp bazel-bin/nbd/src/curve-nbd build/nbd-package/usr/bin - -cp -r k8s/nbd/nbd-package build/k8s-nbd-package -mkdir -p build/k8s-nbd-package/usr/bin -cp bazel-bin/nbd/src/curve-nbd build/k8s-nbd-package/usr/bin - -#step5 记录到debian包的配置文件,打包debian包 -debian_version=$(grep VERSION_ID /etc/os-release | cut -d '=' -f 2 | tr -d '"') -version="Version: ${curve_version}+deb${debian_version}" - -echo ${version} >> build/curve-mds/DEBIAN/control -echo ${version} >> build/curve-sdk/DEBIAN/control -echo ${version} >> build/curve-chunkserver/DEBIAN/control -echo ${version} >> build/curve-tools/DEBIAN/control -echo ${version} >> build/curve-monitor/DEBIAN/control -echo ${version} >> build/curve-snapshotcloneserver/DEBIAN/control -echo ${version} >> build/curve-nginx/DEBIAN/control -echo ${version} >> build/nebd-package/DEBIAN/control -echo ${version} >> build/k8s-nebd-package/DEBIAN/control -echo ${version} >> build/nbd-package/DEBIAN/control -echo ${version} >> build/k8s-nbd-package/DEBIAN/control - -dpkg-deb -b build/curve-mds . -dpkg-deb -b build/curve-sdk . -dpkg-deb -b build/curve-chunkserver . -dpkg-deb -b build/curve-tools . -dpkg-deb -b build/curve-monitor . -dpkg-deb -b build/curve-snapshotcloneserver . -dpkg-deb -b build/curve-nginx . -dpkg-deb -b build/nebd-package . -dpkg-deb -b build/k8s-nebd-package . -dpkg-deb -b build/nbd-package . -dpkg-deb -b build/k8s-nbd-package . - -# step6 清理libetcdclient.so编译出现的临时文件 -cd ${dir}/thirdparties/etcdclient -make clean -cd ${dir} - -# step7 打包python wheel -build_curvefs_python $1 diff --git a/mk-tar.sh b/mk-tar.sh deleted file mode 100755 index 0bb25540c2..0000000000 --- a/mk-tar.sh +++ /dev/null @@ -1,333 +0,0 @@ -#!/bin/bash - -# -# Copyright (c) 2020 NetEase Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -dir=$(pwd) - -# step1 清除生成的目录和文件 -bazel clean - -cleandir=( - curvefs_python/BUILD - curvefs_python/tmplib/ - curvesnapshot_python/BUILD - curvesnapshot_python/tmplib/ - *.deb - *.whl - *.tar.gz - build -) - -rm -rf "${cleandir[@]}" - -git submodule update --init - -# step2 获取tag版本和git提交版本信息 -# 获取tag版本 -tag_version=$(git status | grep -Ew "HEAD detached at|On branch" | awk '{print $NF}' | awk -F"v" '{print $2}') -if [ -z ${tag_version} ]; then - echo "not found version info, set version to 9.9.9" - tag_version=9.9.9 -fi - -# 获取git提交版本信息 -commit_id=$(git rev-parse --short HEAD) -if [ "$1" = "debug" ]; then - debug="+debug" -else - debug="" -fi - -curve_version=${tag_version}+${commit_id}${debug} - -function create_python_wheel() { - local PYTHON_VER=$(basename $1) - local curdir=$(pwd) - local basedir="build/curvefs_${PYTHON_VER}/" - - mkdir -p ${basedir}/tmplib - mkdir -p ${basedir}/curvefs - - cp ./curvefs_python/tmplib/* ${basedir}/tmplib - cp ./curvefs_python/setup.py ${basedir}/setup.py - cp ./curvefs_python/__init__.py ${basedir}/curvefs - cp ./curvefs_python/curvefs.py ${basedir}/curvefs - cp ./bazel-bin/curvefs_python/libcurvefs.so ${basedir}/curvefs/_curvefs.so - - cd ${basedir} - sed -i "s/version-anchor/${curve_version}/g" setup.py - - deps=$(ldd curvefs/_curvefs.so | awk '{ print $1 }' | sed '/^$/d') - for i in $(find tmplib/ -name "lib*so"); do - basename=$(basename $i) - if [[ $deps =~ $basename ]]; 
then - echo $i - cp $i curvefs - fi - done - - ${1} setup.py bdist_wheel - cp dist/*whl ${curdir} - - cd ${curdir} -} - -function build_curvefs_python() { - for bin in "/usr/bin/python3" "/usr/bin/python2"; do - if [ ! -f ${bin} ]; then - echo "${bin} not exist" - continue - fi - - if ! bash ./curvefs_python/configure.sh $(basename ${bin}); then - echo "configure for ${bin} failed" - continue - fi - - # backup and recover python depends shared libraries - mkdir -p ./build/py_deps_libs - cp ./curvefs_python/tmplib/* ./build/py_deps_libs/ - cp ./build/py_deps_libs/* ./curvefs_python/tmplib/ - - rm -rf ./bazel-bin/curvefs_python - - if [ "$1" = "release" ]; then - bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --copt -O2 -s \ - --define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ - --copt -Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ - -L${dir}/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ - ${bazelflags} - else - bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --compilation_mode=dbg -s \ - --define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ - --copt -Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ - -L${dir}/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ - ${bazelflags} - fi - - create_python_wheel ${bin} - done -} - -# step3 执行编译 -bazel_version=$(bazel version | grep "Build label" | awk '{print $3}') -if [ -z ${bazel_version} ]; then - echo "please install bazel 4.2.2 first" - exit 1 -fi -if [ ${bazel_version} != "4.2.2" ]; then - echo "bazel version must be 4.2.2" - echo "current version is ${bazel_version}" - exit 1 -fi -echo "bazel version : ${bazel_version}" - -# check gcc version, gcc version must >= 4.8.5 -gcc_version_major=$(gcc -dumpversion | awk -F'.' '{print $1}') -gcc_version_minor=$(gcc -dumpversion | awk -F'.' '{print $2}') -gcc_version_pathlevel=$(gcc -dumpversion | awk -F'.' 
'{print $3}') -if [ ${gcc_version_major} -lt 4 ]; then - echo "gcc version must >= 4.8.5, current version is "$(gcc -dumpversion) - exit 1 -fi - -if [[ ${gcc_version_major} -eq 4 ]] && [[ ${gcc_version_minor} -lt 8 ]]; then - echo "gcc version must >= 4.8.5, current version is "$(gcc -dumpversion) - exit 1 -fi - -if [[ ${gcc_version_major} -eq 4 ]] && [[ ${gcc_version_minor} -eq 8 ]] && [[ ${gcc_version_pathlevel} -lt 5 ]]; then - echo "gcc version must >= 4.8.5, current version is "$(gcc -dumpversion) - exit 1 -fi -echo "gcc version : "$(gcc -dumpversion) - -echo "start compiling" - -cd ${dir}/thirdparties/etcdclient && - make clean && - make all && - cd $OLDPWD - -cp ${dir}/thirdparties/etcdclient/libetcdclient.h ${dir}/include/etcdclient/etcdclient.h - -if [ $(gcc -dumpversion | awk -F'.' '{print $1}') -le 6 ]; then - bazelflags='' -else - bazelflags='--copt -faligned-new' -fi - -if [ "$1" = "debug" ]; then - make build stor=bs release=0 dep=1 only=src/* - - fail_count=0 - for python in "python2" "python3"; do - if ! bash ./curvefs_python/configure.sh ${python}; then - echo "configure ${python} failed" - let fail_count++ - fi - done - - if [[ $fail_count -ge 2 ]]; then - echo "configure python2/3 failed" - exit - fi - - bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --compilation_mode=dbg -s \ - --define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ - --copt \ - -Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ - -L${dir}/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ - --linkopt -L/usr/local/lib ${bazelflags} -else - make build stor=bs release=1 dep=1 only=src/* - - fail_count=0 - for python in "python2" "python3"; do - if ! 
bash ./curvefs_python/configure.sh ${python}; then - echo "configure ${python} failed" - let fail_count++ - fi - done - - if [[ $fail_count -ge 2 ]]; then - echo "configure python2/3 failed" - exit - fi - - bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --copt -O2 -s \ - --define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ - --copt \ - -Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ - -L${dir}/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ - --linkopt -L/usr/local/lib ${bazelflags} -fi -echo "end compile" - -#step4 创建临时目录,拷贝二进制、lib库和配置模板 -echo "start copy" -mkdir -p build/curve/ -# curve-mds -mkdir -p build/curve/curve-mds/bin -mkdir -p build/curve/curve-mds/lib -cp ./bazel-bin/src/mds/main/curvemds build/curve/curve-mds/bin/curve-mds -cp thirdparties/etcdclient/libetcdclient.so \ - build/curve/curve-mds/lib/libetcdclient.so -cp ./bazel-bin/tools/curvefsTool build/curve/curve-mds/bin/curve-tool -# curve-tools -mkdir -p build/curve/curve-tools/bin -cp ./bazel-bin/src/tools/curve_tool \ - build/curve/curve-tools/bin/curve_ops_tool -cp -r tools/snaptool build/curve/curve-tools/bin/snaptool-lib -cp tools/snaptool/snaptool build/curve/curve-tools/bin/snaptool -chmod a+x build/curve/curve-tools/bin/snaptool -# curve-chunkserver -mkdir -p build/curve/curve-chunkserver/bin -cp ./bazel-bin/src/chunkserver/chunkserver \ - build/curve/curve-chunkserver/bin/curve-chunkserver -cp ./bazel-bin/src/tools/curve_chunkserver_tool \ - build/curve/curve-chunkserver/bin/curve_chunkserver_tool -cp ./bazel-bin/src/tools/curve_format \ - build/curve/curve-chunkserver/bin/curve-format -# curve-sdk -mkdir -p build/curve/curve-sdk/curvefs -mkdir -p build/curve/curve-sdk/bin -mkdir -p build/curve/curve-sdk/lib -mkdir -p build/curve/curve-sdk/include -cp ./bazel-bin/curvefs_python/libcurvefs.so \ - build/curve/curve-sdk/curvefs/_curvefs.so -cp curvefs_python/curvefs.py build/curve/curve-sdk/curvefs/curvefs.py -cp 
curvefs_python/__init__.py build/curve/curve-sdk/curvefs/__init__.py -cp curvefs_python/curvefs_tool.py build/curve/curve-sdk/curvefs/curvefs_tool.py -cp curvefs_python/parser.py build/curve/curve-sdk/curvefs/parser.py -cp curvefs_python/curve build/curve/curve-sdk/bin/curve -chmod a+x build/curve/curve-sdk/bin/curve -cp curvefs_python/tmplib/* build/curve/curve-sdk/lib/ -cp include/client/libcurve.h build/curve/curve-sdk/include -cp include/client/libcbd.h build/curve/curve-sdk/include -cp include/client/libcurve_define.h build/curve/curve-sdk/include -# curve-snapshotcloneserver -mkdir -p build/curve/curve-snapshotcloneserver/bin -cp ./bazel-bin/src/snapshotcloneserver/snapshotcloneserver \ - build/curve/curve-snapshotcloneserver/bin/curve-snapshotcloneserver -mkdir -p build/curve/curve-snapshotcloneserver/lib -cp thirdparties/etcdclient/libetcdclient.so \ - build/curve/curve-snapshotcloneserver/lib/libetcdclient.so -# curve-nginx -mkdir -p build/curve/curve-nginx/app/etc -mkdir -p build/curve/curve-nginx/conf -# ansible -cp -r curve-ansible build/curve/ -# README - -# curve-monitor -mkdir -p build/curve-monitor -cp -r monitor/* build/curve-monitor/ -echo "end copy" - -# step 4.1 prepare for nebd-package -mkdir -p build/nebd-package/include/nebd -mkdir -p build/nebd-package/bin -mkdir -p build/nebd-package/lib/nebd - -for i in $(find bazel-bin/ | grep -w so | grep -v solib | grep -v params | grep -v test | grep -v fake); do - cp -f $i build/nebd-package/lib/nebd -done - -cp nebd/src/part1/libnebd.h build/nebd-package/include/nebd -cp bazel-bin/nebd/src/part2/nebd-server build/nebd-package/bin - -# step 4.2 prepare for curve-nbd package -mkdir -p build/nbd-package/bin -mkdir -p build/nbd-package/etc -cp bazel-bin/nbd/src/curve-nbd build/nbd-package/bin -cp nbd/nbd-package/usr/bin/map_curve_disk.sh build/nbd-package/bin -cp nbd/nbd-package/etc/curve/curvetab build/nbd-package/etc -cp nbd/nbd-package/etc/systemd/system/map_curve_disk.service build/nbd-package/etc - 
-# step5 打包tar包 -echo "start make tarball" -cd ${dir}/build -curve_name="curve_${curve_version}.tar.gz" -echo "curve_name: ${curve_name}" -tar zcf ${curve_name} curve -cp ${curve_name} $dir -monitor_name="curve-monitor_${curve_version}.tar.gz" -echo "monitor_name: ${monitor_name}" -tar zcf ${monitor_name} curve-monitor -cp ${monitor_name} $dir -nebd_name="nebd_${curve_version}.tar.gz" -echo "nebd_name: ${nebd_name}" -tar zcf ${nebd_name} nebd-package -cp ${nebd_name} $dir -nbd_name="nbd_${curve_version}.tar.gz" -echo "nbd_name: ${nbd_name}" -tar zcf ${nbd_name} nbd-package -cp ${nbd_name} $dir -echo "end make tarball" - -# step6 清理libetcdclient.so编译出现的临时文件 -echo "start clean etcd" -cd ${dir}/thirdparties/etcdclient -make clean -cd ${dir} -echo "end clean etcd" - -# step7 打包python wheel -echo "start make python wheel" -build_curvefs_python $1 -echo "end make python wheel" diff --git a/nebd/src/common/configuration.cpp b/nebd/src/common/configuration.cpp index 51ff19fcca..69a23ebe43 100644 --- a/nebd/src/common/configuration.cpp +++ b/nebd/src/common/configuration.cpp @@ -39,8 +39,7 @@ bool Configuration::LoadConfig() { // FIXME: may not remove middle spaces line.erase(std::remove_if(line.begin(), line.end(), isspace), line.end()); - if (line[0] == '#' || line.empty()) - continue; + if (line.empty() || line[0] == '#') continue; int delimiterPos = line.find("="); std::string key = line.substr(0, delimiterPos); diff --git a/nebd/src/part2/metafile_manager.cpp b/nebd/src/part2/metafile_manager.cpp index 23ce070f6e..6fcdc5c94b 100644 --- a/nebd/src/part2/metafile_manager.cpp +++ b/nebd/src/part2/metafile_manager.cpp @@ -205,7 +205,6 @@ int NebdMetaFileParser::Parse(Json::Value root, } for (const auto& volume : volumes) { - std::string fileName; NebdFileMeta meta; if (volume[kFileName].isNull()) { diff --git a/replace-curve-repo.sh b/replace-curve-repo.sh index 9acb0fc5ab..138066bd3f 100755 --- a/replace-curve-repo.sh +++ b/replace-curve-repo.sh @@ -50,5 +50,11 @@ sed -i 
"s;https://github.com/bazelbuild/platforms/archive/98939346da932eef0b54cf # rules_cc sed -i "s;https://github.com/bazelbuild/rules_cc/archive/9e10b8a6db775b1ecd358d8ddd3dab379a2c29a5.zip;https://curve-build.nos-eastchina1.126.net/rules_cc-9e10b8a6db775b1ecd358d8ddd3dab379a2c29a5.zip;g" WORKSPACE +# spdlog +sed -i "s;https://github.com/gabime/spdlog/archive/refs/tags/v1.11.0.tar.gz;https://curve-build.nos-eastchina1.126.net/spdlog-1.11.0.tar.gz;g" WORKSPACE + +# fmt +sed -i "s;https://github.com/fmtlib/fmt/archive/9.1.0.tar.gz;https://curve-build.nos-eastchina1.126.net/fmt-9.1.0.tar.gz;g" WORKSPACE + # curve-nbd sed -i "s;https://github.com/opencurve/curve-nbd;https://gitee.com/NetEase_Curve/curve-nbd;g" .gitmodules diff --git a/robot/init_env.sh b/robot/init_env.sh index b34dac337d..f67bc145f1 100644 --- a/robot/init_env.sh +++ b/robot/init_env.sh @@ -3,7 +3,7 @@ set -ex conf_url="/var/lib/jenkins/workspace/ansibe-conf" bash replace-curve-repo.sh -bash mk-tar.sh debug +make tar release=0 ls *.tar.gz | xargs -n1 tar xzvf \cp ${conf_url}/server.ini curve/curve-ansible/ \cp ${conf_url}/client.ini curve/curve-ansible/ diff --git a/scripts/ci/check_coverage.sh b/scripts/ci/check_coverage.sh new file mode 100755 index 0000000000..1edb63a6cb --- /dev/null +++ b/scripts/ci/check_coverage.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash + +### Check coverage from coverage reports + +set -e + +if [ ! 
-d coverage ]; then + echo "coverage report not found" + exit -1 +fi + +check_repo_line_coverage() { + expected_coverage=$1 + actual_coverage=$(cat coverage/index.html | grep -A5 "Lines" | grep % | awk -F'>' '{ print $2 }' | awk '{ print $1 }') + + if (($(echo "$actual_coverage >= $expected_coverage" | bc -l))); then + echo "repo line coverage ratio: $actual_coverage" + else + echo "repo line coverage isn't ok, actual coverage ratio: $actual_coverage, expected coverage ratio: $expected_coverage" + exit -1 + fi +} + +check_repo_branch_coverage() { + expected_coverage=$1 + actual_coverage=$(cat coverage/index.html | grep -A5 "Branches" | grep % | awk -F'>' '{ print $2 }' | awk '{ print $1 }' | grep -v tr) + + if (($(echo "$actual_coverage >= $expected_coverage" | bc -l))); then + echo "repo branch coverage ratio: $actual_coverage" + else + echo "repo branch coverage isn'tvim ok, actual coverage ratio: $actual_coverage, expected coverage ratio: $expected_coverage" + exit -1 + fi +} + +check_module_branch_coverage() { + base_dir=$1 + expected_coverage=$2 + + find coverage/$base_dir -name index.html | xargs cat | grep -A5 "Branches" | grep % | awk -F'>' '{ print $2 }' | awk '{ print $1 }' | grep -v tr >dummy.all + if [ -s dummy.all ]; then + actual_coverage=$(cat dummy.all | awk '{ sum+= $1 } END { print sum/NR }') + rm dummy.all || true + else + actual_coverage=0 + fi + + if (($(echo "$actual_coverage >= $expected_coverage" | bc -l))); then + echo "$base_dir branch coverage ratio: ${actual_coverage}" + else + echo "$base_dir branch coverage is not ok, actual branch coverage ratio: ${actual_coverage}, expected branch coverage ratio: ${expected_coverage}" + exit -1 + fi +} +if [ $1 == "curvebs" ];then + +check_repo_branch_coverage 59 +check_repo_line_coverage 73 + +## two arguments are module and expected branch coverage ratio +check_module_branch_coverage "src/mds" 70 +check_module_branch_coverage "src/client" 78 +check_module_branch_coverage "src/chunkserver" 65 
+check_module_branch_coverage "src/snapshotcloneserver" 65 +check_module_branch_coverage "src/common" 65 +check_module_branch_coverage "src/fs" 65 +check_module_branch_coverage "src/idgenerator" 79 +check_module_branch_coverage "src/kvstorageclient" 70 +check_module_branch_coverage "src/leader_election" 100 +check_module_branch_coverage "nebd" 75 + +elif [ $1 == "curvefs" ];then + +check_module_branch_coverage "mds" 59 +check_module_branch_coverage "client" 59 +check_module_branch_coverage "metaserver" 65 +check_module_branch_coverage "common" 16 +check_module_branch_coverage "tools" 0 +fi + +echo "Checking repo coverage succeeded!" + +exit 0 diff --git a/src/chunkserver/copyset_node_manager.cpp b/src/chunkserver/copyset_node_manager.cpp index 873f5334e9..78f4afec89 100755 --- a/src/chunkserver/copyset_node_manager.cpp +++ b/src/chunkserver/copyset_node_manager.cpp @@ -481,12 +481,13 @@ bool CopysetNodeManager::PurgeCopysetNodeData(const LogicPoolID &logicPoolId, << ToGroupIdString(logicPoolId, copysetId) << " persistently."; ret = false; + } else { + LOG(INFO) << "Move copyset" + << ToGroupIdString(logicPoolId, copysetId) + << "to trash success."; + copysetNodeMap_.erase(it); + ret = true; } - LOG(INFO) << "Move copyset" - << ToGroupIdString(logicPoolId, copysetId) - << "to trash success."; - copysetNodeMap_.erase(it); - ret = true; } } diff --git a/src/client/client_config.cpp b/src/client/client_config.cpp index 16f464685d..0be6f60e82 100644 --- a/src/client/client_config.cpp +++ b/src/client/client_config.cpp @@ -287,25 +287,25 @@ int ClientConfig::Init(const std::string& configpath) { // only client side need these follow 5 options ret = conf_.GetUInt32Value("csClientOpt.rpcTimeoutMs", - &fileServiceOption_.csClientOpt.rpcTimeoutMs); - LOG_IF(WARNING, ret = false) << "config no csClientOpt.rpcTimeoutMs info"; + &fileServiceOption_.csClientOpt.rpcTimeoutMs); + LOG_IF(WARNING, ret == false) << "config no csClientOpt.rpcTimeoutMs info"; ret = 
conf_.GetUInt32Value("csClientOpt.rpcMaxTry", - &fileServiceOption_.csClientOpt.rpcMaxTry); - LOG_IF(WARNING, ret = false) << "config no csClientOpt.rpcMaxTry info"; + &fileServiceOption_.csClientOpt.rpcMaxTry); + LOG_IF(WARNING, ret == false) << "config no csClientOpt.rpcMaxTry info"; ret = conf_.GetUInt32Value("csClientOpt.rpcIntervalUs", &fileServiceOption_.csClientOpt.rpcIntervalUs); - LOG_IF(WARNING, ret = false) << "config no csClientOpt.rpcIntervalUs info"; + LOG_IF(WARNING, ret == false) << "config no csClientOpt.rpcIntervalUs info"; ret = conf_.GetUInt32Value("csClientOpt.rpcMaxTimeoutMs", &fileServiceOption_.csClientOpt.rpcIntervalUs); - LOG_IF(WARNING, ret = false) + LOG_IF(WARNING, ret == false) << "config no csClientOpt.rpcMaxTimeoutMs info"; ret = conf_.GetUInt32Value("csBroadCasterOpt.broadCastMaxNum", &fileServiceOption_.csBroadCasterOpt.broadCastMaxNum); - LOG_IF(WARNING, ret = false) + LOG_IF(WARNING, ret == false) << "config no csBroadCasterOpt.broadCastMaxNum info"; return 0; diff --git a/src/common/configuration.cpp b/src/common/configuration.cpp index 4496045be6..0956423a3c 100644 --- a/src/common/configuration.cpp +++ b/src/common/configuration.cpp @@ -41,8 +41,7 @@ bool Configuration::LoadConfig() { // FIXME: may not remove middle spaces line.erase(std::remove_if(line.begin(), line.end(), isspace), line.end()); - if (line[0] == '#' || line.empty()) - continue; + if (line.empty() || line[0] == '#') continue; int delimiterPos = line.find("="); std::string key = line.substr(0, delimiterPos); diff --git a/src/common/s3_adapter.h b/src/common/s3_adapter.h index 67be6f7949..2adbbfb3bc 100644 --- a/src/common/s3_adapter.h +++ b/src/common/s3_adapter.h @@ -142,7 +142,8 @@ struct GetObjectAsyncContext : public Aws::Client::AsyncCallerContext { offset(offset), len(len), cb(std::move(cb)), - type(type) {} + type(type), + timer(butil::Timer::STARTED) {} }; /* @@ -171,7 +172,8 @@ struct PutObjectAsyncContext : public Aws::Client::AsyncCallerContext { 
buffer(buffer), bufferSize(bufferSize), cb(std::move(cb)), - type(type) {} + type(type), + timer(butil::Timer::STARTED) {} }; class S3Adapter { diff --git a/src/mds/schedule/leaderScheduler.cpp b/src/mds/schedule/leaderScheduler.cpp index 2a75e6e14b..557f796c91 100644 --- a/src/mds/schedule/leaderScheduler.cpp +++ b/src/mds/schedule/leaderScheduler.cpp @@ -157,7 +157,7 @@ bool LeaderScheduler::transferLeaderOut(ChunkServerIdType source, int count, candidateInfos.emplace_back(cInfo); } - if (candidateInfos.size() <= 0) { + if (candidateInfos.size() == 0) { return false; } @@ -241,7 +241,7 @@ bool LeaderScheduler::transferLeaderIn(ChunkServerIdType target, int count, } } - if (candidateInfos.size() <= 0) { + if (candidateInfos.size() == 0) { return false; } diff --git a/src/mds/schedule/recoverScheduler.cpp b/src/mds/schedule/recoverScheduler.cpp index 7876bd442c..0003275bc9 100644 --- a/src/mds/schedule/recoverScheduler.cpp +++ b/src/mds/schedule/recoverScheduler.cpp @@ -100,7 +100,7 @@ int RecoverScheduler::Schedule() { } } - if (offlinelists.size() <= 0) { + if (offlinelists.size() == 0) { continue; } @@ -206,7 +206,7 @@ void RecoverScheduler::CalculateExcludesChunkServer( continue; } - if (unhealthyStateCS.count(cs.info.serverId) <= 0) { + if (unhealthyStateCS.count(cs.info.serverId) == 0) { unhealthyStateCS[cs.info.serverId] = std::vector{cs.info.id}; } else { diff --git a/src/mds/schedule/scheduler.cpp b/src/mds/schedule/scheduler.cpp index 0117b8607b..05ca1e7d64 100644 --- a/src/mds/schedule/scheduler.cpp +++ b/src/mds/schedule/scheduler.cpp @@ -146,7 +146,6 @@ ChunkServerIdType Scheduler::SelectBestPlacementChunkServer( } // calculate the influence on scatter-width of other replicas - std::map> out; int source = UNINTIALIZE_ID; int target = cs.info.id; int affected = 0; diff --git a/src/mds/schedule/scheduler_helper.cpp b/src/mds/schedule/scheduler_helper.cpp index 3e0fa106c0..f85a388999 100644 --- a/src/mds/schedule/scheduler_helper.cpp +++ 
b/src/mds/schedule/scheduler_helper.cpp @@ -133,7 +133,7 @@ bool SchedulerHelper::SatisfyZoneAndScatterWidthLimit( } } - if (zoneList.count(targetZone) <= 0) { + if (zoneList.count(targetZone) == 0) { zoneList[targetZone] = 1; } else { zoneList[targetZone] += 1; @@ -246,7 +246,6 @@ void SchedulerHelper::CalculateAffectOfMigration( std::map> *scatterWidth) { // get scatter-width map and scatter-width of target std::map targetMap; - std::pair targetScatterWidth; if (target != UNINTIALIZE_ID) { topo->GetChunkServerScatterMap(target, &targetMap); (*scatterWidth)[target].first = targetMap.size(); @@ -254,7 +253,6 @@ void SchedulerHelper::CalculateAffectOfMigration( // get scatter-width map and scatter-width of the source std::map sourceMap; - std::pair sourceScatterWidth; if (source != UNINTIALIZE_ID) { topo->GetChunkServerScatterMap(source, &sourceMap); (*scatterWidth)[source].first = sourceMap.size(); @@ -272,14 +270,14 @@ void SchedulerHelper::CalculateAffectOfMigration( // if target was initialized if (target != UNINTIALIZE_ID) { // influence on target - if (targetMap.count(peer.id) <= 0) { + if (targetMap.count(peer.id) == 0) { targetMap[peer.id] = 1; } else { targetMap[peer.id]++; } // target's influence on other chunkservers - if (tmpMap.count(target) <= 0) { + if (tmpMap.count(target) == 0) { tmpMap[target] = 1; } else { tmpMap[target]++; diff --git a/src/tools/curve_format_main.cpp b/src/tools/curve_format_main.cpp index 795813cdfd..08aa1f62ed 100644 --- a/src/tools/curve_format_main.cpp +++ b/src/tools/curve_format_main.cpp @@ -107,7 +107,7 @@ using curve::chunkserver::FilePoolMeta; class CompareInternal { public: - bool operator()(std::string s1, std::string s2) { + bool operator()(const std::string& s1, const std::string& s2) { auto index1 = std::atoi(s1.c_str()); auto index2 = std::atoi(s2.c_str()); return index1 < index2; @@ -178,7 +178,7 @@ static int AllocateFiles(AllocateStruct* allocatestruct) { break; } - allocatestruct->fsptr->Close(fd); + ret = 
allocatestruct->fsptr->Close(fd); if (ret < 0) { *allocatestruct->checkwrong = true; LOG(ERROR) << "close failed, " << tmpchunkfilepath; diff --git a/src/tools/mds_client.cpp b/src/tools/mds_client.cpp index 22e807ea9e..7a119c77bc 100644 --- a/src/tools/mds_client.cpp +++ b/src/tools/mds_client.cpp @@ -207,9 +207,8 @@ int MDSClient::ListDir(const std::string& dirName, } return 0; } - std::cout << "ListDir fail with errCode: " - << response.statuscode() << std::endl; - return -1; + std::cout << "ListDir fail with errCode: " << response.statuscode() + << std::endl; return -1; } diff --git a/src/tools/status_tool.cpp b/src/tools/status_tool.cpp index e6bfc116a4..bb9e8f97b9 100644 --- a/src/tools/status_tool.cpp +++ b/src/tools/status_tool.cpp @@ -955,6 +955,7 @@ int StatusTool::PrintChunkserverStatus(bool checkLeftSize) { // check use walpool ret = CheckUseWalPool(poolChunkservers, &useWalPool, &useChunkFilePoolAsWalPool, metricClient_); + // get chunkserver left size std::map> poolChunkLeftSize; std::map> poolWalSegmentLeftSize; diff --git a/thirdparties/etcdclient/Makefile b/thirdparties/etcdclient/Makefile index d5149e70d4..18f694b1e3 100644 --- a/thirdparties/etcdclient/Makefile +++ b/thirdparties/etcdclient/Makefile @@ -18,6 +18,17 @@ SHELL=/bin/bash pwd := $(shell pwd) +# Download and build libetcdclient only when necessary, i.e., +# when dependent code is changed. +# Typically, this target should always be used to avoid unnecessary +# rebuilds. +build: libetcdclient.so libetcdclient.h expose-session-for-election.patch etcdclient.go objectManager.go + +libetcdclient.so libetcdclient.h: # do not use `clean' and `all' targets here as they will always be built + $(MAKE) clean all + +# Download and build libetcdclient, regardless of whether the code +# is changed or not. 
all: intall-go install-etcdclient libetcdclient intall-go: @@ -27,7 +38,7 @@ intall-go: install-etcdclient: mkdir -p $(pwd)/tmp/gosrc/src/go.etcd.io - cd $(pwd)/tmp/gosrc/src/go.etcd.io && git clone --branch v3.4.0 https://gitee.com/mirrors/etcd + cd $(pwd)/tmp/gosrc/src/go.etcd.io && git clone --branch v3.4.0 --depth=1 https://gitee.com/mirrors/etcd cd $(pwd)/tmp/gosrc/src/go.etcd.io/etcd && cp $(pwd)/expose-session-for-election.patch . && patch -p1 < expose-session-for-election.patch vendorpath := $(pwd)/tmp/gosrc/src/go.etcd.io/etcd/vendor @@ -40,7 +51,6 @@ libetcdclient: mv $(vendorpath)/github.com $(pwd)/tmp/gosrc/src/ $(pwd)/tmp/go/bin/go build -buildmode=c-shared -o libetcdclient.so ${pwd}/tmp/gosrc/src/etcdclient.go ${pwd}/tmp/gosrc/src/objectManager.go - clean: rm -fr $(pwd)/tmp rm -fr $(pwd)/libetcdclient.so diff --git a/tools-v2/Makefile b/tools-v2/Makefile index ed33716180..b42c70de73 100644 --- a/tools-v2/Makefile +++ b/tools-v2/Makefile @@ -73,6 +73,8 @@ install_grpc_protobuf: # wget ${GITHUB_PROXY}https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip \ # && unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip "bin/protoc" -d /usr/ \ # && rm protoc-${PROTOC_VERSION}-linux-x86_64.zip + # enable Go Modules + go env -w GO111MODULE=on go install google.golang.org/protobuf/cmd/protoc-gen-go@${PROTOC_GEN_GO_VERSION} go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@${PROTOC_GEN_GO_GRPC_VERSION} diff --git a/tools-v2/README.md b/tools-v2/README.md index b9c7a32110..b9fea277f8 100644 --- a/tools-v2/README.md +++ b/tools-v2/README.md @@ -42,7 +42,10 @@ A tool for CurveFS & CurveBs. 
- [usage inode](#usage-inode) - [usage metadata](#usage-metadata) - [warmup](#warmup) - - [warmup add](#warmup-add) + - [warmup add](#warmup-add) + - [warmup cancel](#warmup-cancel) + - [warmup list](#warmup-list) + - [warmup query](#warmup-query) - [bs](#bs) - [list](#list-1) - [list logical-pool](#list-logical-pool) @@ -876,19 +879,70 @@ Output: #### warmup +When using object storage with CurveFS, our data is stored remotely on platforms like MinIO or AWS S3. +Each time we access a file within the CurveFS mount, we have to fetch the content from the remote platform. +This process involves overhead like network connections, file writing, and memory access. + +To alleviate the wait when users require a file, the warmup tool allows users to pre-fetch files of interest in advance. +This way, the needed files are readily available when users need them. + #### warmup add -warmup a file(directory), or given a list file contains a list of files(directories) that you want to warmup. +Initiate warming up for either a file or directory, or provide a single file (file list) containing a list of files and directories you want to warm up. Usage: ```shell +curve fs warmup add [--filelist] curve fs warmup add /mnt/curvefs/warmup curve fs warmup add --filelist /mnt/curvefs/warmup.list ``` > `curve fs warmup add /mnt/curvefs/warmup` will warmup a file(directory). -> /mnt/curvefs/warmup.list + +> `curve fs warmup add --filelist /mnt/curvefs/warmup.list` will warmup a filelist. + +#### warmup cancel + +Cancel a warmup job that's currently in progress for either a single file or directory, or for a list of files and directories contained within a file list. + +Usage: + +```shell +curve fs warmup cancel [--filelist] +curve fs warmup cancel /mnt/curvefs/warmup +curve fs warmup cancel --filelist /mnt/curvefs/warmup.list +``` + +> `curve fs warmup cancel /mnt/curvefs/warmup` will cancel a warmup job running on the speicfied file(directory). 
+ +> `curve fs warmup cancel --filelist /mnt/curvefs/warmup.list` will cancel a warmup job running on the speicfied filelist. + +#### warmup list + +Display a list of all currently active warmup jobs initiated within the specified CurveFS filesystem. + +Usage: + +```shell +curve fs warmup list +curve fs warmup list /mnt/curvefs +``` + +> `curve fs warmup list /mnt/curvefs` will list out all the running warmup jobs within the specified CurveFS filesystem mount path. + +#### warmup query + +Check the status of a warmup job for either a single file or directory, or for a list of files and directories contained within a file list. + +Usage: + +```shell +curve fs warmup query +curve fs warmup query /mnt/curvefs/warmup +``` + +> `curve fs warmup query /mnt/curvefs/warmup` will display the warmup job progress running on the specified target. ### bs @@ -1700,6 +1754,24 @@ Output: +---------+--------+ ``` +##### update volume flatten + +update volume flatten in curvebs cluster + +Usage: +```bash +curve bs update volume --user root --taskid d26e27a8-fcbd-4f7a-adf8-53795217cbb0 +``` + +Output: +``` ++------+--------------------------------------+---------+ +| USER | TASK ID | RESULT | ++------+--------------------------------------+---------+ +| root | d26e27a8-fcbd-4f7a-adf8-53795217cbb0 | success | ++------+--------------------------------------+---------+ +``` + #### create ##### create file diff --git a/tools-v2/go.mod b/tools-v2/go.mod index c3da47d06d..c51325de20 100644 --- a/tools-v2/go.mod +++ b/tools-v2/go.mod @@ -5,11 +5,10 @@ go 1.19 replace github.com/optiopay/kafka => github.com/cilium/kafka v0.0.0-20180809090225-01ce283b732b require ( - github.com/cilium/cilium v1.13.4 + github.com/cilium/cilium v1.13.7 github.com/deckarep/golang-set/v2 v2.3.0 github.com/docker/cli v24.0.2+incompatible github.com/dustin/go-humanize v1.0.1 - github.com/go-resty/resty/v2 v2.7.0 github.com/gookit/color v1.5.3 github.com/moby/term v0.5.0 github.com/olekukonko/tablewriter v0.0.5 diff 
--git a/tools-v2/go.sum b/tools-v2/go.sum index 2b7ad9655c..518e3d6aa4 100644 --- a/tools-v2/go.sum +++ b/tools-v2/go.sum @@ -69,8 +69,8 @@ github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/cilium v1.13.4 h1:rvbcA6q7YMWtDfEtx9y4olL46wBFoZfM2BazUoJlaRI= -github.com/cilium/cilium v1.13.4/go.mod h1:bppTjxboubrsI835+yp3vZ2S79wRQbZCbUiHxrAIOQY= +github.com/cilium/cilium v1.13.7 h1:d+TgNuV8pIBBLtVEtCkA4n0FxDbqAdu18NHjupVyDuM= +github.com/cilium/cilium v1.13.7/go.mod h1:TKeog+rE2T62B9WqByWGRGk92n0oMyvocCAylG16rXw= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ= github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA= @@ -141,8 +141,6 @@ github.com/go-openapi/spec v0.20.7 h1:1Rlu/ZrOCCob0n+JKKJAWhNWMPW8bOZRg8FJaY+0SK github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o= github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/validate v0.22.0 h1:b0QecH6VslW/TxtpKgzpO1SNG7GU2FsaqKdP1E2T50Y= -github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= -github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= github.com/go-sql-driver/mysql v1.3.0 h1:pgwjLi/dvffoP9aabwkT3AKpXQM93QARkjFhDDqC1UE= github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= 
@@ -506,7 +504,6 @@ golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= diff --git a/tools-v2/internal/error/error.go b/tools-v2/internal/error/error.go index 9639218c69..f0e3382456 100644 --- a/tools-v2/internal/error/error.go +++ b/tools-v2/internal/error/error.go @@ -475,12 +475,19 @@ var ( ErrBsGetAllSnapshotResult = func() *CmdError { return NewInternalCmdError(72, "get all snapshot results fail, err: %s") } + ErrVerifyError = func() *CmdError { + return NewInternalCmdError(73, "verify fail, err: %s") + } + ErrListWarmup = func() *CmdError { + return NewInternalCmdError(74, "list warmup progress fail, err: %s") + } ErrBsGetSegmentInfo = func() *CmdError { - return NewInternalCmdError(73, "get segment info fail, err: %s") + return NewInternalCmdError(75, "get segment info fail, err: %s") } ErrBsGetChunkHash = func() *CmdError { - return NewInternalCmdError(74, "get chunk hash fail, err: %s") + return NewInternalCmdError(76, "get chunk hash fail, err: %s") } + // http error ErrHttpUnreadableResult = func() *CmdError { return NewHttpResultCmdError(1, "http response is unreadable, the uri is: %s, the error is: %s") diff --git a/tools-v2/internal/utils/mountpoint.go b/tools-v2/internal/utils/mountpoint.go index 9ecc51b2bf..20ab7bca4d 100644 --- 
a/tools-v2/internal/utils/mountpoint.go +++ b/tools-v2/internal/utils/mountpoint.go @@ -23,6 +23,7 @@ package cobrautil import ( + "path" "path/filepath" "strings" @@ -59,3 +60,13 @@ func Path2CurvefsPath(path string, mountpoint *mountinfo.MountInfo) string { curvefsPath, _ := filepath.Abs(strings.Replace(path, mountPoint, root, 1)) return curvefsPath } + +func CurvefsPath2ClientPath(curvefsPath string, mountpointInfo *mountinfo.MountInfo) string { + curvefsPath, _ = filepath.Abs(curvefsPath) + mountPoint := mountpointInfo.MountPoint + rootInCurvefs := mountpointInfo.Root + filename := strings.Replace(curvefsPath, rootInCurvefs, "", 1) + + clientPath := path.Join(mountPoint, filename) + return clientPath +} diff --git a/tools-v2/internal/utils/row.go b/tools-v2/internal/utils/row.go index 8b94801f37..4154203619 100644 --- a/tools-v2/internal/utils/row.go +++ b/tools-v2/internal/utils/row.go @@ -88,9 +88,11 @@ const ( ROW_PEER_ID = "peerId" ROW_PEER_NUMBER = "peerNumber" ROW_PHYPOOL = "phyPool" + ROW_PATH = "path" ROW_POOL = "pool" ROW_POOL_ID = "poolId" ROW_PORT = "port" + ROW_PROGRESS = "progress" ROW_READONLY = "readonly" ROW_REASON = "reason" ROW_RECOVERING = "recovering" diff --git a/tools-v2/internal/utils/snapshot.go b/tools-v2/internal/utils/snapshot.go index 9010ada75a..358c7b4c44 100644 --- a/tools-v2/internal/utils/snapshot.go +++ b/tools-v2/internal/utils/snapshot.go @@ -53,6 +53,9 @@ const ( ActionGetCloneTaskList = "GetCloneTaskList" ActionGetFileSnapshotList = "GetFileSnapshotList" ActionGetFileSnapshotInfo = "GetFileSnapshotInfo" + + ResultCode = "Code" + ResultSuccess = "0" ) func NewSnapshotQuerySubUri(params map[string]any) string { diff --git a/tools-v2/pkg/cli/command/curvebs/delete/volume/clone/clone.go b/tools-v2/pkg/cli/command/curvebs/delete/volume/clone/clone.go index 06acd5df68..82a85f6993 100644 --- a/tools-v2/pkg/cli/command/curvebs/delete/volume/clone/clone.go +++ b/tools-v2/pkg/cli/command/curvebs/delete/volume/clone/clone.go @@ -105,7 
+105,7 @@ func (rCmd *CloneCmd) RunCommand(cmd *cobra.Command, args []string) error { if err := json.Unmarshal([]byte(result), &resp); err != nil { return err } - if resp.Code != "0" { + if resp.Code != cobrautil.ResultSuccess { return fmt.Errorf("get clone list fail, error code: %s", resp.Code) } if len(resp.TaskInfos) == 0 { @@ -120,6 +120,7 @@ func (rCmd *CloneCmd) RunCommand(cmd *cobra.Command, args []string) error { for _, item := range records { wg.Add(1) go func(item map[string]string) { + defer wg.Done() params := map[string]any{ cobrautil.QueryAction: cobrautil.ActionCleanCloneTask, cobrautil.QueryUser: item["User"], @@ -127,14 +128,21 @@ func (rCmd *CloneCmd) RunCommand(cmd *cobra.Command, args []string) error { } subUri := cobrautil.NewSnapshotQuerySubUri(params) metric := basecmd.NewMetric(rCmd.snapshotAddrs, subUri, rCmd.timeout) - _, err := basecmd.QueryMetric(metric) + result, err := basecmd.QueryMetric(metric) if err.TypeCode() != cmderror.CODE_SUCCESS { - item["Result"] = "fail" + item[cobrautil.ROW_RESULT] = cobrautil.ROW_VALUE_FAILED } else { - item["Result"] = "success" + payload := map[string]any{} + if err := json.Unmarshal([]byte(result), &payload); err != nil { + item[cobrautil.ROW_RESULT] = cobrautil.ROW_VALUE_FAILED + } else { + if payload[cobrautil.ResultCode] != cobrautil.ResultSuccess { + item[cobrautil.ROW_RESULT] = cobrautil.ROW_VALUE_FAILED + } + } } + item[cobrautil.ROW_RESULT] = cobrautil.ROW_VALUE_SUCCESS rCmd.TableNew.Append([]string{item["User"], item["Src"], item["UUID"], item["File"], item["Result"]}) - wg.Done() }(item) } wg.Wait() diff --git a/tools-v2/pkg/cli/command/curvebs/delete/volume/recover/recover.go b/tools-v2/pkg/cli/command/curvebs/delete/volume/recover/recover.go index 8cc5b6b0c6..678abb0a4a 100644 --- a/tools-v2/pkg/cli/command/curvebs/delete/volume/recover/recover.go +++ b/tools-v2/pkg/cli/command/curvebs/delete/volume/recover/recover.go @@ -104,7 +104,7 @@ func (rCmd *RecoverCmd) RunCommand(cmd *cobra.Command, 
args []string) error { if err := json.Unmarshal([]byte(result), &resp); err != nil { return err } - if resp.Code != "0" { + if resp.Code != cobrautil.ResultSuccess { return fmt.Errorf("get clone list fail, error code: %s", resp.Code) } if len(resp.TaskInfos) == 0 { @@ -119,6 +119,7 @@ func (rCmd *RecoverCmd) RunCommand(cmd *cobra.Command, args []string) error { for _, item := range records { wg.Add(1) go func(item map[string]string) { + defer wg.Done() params := map[string]any{ cobrautil.QueryAction: cobrautil.ActionCleanCloneTask, cobrautil.QueryUser: item["User"], @@ -126,14 +127,21 @@ func (rCmd *RecoverCmd) RunCommand(cmd *cobra.Command, args []string) error { } subUri := cobrautil.NewSnapshotQuerySubUri(params) metric := basecmd.NewMetric(rCmd.snapshotAddrs, subUri, rCmd.timeout) - _, err := basecmd.QueryMetric(metric) + result, err := basecmd.QueryMetric(metric) if err.TypeCode() != cmderror.CODE_SUCCESS { - item["Result"] = "fail" + item["Result"] = cobrautil.ROW_VALUE_FAILED } else { - item["Result"] = "success" + payload := map[string]any{} + if err := json.Unmarshal([]byte(result), &payload); err != nil { + item["Result"] = cobrautil.ROW_VALUE_FAILED + } else { + if payload[cobrautil.ResultCode] != cobrautil.ResultSuccess { + item["Result"] = cobrautil.ROW_VALUE_FAILED + } + } } + item["Result"] = cobrautil.ROW_VALUE_SUCCESS rCmd.TableNew.Append([]string{item["User"], item["Src"], item["UUID"], item["File"], item["Result"]}) - wg.Done() }(item) } wg.Wait() diff --git a/tools-v2/pkg/cli/command/curvebs/update/update.go b/tools-v2/pkg/cli/command/curvebs/update/update.go index efc5707bd3..7eae7bed5d 100644 --- a/tools-v2/pkg/cli/command/curvebs/update/update.go +++ b/tools-v2/pkg/cli/command/curvebs/update/update.go @@ -23,16 +23,16 @@ package update import ( - "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/update/copyset" - "github.com/spf13/cobra" - basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + 
"github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/update/copyset" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/update/file" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/update/leader" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/update/leader_schedule" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/update/peer" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/update/scan_state" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/update/throttle" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/update/volume" + "github.com/spf13/cobra" ) type UpdateCommand struct { @@ -50,6 +50,7 @@ func (updateCmd *UpdateCommand) AddSubCommands() { scan_state.NewScanStateCommand(), copyset.NewCopysetCommand(), leader_schedule.NewLeaderScheduleCommand(), + volume.NewVolumeCommand(), ) } diff --git a/tools-v2/pkg/cli/command/curvebs/update/volume/flatten/flatten.go b/tools-v2/pkg/cli/command/curvebs/update/volume/flatten/flatten.go new file mode 100644 index 0000000000..eab3240da6 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/update/volume/flatten/flatten.go @@ -0,0 +1,123 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */ +/* +* Project: CurveCli +* Created Date: 2023-09-16 +* Author: baytan0720 + */ + +package flatten + +import ( + "encoding/json" + "time" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/spf13/cobra" +) + +const ( + flattenExample = `$ curve bs update volume flatten --user root --taskid d26e27a8-fcbd-4f7a-adf8-53795217cbb0` +) + +type FlattenCmd struct { + basecmd.FinalCurveCmd + snapshotAddrs []string + timeout time.Duration + + user string + taskID string +} + +var _ basecmd.FinalCurveCmdFunc = (*FlattenCmd)(nil) + +func NewCommand() *cobra.Command { + return NewFlattenCmd().Cmd +} + +func NewFlattenCmd() *FlattenCmd { + fCmd := &FlattenCmd{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "flatten", + Short: "update volume flatten in curvebs cluster", + Example: flattenExample, + }, + } + basecmd.NewFinalCurveCli(&fCmd.FinalCurveCmd, fCmd) + return fCmd +} + +func (fCmd *FlattenCmd) AddFlags() { + config.AddBsSnapshotCloneFlagOption(fCmd.Cmd) + config.AddHttpTimeoutFlag(fCmd.Cmd) + config.AddBsUserRequireFlag(fCmd.Cmd) + config.AddBsTaskIDRequireFlag(fCmd.Cmd) +} + +func (fCmd *FlattenCmd) Init(cmd *cobra.Command, args []string) error { + snapshotAddrs, err := config.GetBsSnapshotAddrSlice(fCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS || len(snapshotAddrs) == 0 { + return err.ToError() + } + fCmd.snapshotAddrs = snapshotAddrs + fCmd.timeout = config.GetFlagDuration(fCmd.Cmd, config.HTTPTIMEOUT) + fCmd.user = config.GetBsFlagString(fCmd.Cmd, config.CURVEBS_USER) + fCmd.taskID = config.GetBsFlagString(fCmd.Cmd, config.CURVEBS_TASKID) + fCmd.SetHeader([]string{cobrautil.ROW_USER, cobrautil.ROW_TASK_ID, cobrautil.ROW_RESULT}) + return nil +} + +func (fCmd *FlattenCmd) RunCommand(cmd 
*cobra.Command, args []string) error { + params := map[string]any{ + cobrautil.QueryAction: cobrautil.ActionFlatten, + cobrautil.QueryUser: fCmd.user, + cobrautil.QueryUUID: fCmd.taskID, + } + subUri := cobrautil.NewSnapshotQuerySubUri(params) + metric := basecmd.NewMetric(fCmd.snapshotAddrs, subUri, fCmd.timeout) + result, err := basecmd.QueryMetric(metric) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + payload := map[string]any{} + if err := json.Unmarshal([]byte(result), &payload); err != nil { + return err + } + row := make(map[string]string) + row[cobrautil.ROW_USER] = fCmd.user + row[cobrautil.ROW_TASK_ID] = fCmd.taskID + + if payload[cobrautil.ResultCode] != cobrautil.ResultSuccess { + row[cobrautil.ROW_RESULT] = cobrautil.ROW_VALUE_FAILED + } else { + row[cobrautil.ROW_RESULT] = cobrautil.ROW_VALUE_SUCCESS + } + + fCmd.Result = row + return nil +} + +func (fCmd *FlattenCmd) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&fCmd.FinalCurveCmd, fCmd) +} + +func (fCmd *FlattenCmd) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&fCmd.FinalCurveCmd) +} diff --git a/tools-v2/pkg/cli/command/curvebs/update/volume/volume.go b/tools-v2/pkg/cli/command/curvebs/update/volume/volume.go new file mode 100644 index 0000000000..8ed6ff15e1 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/update/volume/volume.go @@ -0,0 +1,50 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+* See the License for the specific language governing permissions and +* limitations under the License. + */ +/* +* Project: CurveCli +* Created Date: 2023-09-16 +* Author: baytan0720 + */ + +package volume + +import ( + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/update/volume/flatten" + "github.com/spf13/cobra" +) + +type VolumeCommand struct { + basecmd.MidCurveCmd +} + +var _ basecmd.MidCurveCmdFunc = (*VolumeCommand)(nil) // check interface + +func (volumeCmd *VolumeCommand) AddSubCommands() { + volumeCmd.Cmd.AddCommand( + flatten.NewCommand(), + ) +} + +func NewVolumeCommand() *cobra.Command { + volumeCmd := &VolumeCommand{ + basecmd.MidCurveCmd{ + Use: "volume", + Short: "update volume resources in the curvebs", + }, + } + return basecmd.NewMidCurveCli(&volumeCmd.MidCurveCmd, volumeCmd) +} diff --git a/tools-v2/pkg/cli/command/curvefs/warmup/add/add.go b/tools-v2/pkg/cli/command/curvefs/warmup/add/add.go index 539d839a13..9b7db13a78 100644 --- a/tools-v2/pkg/cli/command/curvefs/warmup/add/add.go +++ b/tools-v2/pkg/cli/command/curvefs/warmup/add/add.go @@ -48,8 +48,8 @@ $ curve fs warmup add /mnt/warmup # warmup all files in /mnt/warmup` const ( CURVEFS_WARMUP_OP_XATTR = "curvefs.warmup.op" - CURVEFS_WARMUP_OP_ADD_SINGLE = "add\nsingle\n%s\n%s" - CURVEFS_WARMUP_OP_ADD_LIST = "add\nlist\n%s\n%s" + CURVEFS_WARMUP_OP_ADD_SINGLE = "add\nsingle\n%s\n%s\n%s\n%s" + CURVEFS_WARMUP_OP_ADD_LIST = "add\nlist\n%s\n%s\n%s\n%s" ) var STORAGE_TYPE = map[string]string{ @@ -59,12 +59,11 @@ var STORAGE_TYPE = map[string]string{ type AddCommand struct { basecmd.FinalCurveCmd - Mountpoint *mountinfo.MountInfo - Path string // path in user system - CurvefsPath string // path in curvefs - Single bool // warmup a single file or directory - StorageType string // warmup storage type - ConvertFails []string + Mountpoint *mountinfo.MountInfo + Path string // path in user system + CurvefsPath string // 
path in curvefs + Single bool // warmup a single file or directory + StorageType string // warmup storage type } var _ basecmd.FinalCurveCmdFunc = (*AddCommand)(nil) // check interface @@ -161,7 +160,7 @@ func (aCmd *AddCommand) Print(cmd *cobra.Command, args []string) error { return output.FinalCmdOutput(&aCmd.FinalCurveCmd, aCmd) } -func (aCmd *AddCommand) convertFilelist() *cmderror.CmdError { +func (aCmd *AddCommand) verifyFilelist() *cmderror.CmdError { data, err := ioutil.ReadFile(aCmd.Path) if err != nil { readErr := cmderror.ErrReadFile() @@ -170,36 +169,40 @@ func (aCmd *AddCommand) convertFilelist() *cmderror.CmdError { } lines := strings.Split(string(data), "\n") - validPath := "" - for _, line := range lines { + + verifyFailMsg := "" + var verifyReplaceErr error + for i, line := range lines { + if line == "" { + continue + } rel, err := filepath.Rel(aCmd.Mountpoint.MountPoint, line) - if err == nil && !strings.HasPrefix(rel, "..") { - // convert to curvefs path - curvefsAbspath := cobrautil.Path2CurvefsPath(line, aCmd.Mountpoint) - validPath += (curvefsAbspath + "\n") - } else { - convertFail := fmt.Sprintf("[%s] is not saved in curvefs", line) - aCmd.ConvertFails = append(aCmd.ConvertFails, convertFail) + if err != nil || strings.HasPrefix(rel, "..") { + verifyReplaceErr = err + verifyFailMsg += fmt.Sprintf("line %d: [%s:%s] is not saved in curvefs\n", i + 1, aCmd.Path, line) } } - if err = ioutil.WriteFile(aCmd.Path, []byte(validPath), 0644); err != nil { - writeErr := cmderror.ErrWriteFile() - writeErr.Format(aCmd.Path, err.Error()) + + if verifyReplaceErr != nil { + verifyErr := cmderror.ErrVerifyError() + verifyErr.Format(verifyFailMsg, verifyReplaceErr.Error()) + return verifyErr } + return cmderror.ErrSuccess() } func (aCmd *AddCommand) RunCommand(cmd *cobra.Command, args []string) error { xattr := CURVEFS_WARMUP_OP_ADD_SINGLE if !aCmd.Single { - convertErr := aCmd.convertFilelist() - if convertErr.TypeCode() != cmderror.CODE_SUCCESS { - return 
convertErr.ToError() + verifyErr := aCmd.verifyFilelist() + if verifyErr.TypeCode() != cmderror.CODE_SUCCESS { + return verifyErr.ToError() } xattr = CURVEFS_WARMUP_OP_ADD_LIST } - value := fmt.Sprintf(xattr, aCmd.CurvefsPath, aCmd.StorageType) - err := unix.Setxattr(aCmd.Path, CURVEFS_WARMUP_OP_XATTR, []byte(value), 0) + values := fmt.Sprintf(xattr, aCmd.CurvefsPath, aCmd.StorageType, aCmd.Mountpoint.MountPoint, aCmd.Mountpoint.Root) + err := unix.Setxattr(aCmd.Path, CURVEFS_WARMUP_OP_XATTR, []byte(values), 0) if err == unix.ENOTSUP || err == unix.EOPNOTSUPP { return fmt.Errorf("filesystem does not support extended attributes") } else if err != nil { diff --git a/tools-v2/pkg/cli/command/curvefs/warmup/cancel/cancel.go b/tools-v2/pkg/cli/command/curvefs/warmup/cancel/cancel.go new file mode 100644 index 0000000000..0e34c57892 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvefs/warmup/cancel/cancel.go @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Project: CurveCli + * Created Date: 2023-08-10 + * Author: ken90242 (Ken Han) + */ + +package cancel + +import ( + "errors" + "fmt" + "os" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvefs/warmup/query" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/spf13/cobra" + "golang.org/x/sys/unix" +) + +const ( + cancelCommand = `$ curve fs warmup cancel --filelist /mnt/warmup/0809.list # cancel warmup task in related to the file(dir) saved in /mnt/warmup/0809.list + $ curve fs warmup cancel /mnt/warmup # cancel the warmup tasks of all files in /mnt/warmup` +) + +const ( + CURVEFS_WARMUP_OP_XATTR = "curvefs.warmup.op" + CURVEFS_WARMUP_OP_CANCEL_SINGLE = "cancel\nsingle" + CURVEFS_WARMUP_OP_CANCEL_LIST = "cancel\nlist" +) + +var STORAGE_TYPE = map[string]string{ + "disk": "disk", + "mem": "kvclient", +} + +type CancelCommand struct { + basecmd.FinalCurveCmd + Path string // path in user system + Single bool // cancel the warmup of a single file or directory +} + +var _ basecmd.FinalCurveCmdFunc = (*CancelCommand)(nil) // check interface + +func NewCancelWarmupCommand() *CancelCommand { + cCmd := &CancelCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "cancel", + Short: "tell client to cancel warmup files(directories) to local", + Example: cancelCommand, + }, + } + basecmd.NewFinalCurveCli(&cCmd.FinalCurveCmd, cCmd) + return cCmd +} + +func NewCancelCommand() *cobra.Command { + return NewCancelWarmupCommand().Cmd +} + +func (cCmd *CancelCommand) AddFlags() { + config.AddFileListOptionFlag(cCmd.Cmd) + config.AddDaemonOptionPFlag(cCmd.Cmd) + config.AddStorageOptionFlag(cCmd.Cmd) +} + +func (cCmd *CancelCommand) Init(cmd *cobra.Command, args []string) error { + // check has curvefs 
mountpoint + mountpoints, err := cobrautil.GetCurveFSMountPoints() + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } else if len(mountpoints) == 0 { + return errors.New("no curvefs mountpoint found") + } + + // check args + cCmd.Single = false + fileList := config.GetFileListOptionFlag(cCmd.Cmd) + if fileList == "" && len(args) == 0 { + cmd.SilenceUsage = false + return fmt.Errorf("no --filelist or file(dir) specified") + } else if fileList != "" { + cCmd.Path = fileList + } else { + cCmd.Path = args[0] + cCmd.Single = true + } + + // check file is exist + info, errStat := os.Stat(cCmd.Path) + if errStat != nil { + if os.IsNotExist(errStat) { + return fmt.Errorf("[%s]: no such file or directory", cCmd.Path) + } else { + return fmt.Errorf("stat [%s] fail: %s", cCmd.Path, errStat.Error()) + } + } else if !cCmd.Single && info.IsDir() { + // --filelist must be a file + return fmt.Errorf("[%s]: must be a file", cCmd.Path) + } + + return nil +} + +func (cCmd *CancelCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&cCmd.FinalCurveCmd, cCmd) +} + +func (cCmd *CancelCommand) RunCommand(cmd *cobra.Command, args []string) error { + xattr := CURVEFS_WARMUP_OP_CANCEL_SINGLE + if !cCmd.Single { + xattr = CURVEFS_WARMUP_OP_CANCEL_LIST + } + err := unix.Setxattr(cCmd.Path, CURVEFS_WARMUP_OP_XATTR, []byte(xattr), 0) + if err == unix.ENOTSUP || err == unix.EOPNOTSUPP { + return fmt.Errorf("filesystem does not support extended attributes") + } else if err != nil { + setErr := cmderror.ErrSetxattr() + setErr.Format(CURVEFS_WARMUP_OP_XATTR, err.Error()) + return setErr.ToError() + } + if config.GetDaemonFlag(cCmd.Cmd) { + query.GetWarmupProgress(cCmd.Cmd, cCmd.Path) + } + return nil +} + +func (cCmd *CancelCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&cCmd.FinalCurveCmd) +} diff --git a/tools-v2/pkg/cli/command/curvefs/warmup/list/list.go b/tools-v2/pkg/cli/command/curvefs/warmup/list/list.go 
new file mode 100644 index 0000000000..e005713c85 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvefs/warmup/list/list.go @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: CurveCli + * Created Date: 2023-08-09 + * Author: Ken Han (ken90242) + */ + +package list + +import ( + "errors" + "fmt" + "path/filepath" + "strings" + + "github.com/cilium/cilium/pkg/mountinfo" + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/pkg/xattr" + "github.com/spf13/cobra" +) + +const ( + listExample = `$ curve fs warmup list ` + + CURVEFS_WARMUP_OP_XATTR = "curvefs.warmup.op.list" +) + +type ListCommand struct { + basecmd.FinalCurveCmd + MountpointInfo *mountinfo.MountInfo + mountpoint string +} + +var _ basecmd.FinalCurveCmdFunc = (*ListCommand)(nil) // check interface + +func NewListWarmupCommand() *ListCommand { + lCmd := &ListCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "list", + Short: "list the ongoing warmup progress for all tasks that are currently in process", + Example: listExample, + }, + } + basecmd.NewFinalCurveCli(&lCmd.FinalCurveCmd, lCmd) + return lCmd +} + +func NewListCommand() *cobra.Command { + return 
NewListWarmupCommand().Cmd +} + +func (lCmd *ListCommand) AddFlags() { + config.AddIntervalOptionFlag(lCmd.Cmd) +} + +func (lCmd *ListCommand) Init(cmd *cobra.Command, args []string) error { + if len(args) < 1 { + return errors.New("please provide the path where CurveFS is being mounted. i.e., curve fs warmup list ") + } + + mountpoints, err := cobrautil.GetCurveFSMountPoints() + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } else if len(mountpoints) == 0 { + return errors.New("no curvefs mountpoint found") + } + + absPath, _ := filepath.Abs(args[0]) + lCmd.mountpoint = absPath + + lCmd.MountpointInfo = nil + for _, mountpointInfo := range mountpoints { + rel, err := filepath.Rel(mountpointInfo.MountPoint, absPath) + if err == nil && !strings.HasPrefix(rel, "..") { + // found the mountpoint + if lCmd.MountpointInfo == nil || + len(lCmd.MountpointInfo.MountPoint) < len(mountpointInfo.MountPoint) { + // Prevent the curvefs directory from being mounted under the curvefs directory + // /a/b/c: + // test-1 mount in /a + // test-1 mount in /a/b + // warmup /a/b/c. 
+ lCmd.MountpointInfo = mountpointInfo + } + } + } + return nil +} + +func (lCmd *ListCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&lCmd.FinalCurveCmd, lCmd) +} + +func (lCmd *ListCommand) RunCommand(cmd *cobra.Command, args []string) error { + + resultBytes, err := xattr.Get(lCmd.mountpoint, CURVEFS_WARMUP_OP_XATTR) + resultStr := string(resultBytes) + + if err != nil { + return err + } + + entries := strings.Split(resultStr, ";") + + tableRows := make([]map[string]string, 0) + + if resultStr == "finished" { + row := make(map[string]string) + lCmd.Header = append(lCmd.Header, cobrautil.ROW_RESULT) + row[cobrautil.ROW_RESULT] = "No tasks in queue" + tableRows = append(tableRows, row) + } else { + lCmd.Header = append(lCmd.Header, cobrautil.ROW_PATH) + lCmd.Header = append(lCmd.Header, cobrautil.ROW_PROGRESS) + rows := make([]map[string]string, 0) + for _, entry := range entries { + if entry == "" { + continue + } + + parts := strings.Split(entry, ":") + + if len(parts) != 2 { + return fmt.Errorf("invalid entry: %s", entry) + } + + row := make(map[string]string) + + curvefsFilePath := parts[0] + clientFilePath := cobrautil.CurvefsPath2ClientPath(curvefsFilePath, lCmd.MountpointInfo) + + row[cobrautil.ROW_PATH] = fmt.Sprintf("%s", clientFilePath) + + progress := parts[1] + row[cobrautil.ROW_PROGRESS] = fmt.Sprintf("%s", progress) + + rows = append(rows, row) + } + + tableRows = append(tableRows, rows...) 
+ } + + lCmd.SetHeader(lCmd.Header) + list := cobrautil.ListMap2ListSortByKeys(tableRows, lCmd.Header, []string{}) + lCmd.TableNew.AppendBulk(list) + + return nil +} + +func (lCmd *ListCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&lCmd.FinalCurveCmd) +} diff --git a/tools-v2/pkg/cli/command/curvefs/warmup/warmup.go b/tools-v2/pkg/cli/command/curvefs/warmup/warmup.go index bfbf61e32e..2250ce232a 100644 --- a/tools-v2/pkg/cli/command/curvefs/warmup/warmup.go +++ b/tools-v2/pkg/cli/command/curvefs/warmup/warmup.go @@ -25,6 +25,8 @@ package warmup import ( basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvefs/warmup/add" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvefs/warmup/cancel" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvefs/warmup/list" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvefs/warmup/query" "github.com/spf13/cobra" ) @@ -39,6 +41,8 @@ func (warmupCmd *WarmupCommand) AddSubCommands() { warmupCmd.Cmd.AddCommand( add.NewAddCommand(), query.NewQueryCommand(), + cancel.NewCancelCommand(), + list.NewListCommand(), ) } diff --git a/tools-v2/pkg/config/bs.go b/tools-v2/pkg/config/bs.go index ee8ba3224d..08b80dd179 100644 --- a/tools-v2/pkg/config/bs.go +++ b/tools-v2/pkg/config/bs.go @@ -658,6 +658,14 @@ func AddBsFailedOptionFlag(cmd *cobra.Command) { AddBsBoolOptionFlag(cmd, CURVEBS_FAILED, "failed") } +func AddBsUserRequireFlag(cmd *cobra.Command) { + AddBsStringRequiredFlag(cmd, CURVEBS_USER, "user name") +} + +func AddBsTaskIDRequireFlag(cmd *cobra.Command) { + AddBsStringRequiredFlag(cmd, CURVEBS_TASKID, "task id") +} + // get stingslice flag func GetBsFlagStringSlice(cmd *cobra.Command, flagName string) []string { var value []string diff --git a/tools/curvefsTool.cpp b/tools/curvefsTool.cpp index 698e54d535..2227257bf3 100644 --- a/tools/curvefsTool.cpp +++ b/tools/curvefsTool.cpp @@ -222,11 +222,11 @@ int 
CurvefsTools::ScanLogicalPool() { } for (auto it = logicalPoolInfos.begin(); it != logicalPoolInfos.end();) { - auto ix = std::find_if(lgPoolDatas.begin(), - lgPoolDatas.end(), - [it] (CurveLogicalPoolData& data) { - return data.name == it->logicalpoolname(); - }); + auto ix = + std::find_if(lgPoolDatas.begin(), lgPoolDatas.end(), + [it](const CurveLogicalPoolData& data) { + return data.name == it->logicalpoolname(); + }); if (ix != lgPoolDatas.end()) { lgPoolDatas.erase(ix); it++; @@ -796,9 +796,9 @@ int CurvefsTools::ScanCluster() { for (auto it = zoneInfos.begin(); it != zoneInfos.end();) { - auto ix = std::find_if(zoneToAdd.begin(), - zoneToAdd.end(), - [it] (CurveZoneData &data) { + auto ix = std::find_if( + zoneToAdd.begin(), zoneToAdd.end(), + [it](const CurveZoneData& data) { return (data.physicalPoolName == it->physicalpoolname()) && (data.zoneName == @@ -856,13 +856,14 @@ int CurvefsTools::ScanCluster() { for (auto it = serverInfos.begin(); it != serverInfos.end(); it++) { - auto ix = std::find_if(serverToAdd.begin(), - serverToAdd.end(), - [it] (CurveServerData &data) { + auto ix = + std::find_if( + serverToAdd.begin(), serverToAdd.end(), + [it](const CurveServerData& data) { return (data.serverName == it->hostname()) && (data.zoneName == it->zonename()) && (data.physicalPoolName == it->physicalpoolname()); - }); + }); if (ix != serverToAdd.end()) { serverToAdd.erase(ix); } else { @@ -1295,7 +1296,7 @@ int CurvefsTools::ScanPoolset() { } for (auto it = poolsetInfos.begin(); it != poolsetInfos.end();) { auto ix = std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), - [it](CurvePoolsetData& data) { + [it](const CurvePoolsetData& data) { return data.name == it->poolsetname(); }); if (ix != poolsetToAdd.end()) { diff --git a/ut.sh b/ut.sh index 74326eb7e5..8a3599f800 100644 --- a/ut.sh +++ b/ut.sh @@ -27,7 +27,7 @@ print_title() { ############################ FUNCTIONS get_options() { - local args=`getopt -o ldorh --long 
stor:,list,dep:,only:,os:,release:,ci:,build_rocksdb: -n "$0" -- "$@"` + local args=`getopt -o ldorhS --long sanitizer:,stor:,list,dep:,only:,os:,release:,ci:,build_rocksdb: -n "$0" -- "$@"` eval set -- "${args}" while true do @@ -56,6 +56,10 @@ get_options() { g_ci=$2 shift 2 ;; + -S|--sanitizer) + g_san=$2 + shift 2 + ;; --os) g_os=$2 shift 2 diff --git a/util/basic.sh b/util/basic.sh old mode 100644 new mode 100755 diff --git a/util/build.sh b/util/build.sh old mode 100644 new mode 100755 index 02b3d76880..a62eaac6ce --- a/util/build.sh +++ b/util/build.sh @@ -5,6 +5,7 @@ set -x ############################ GLOBAL VARIABLES g_os="debian11" +source "$(dirname "${BASH_SOURCE}")/docker_opts.sh" ############################ BASIC FUNCTIONS msg() { @@ -83,7 +84,13 @@ get_options() { main() { get_options "$@" - sudo docker run --rm -w /curve --user $(id -u ${USER}):$(id -g ${USER}) -v $(pwd):/curve -v ${HOME}:${HOME} -v /etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro -v /etc/shadow:/etc/shadow:ro --privileged opencurvedocker/curve-base:build-$g_os bash util/build_in_image.sh "$@" + sudo docker run \ + --rm \ + -w /curve \ + -v $(pwd):/curve \ + ${g_docker_opts[@]} \ + opencurvedocker/curve-base:build-$g_os \ + bash util/build_in_image.sh "$@" } ############################ MAIN() diff --git a/util/build_in_image.sh b/util/build_in_image.sh old mode 100644 new mode 100755 index fea01388e3..acf47f041a --- a/util/build_in_image.sh +++ b/util/build_in_image.sh @@ -19,6 +19,13 @@ g_build_opts=( "--copt -DGFLAGS_NS=google" "--copt -DUSE_BTHREAD_MUTEX" ) +# allow user to specify extra build options +# using environment variable BUILD_OPTS , if any. 
+# custom build options will be appended to g_build_opts +if [ -n "$BUILD_OPTS" ]; then + echo "Custom build options: $BUILD_OPTS" + g_build_opts+=("$BUILD_OPTS") +fi g_os="debian9" @@ -78,11 +85,15 @@ _EOC_ } get_options() { - local args=`getopt -o ldorh --long stor:,list,dep:,only:,os:,release:,ci:,build_rocksdb: -n "$0" -- "$@"` + local args=`getopt -o ldorhS --long sanitizer:,stor:,list,dep:,only:,os:,release:,ci:,build_rocksdb: -n "$0" -- "$@"` eval set -- "${args}" while true do case "$1" in + -S|--sanitizer) + g_san=$2 + shift 2 + ;; -s|--stor) g_stor=$2 shift 2 @@ -180,17 +191,22 @@ build_target() { fi local target_array + + # Allows specifying multiple targets in `g_target`, like "src/*,tools/*" + IFS=',' read -ra target_splitted <<<"$g_target" + target_splitted=("${target_splitted[@]/#/-e}") + if [ "$g_stor" == "bs" ]; then if [[ "$g_target" = "*" ]]; then target_array=("-- //... -//curvefs/...") else - target_array=($(bazel query 'kind("cc_(test|binary)", //... -//curvefs/...)' | grep -E "$g_target")) + target_array=($(bazel query 'kind("cc_(test|binary)", //... -//curvefs/...)' | grep -E ${target_splitted[@]})) fi elif [ "$g_stor" == "fs" ]; then if [[ "$g_target" = "*" ]]; then target_array=("-- //curvefs/...") else - target_array=($(bazel query 'kind("cc_(test|binary)", //curvefs/...)' | grep -E "$g_target")) + target_array=($(bazel query 'kind("cc_(test|binary)", //curvefs/...)' | grep -E ${target_splitted[@]})) fi fi @@ -199,6 +215,12 @@ build_target() { target_array=("...") fi + if [ $g_san == 1 ]; then + g_build_opts+=("--config=asan") + elif [ $g_san == 2 ]; then + g_build_opts+=("--config=msan") + fi + for target in "${target_array[@]}" do bazel build ${g_build_opts[@]} $target @@ -244,7 +266,7 @@ build_requirements() { (cd ${g_memcache_root} && make build) fi g_etcdclient_root="thirdparties/etcdclient" - (cd ${g_etcdclient_root} && make clean && make all) + (cd ${g_etcdclient_root} && make) # Use cache when necessary. 
No need to do full-rebuild every time. } main() { diff --git a/util/check.sh b/util/check.sh old mode 100644 new mode 100755 diff --git a/util/cppcheck/cppcheck.suppressions b/util/cppcheck/cppcheck.suppressions new file mode 100644 index 0000000000..a1924c2014 --- /dev/null +++ b/util/cppcheck/cppcheck.suppressions @@ -0,0 +1,41 @@ +noValidConfiguration +unmatchedSuppression + +missingInclude +missingIncludeSystem + +unusedFunction + +# style +cstyleCast +unreadVariable +variableScope +constVariableReference +shadowVariable +shadowFunction +shadowArgument +missingOverride +useStlAlgorithm +redundantInitialization +constVariable +knownConditionTrueFalse:curvefs/src/client/s3/client_s3_cache_manager.cpp +unusedStructMember:src/chunkserver/raftlog/curve_segment.cpp +redundantAssignment:src/tools/status_tool.cpp + +# portability +arithOperationsOnVoidPointer + +# information +ConfigurationNotChecked + +# warning +invalidscanf:src/client/client_common.h + +# error +ctuOneDefinitionRuleViolation:curvefs/src/mds/topology/topology_id_generator.h +deallocuse:curvefs/src/metaserver/storage/rocksdb_storage.cpp +deallocuse:nebd/src/part2/file_service.cpp +internalAstError + +# performance +uselessCallsSubstr diff --git a/util/docker.sh b/util/docker.sh old mode 100644 new mode 100755 index e4e72887fc..afcbe55db2 --- a/util/docker.sh +++ b/util/docker.sh @@ -6,19 +6,7 @@ `` g_os="debian11" g_ci=0 -g_docker_opts=( - "-v ${HOME}:${HOME}" - "--user $(id -u ${USER}):$(id -g ${USER})" - "-v /etc/passwd:/etc/passwd:ro" - "-v /etc/group:/etc/group:ro" - "-v /etc/sudoers.d/:/etc/sudoers.d/" - "-v /etc/sudoers:/etc/sudoers:ro" - "-v /etc/shadow:/etc/shadow:ro" - "-v /var/run/docker.sock:/var/run/docker.sock" - "-v /root/.docker:/root/.docker" - "--ulimit core=-1" - "--privileged" -) +source "$(dirname "${BASH_SOURCE}")/docker_opts.sh" ############################ BASIC FUNCTIONS msg() { diff --git a/util/docker_opts.sh b/util/docker_opts.sh new file mode 100755 index 
0000000000..d83aeec9ec --- /dev/null +++ b/util/docker_opts.sh @@ -0,0 +1,16 @@ +# This file must be used with "source docker_opts.sh" from bash +# you cannot run it directly. + +g_docker_opts=( + "-v ${HOME}:${HOME}" + "--user $(id -u ${USER}):$(id -g ${USER})" + "-v /etc/passwd:/etc/passwd:ro" + "-v /etc/group:/etc/group:ro" + "-v /etc/sudoers.d/:/etc/sudoers.d/" + "-v /etc/sudoers:/etc/sudoers:ro" + "-v /etc/shadow:/etc/shadow:ro" + "-v /var/run/docker.sock:/var/run/docker.sock" + "-v /root/.docker:/root/.docker" + "--ulimit core=-1" + "--privileged" +) diff --git a/util/image.sh b/util/image.sh old mode 100644 new mode 100755 diff --git a/util/install.sh b/util/install.sh old mode 100644 new mode 100755 diff --git a/util/package.sh b/util/package.sh new file mode 100755 index 0000000000..41355d015a --- /dev/null +++ b/util/package.sh @@ -0,0 +1,405 @@ +#!/usr/bin/env bash + +# Copyright (c) 2023 NetEase Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -o errexit + +set -x + +dir=$(pwd) +echo "Working directory: ${dir}" + +tag_version=$(git status | grep -Ew "HEAD detached at|On branch" | awk '{print $NF}' | awk -F"v" '{print $2}') +if [ -z ${tag_version} ]; then + echo "not found version info, set version to 9.9.9" + tag_version=9.9.9 +fi + +commit_id=$(git rev-parse --short HEAD) +if [ "${RELEASE:-}" != "1" ]; then + debug="+debug" +else + debug="" +fi + +curve_version=${tag_version}+${commit_id}${debug} + +echo "curve version: ${curve_version}" + +function create_python_wheel() { + local PYTHON_VER=$(basename $1) + local curdir=$(pwd) + local basedir="build/curvefs_${PYTHON_VER}/" + + mkdir -p ${basedir}/tmplib + mkdir -p ${basedir}/curvefs + + cp ./curvefs_python/tmplib/* ${basedir}/tmplib + cp ./curvefs_python/setup.py ${basedir}/setup.py + cp ./curvefs_python/__init__.py ${basedir}/curvefs + cp ./curvefs_python/curvefs.py ${basedir}/curvefs + cp ./bazel-bin/curvefs_python/libcurvefs.so ${basedir}/curvefs/_curvefs.so + + cd ${basedir} + sed -i "s/version-anchor/${curve_version}/g" setup.py + + deps=$(ldd curvefs/_curvefs.so | awk '{ print $1 }' | sed '/^$/d') + for i in $(find tmplib/ -name "lib*so"); do + basename=$(basename $i) + if [[ $deps =~ $basename ]]; then + echo $i + cp $i curvefs + fi + done + + ${1} setup.py bdist_wheel + cp dist/*whl ${curdir} + + cd ${curdir} +} + +if [ "${CREATE_PY_WHEEL}" == "1" ]; then + create_python_wheel /usr/bin/python3 + exit 0 +fi + +if [[ "$1" != "tar" && "$1" != "deb" ]]; then + echo "Usage: $0 " 1>&2 + exit 1 +fi + +source "$(dirname "${BASH_SOURCE}")/docker_opts.sh" + +outdir="bazel-bin-merged" + +cleandir=( + curvefs_python/BUILD + curvefs_python/tmplib/ + curvesnapshot_python/BUILD + curvesnapshot_python/tmplib/ + *.deb + *.whl + *.tar.gz + build + $outdir +) + +rm -rf "${cleandir[@]}" + +echo "start compiling" + +make build stor=bs release=${RELEASE:-0} dep=${DEP:-0} + +mkdir -p $outdir +for i in $(readlink -f bazel-bin)/*; do + cp -rf $i $outdir +done + 
+for _ in {1..2}; do + sudo docker run \ + -it --rm \ + -w /curve \ + -v $(pwd):/curve \ + ${g_docker_opts[@]} \ + -e BAZEL_BIN=${outdir} \ + opencurvedocker/curve-base:build-${OS:-debian11} \ + bash ./curvefs_python/configure.sh python3 # python2 is not built against anymore + + if [ "${RELEASE:-}" == "1" ]; then + sudo docker run \ + -it --rm \ + -w /curve \ + -v $(pwd):/curve \ + ${g_docker_opts[@]} \ + -e RELEASE=${RELEASE:-0} \ + -e DEP=${DEP:-0} \ + opencurvedocker/curve-base:build-${OS:-debian11} \ + bazel build curvefs_python:curvefs --config=gcc7-later --copt -DHAVE_ZLIB=1 --copt -O2 -s \ + --define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ + --copt \ + -Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ + -L/curve/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ + --linkopt -L/usr/local/lib ${bazelflags} + else + sudo docker run \ + -it --rm \ + -w /curve \ + -v $(pwd):/curve \ + ${g_docker_opts[@]} \ + -e RELEASE=${RELEASE:-0} \ + -e DEP=${DEP:-0} \ + opencurvedocker/curve-base:build-${OS:-debian11} \ + bazel build curvefs_python:curvefs --config=gcc7-later --copt -DHAVE_ZLIB=1 --compilation_mode=dbg -s \ + --define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ + --copt \ + -Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ + -L/curve/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ + --linkopt -L/usr/local/lib ${bazelflags} + fi + + for i in $(readlink -f bazel-bin)/curvefs*; do + cp -rf $i bazel-bin-merged + done +done + +echo "end compilation" + +function build_deb() { + # step4 create temporary dir, copy binary, libs, and conf + mkdir build + cp -r curve-mds build/ + cp -r curve-chunkserver build/ + + cp -r curve-sdk build/ + cp -r curve-tools build/ + cp -r curve-monitor build/ + cp -r curve-snapshotcloneserver build/ + cp -r curve-nginx build/ + + mkdir -p build/curve-mds/usr/bin + + mkdir -p build/curve-mds/etc/curve + mkdir -p 
build/curve-mds/usr/lib + mkdir -p build/curve-tools/usr/bin + cp $outdir/src/mds/main/curvemds build/curve-mds/usr/bin/curve-mds + cp thirdparties/etcdclient/libetcdclient.so \ + build/curve-mds/usr/lib/libetcdclient.so + cp $outdir/tools/curvefsTool build/curve-mds/usr/bin/curve-tool + cp -r tools/snaptool build/curve-tools/usr/bin/snaptool-lib + cp tools/snaptool/snaptool build/curve-tools/usr/bin/snaptool + chmod a+x build/curve-tools/usr/bin/snaptool + cp $outdir/src/tools/curve_tool \ + build/curve-tools/usr/bin/curve_ops_tool + mkdir -p build/curve-chunkserver/usr/bin + mkdir -p build/curve-chunkserver/etc/curve + cp $outdir/src/chunkserver/chunkserver \ + build/curve-chunkserver/usr/bin/curve-chunkserver + cp $outdir/src/tools/curve_chunkserver_tool \ + build/curve-chunkserver/usr/bin/curve_chunkserver_tool + + cp $outdir/src/tools/curve_format \ + build/curve-chunkserver/usr/bin/curve-format + + mkdir -p build/curve-sdk/usr/curvefs + mkdir -p build/curve-sdk/usr/bin + mkdir -p build/curve-sdk/etc/curve + mkdir -p build/curve-sdk/usr/lib + mkdir -p build/curve-sdk/usr/include + cp $outdir/curvefs_python/libcurvefs.so \ + build/curve-sdk/usr/curvefs/_curvefs.so + cp curvefs_python/curvefs.py build/curve-sdk/usr/curvefs/curvefs.py + cp curvefs_python/__init__.py build/curve-sdk/usr/curvefs/__init__.py + cp curvefs_python/curvefs_tool.py build/curve-sdk/usr/curvefs/curvefs_tool.py + cp curvefs_python/parser.py build/curve-sdk/usr/curvefs/parser.py + cp curvefs_python/curve build/curve-sdk/usr/bin/curve + chmod a+x build/curve-sdk/usr/bin/curve + cp curvefs_python/tmplib/* build/curve-sdk/usr/lib/ + cp include/client/libcurve.h build/curve-sdk/usr/include + cp include/client/libcbd.h build/curve-sdk/usr/include + cp include/client/libcurve_define.h build/curve-sdk/usr/include + mkdir -p build/curve-monitor/etc/curve/monitor + cp -r monitor/* build/curve-monitor/etc/curve/monitor + mkdir -p build/curve-snapshotcloneserver/usr/bin + cp 
$outdir/src/snapshotcloneserver/snapshotcloneserver \ + build/curve-snapshotcloneserver/usr/bin/curve-snapshotcloneserver + + mkdir -p build/curve-nginx/etc/curve/nginx/app/etc + mkdir -p build/curve-nginx/etc/curve/nginx/conf + # step 4.1 prepare for nebd-package + cp -r nebd/nebd-package build/ + mkdir -p build/nebd-package/usr/include/nebd + mkdir -p build/nebd-package/usr/bin + mkdir -p build/nebd-package/usr/lib/nebd + + mkdir -p k8s/nebd/nebd-package/usr/bin + cp nebd/nebd-package/usr/bin/nebd-daemon k8s/nebd/nebd-package/usr/bin + sed -i '/^baseLogPath=/cbaseLogPath=/var/log/nebd' k8s/nebd/nebd-package/usr/bin/nebd-daemon + cp -r k8s/nebd/nebd-package build/k8s-nebd-package + mkdir -p build/k8s-nebd-package/usr/bin + mkdir -p build/k8s-nebd-package/usr/lib/nebd + + for i in $(find $outdir/ | grep -w so | grep -v solib | grep -v params | grep -v test | grep -v fake); do + cp -f $i build/nebd-package/usr/lib/nebd + cp -f $i build/k8s-nebd-package/usr/lib/nebd + done + + cp nebd/src/part1/libnebd.h build/nebd-package/usr/include/nebd + cp $outdir/nebd/src/part2/nebd-server build/nebd-package/usr/bin + cp $outdir/nebd/src/part2/nebd-server build/k8s-nebd-package/usr/bin + + # step 4.2 prepare for curve-nbd package + cp -r nbd/nbd-package build + mkdir -p build/nbd-package/usr/bin + cp $outdir/nbd/src/curve-nbd build/nbd-package/usr/bin + + cp -r k8s/nbd/nbd-package build/k8s-nbd-package + mkdir -p build/k8s-nbd-package/usr/bin + cp $outdir/nbd/src/curve-nbd build/k8s-nbd-package/usr/bin + + # step5 记录到debian包的配置文件,打包debian包 + version="Version: ${curve_version}" + echo ${version} >>build/curve-mds/DEBIAN/control + echo ${version} >>build/curve-sdk/DEBIAN/control + echo ${version} >>build/curve-chunkserver/DEBIAN/control + echo ${version} >>build/curve-tools/DEBIAN/control + echo ${version} >>build/curve-monitor/DEBIAN/control + echo ${version} >>build/curve-snapshotcloneserver/DEBIAN/control + echo ${version} >>build/curve-nginx/DEBIAN/control + echo ${version} 
>>build/nebd-package/DEBIAN/control + echo ${version} >>build/k8s-nebd-package/DEBIAN/control + echo ${version} >>build/nbd-package/DEBIAN/control + echo ${version} >>build/k8s-nbd-package/DEBIAN/control + + dpkg-deb -b build/curve-mds . + dpkg-deb -b build/curve-sdk . + dpkg-deb -b build/curve-chunkserver . + dpkg-deb -b build/curve-tools . + dpkg-deb -b build/curve-monitor . + dpkg-deb -b build/curve-snapshotcloneserver . + dpkg-deb -b build/curve-nginx . + dpkg-deb -b build/nebd-package . + dpkg-deb -b build/k8s-nebd-package . + dpkg-deb -b build/nbd-package . + dpkg-deb -b build/k8s-nbd-package . +} + +function build_tar() { + # step4 create temporary dir, copy binary, libs, and conf + echo "start copy" + mkdir -p build/curve/ + # curve-mds + mkdir -p build/curve/curve-mds/bin + mkdir -p build/curve/curve-mds/lib + cp $outdir/src/mds/main/curvemds build/curve/curve-mds/bin/curve-mds + cp thirdparties/etcdclient/libetcdclient.so \ + build/curve/curve-mds/lib/libetcdclient.so + cp $outdir/tools/curvefsTool build/curve/curve-mds/bin/curve-tool + # curve-tools + mkdir -p build/curve/curve-tools/bin + cp $outdir/src/tools/curve_tool \ + build/curve/curve-tools/bin/curve_ops_tool + cp -r tools/snaptool build/curve/curve-tools/bin/snaptool-lib + cp tools/snaptool/snaptool build/curve/curve-tools/bin/snaptool + chmod a+x build/curve/curve-tools/bin/snaptool + # curve-chunkserver + mkdir -p build/curve/curve-chunkserver/bin + cp $outdir/src/chunkserver/chunkserver \ + build/curve/curve-chunkserver/bin/curve-chunkserver + cp $outdir/src/tools/curve_chunkserver_tool \ + build/curve/curve-chunkserver/bin/curve_chunkserver_tool + cp $outdir/src/tools/curve_format \ + build/curve/curve-chunkserver/bin/curve-format + # curve-sdk + mkdir -p build/curve/curve-sdk/curvefs + mkdir -p build/curve/curve-sdk/bin + mkdir -p build/curve/curve-sdk/lib + mkdir -p build/curve/curve-sdk/include + cp $outdir/curvefs_python/libcurvefs.so \ + build/curve/curve-sdk/curvefs/_curvefs.so + cp 
curvefs_python/curvefs.py build/curve/curve-sdk/curvefs/curvefs.py + cp curvefs_python/__init__.py build/curve/curve-sdk/curvefs/__init__.py + cp curvefs_python/curvefs_tool.py build/curve/curve-sdk/curvefs/curvefs_tool.py + cp curvefs_python/parser.py build/curve/curve-sdk/curvefs/parser.py + cp curvefs_python/curve build/curve/curve-sdk/bin/curve + chmod a+x build/curve/curve-sdk/bin/curve + cp curvefs_python/tmplib/* build/curve/curve-sdk/lib/ + cp include/client/libcurve.h build/curve/curve-sdk/include + cp include/client/libcbd.h build/curve/curve-sdk/include + cp include/client/libcurve_define.h build/curve/curve-sdk/include + # curve-snapshotcloneserver + mkdir -p build/curve/curve-snapshotcloneserver/bin + cp $outdir/src/snapshotcloneserver/snapshotcloneserver \ + build/curve/curve-snapshotcloneserver/bin/curve-snapshotcloneserver + mkdir -p build/curve/curve-snapshotcloneserver/lib + cp thirdparties/etcdclient/libetcdclient.so \ + build/curve/curve-snapshotcloneserver/lib/libetcdclient.so + # curve-nginx + mkdir -p build/curve/curve-nginx/app/etc + mkdir -p build/curve/curve-nginx/conf + # ansible + cp -r curve-ansible build/curve/ + # README + + # curve-monitor + mkdir -p build/curve-monitor + cp -r monitor/* build/curve-monitor/ + echo "end copy" + + # step 4.1 prepare for nebd-package + mkdir -p build/nebd-package/include/nebd + mkdir -p build/nebd-package/bin + mkdir -p build/nebd-package/lib/nebd + + for i in $(find $outdir/ | grep -w so | grep -v solib | grep -v params | grep -v test | grep -v fake); do + cp -f $i build/nebd-package/lib/nebd + done + + cp nebd/src/part1/libnebd.h build/nebd-package/include/nebd + cp $outdir/nebd/src/part2/nebd-server build/nebd-package/bin + + # step 4.2 prepare for curve-nbd package + mkdir -p build/nbd-package/bin + mkdir -p build/nbd-package/etc + cp $outdir/nbd/src/curve-nbd build/nbd-package/bin + cp nbd/nbd-package/usr/bin/map_curve_disk.sh build/nbd-package/bin + cp nbd/nbd-package/etc/curve/curvetab 
build/nbd-package/etc + cp nbd/nbd-package/etc/systemd/system/map_curve_disk.service build/nbd-package/etc + + # step5 package tar + echo "start make tarball" + cd ${dir}/build + curve_name="curve_${curve_version}.tar.gz" + echo "curve_name: ${curve_name}" + tar zcf ${curve_name} curve + cp ${curve_name} $dir + monitor_name="curve-monitor_${curve_version}.tar.gz" + echo "monitor_name: ${monitor_name}" + tar zcf ${monitor_name} curve-monitor + cp ${monitor_name} $dir + nebd_name="nebd_${curve_version}.tar.gz" + echo "nebd_name: ${nebd_name}" + tar zcf ${nebd_name} nebd-package + cp ${nebd_name} $dir + nbd_name="nbd_${curve_version}.tar.gz" + echo "nbd_name: ${nbd_name}" + tar zcf ${nbd_name} nbd-package + cp ${nbd_name} $dir + echo "end make tarball" + cd $OLDPWD +} + +if [ "$1" == "tar" ]; then + build_tar +elif [ "$1" == "deb" ]; then + build_deb +else + echo "Usage: $0 " 1>&2 + exit 1 +fi + +# step7 package python wheel +mkdir -p ./build/py_deps_libs +cp -rf ./curvefs_python/tmplib/* ./build/py_deps_libs/ +cp -rf ./build/py_deps_libs/* ./curvefs_python/tmplib/ +sudo docker run \ + -it --rm \ + -w /curve \ + -v $(pwd):/curve \ + ${g_docker_opts[@]} \ + -e RELEASE=${RELEASE:-0} \ + -e DEP=${DEP:-0} \ + -e CREATE_PY_WHEEL=1 \ + opencurvedocker/curve-base:build-${OS:-debian11} \ + $0 diff --git a/util/playground.sh b/util/playground.sh old mode 100644 new mode 100755 diff --git a/util/servicectl.sh b/util/servicectl.sh old mode 100644 new mode 100755 diff --git a/util/test.sh b/util/test.sh old mode 100644 new mode 100755 diff --git a/util/ut_in_image.sh b/util/ut_in_image.sh old mode 100644 new mode 100755 index 56cecaceb8..ee2010fe28 --- a/util/ut_in_image.sh +++ b/util/ut_in_image.sh @@ -1,4 +1,27 @@ #!/bin/bash +get_options() { + local args=`getopt -o S --long sanitizer: -n "$0" -- "$@"` + eval set -- "${args}" + while true + do + case "$1" in + -S|--sanitizer) + g_san=$2 + shift 2 + ;; + --) + shift + break + ;; + *) + exit 1 + ;; + esac + done +} + +get_options 
"$@" + WORKSPACE="/var/lib/jenkins/workspace/curve/curve_multijob/" sudo mkdir -p /var/lib/jenkins/log/curve_unittest/$BUILD_NUMBER git config --global --add safe.directory /var/lib/jenkins/workspace/curve/curve_multijob @@ -68,10 +91,10 @@ set -e #test_bin_dirs="bazel-bin/test/ bazel-bin/nebd/test/ bazel-bin/curvefs/test/" if [ $1 == "curvebs" ];then -make ci-build stor=bs ci=1 dep=1 +make ci-build stor=bs ci=1 dep=1 sanitizer=$g_san test_bin_dirs="bazel-bin/test/ bazel-bin/nebd/test/" elif [ $1 == "curvefs" ];then -make ci-build stor=fs ci=1 dep=1 only=curvefs/test/* +make ci-build stor=fs ci=1 dep=1 only=curvefs/test/* sanitizer=$g_san test_bin_dirs="bazel-bin/curvefs/test/" fi echo $test_bin_dirs @@ -79,12 +102,14 @@ echo $test_bin_dirs for i in 0 1 2 3; do mkdir -p $i/{copysets,recycler}; done +exclude_test_names="snapshot-server|snapshot_dummy_server|client-test|server-test|multi|topology_dummy|curve_client_workflow|curve_fake_mds|curve_tool_test" + # run all unittests background -for i in `find ${test_bin_dirs} -type f -executable -exec file -i '{}' \; | grep -E 'executable|sharedlib' | grep "charset=binary" | grep -v ".so"|grep test | grep -Ev 'snapshot-server|snapshot_dummy_server|client-test|server-test|multi|topology_dummy|curve_client_workflow|curve_fake_mds' | awk -F":" '{print $1}' | sed -n '1,40p' ` ;do sudo $i 2>&1 | tee $i.log & done +for i in `find ${test_bin_dirs} -type f -executable -exec file -i '{}' \; | grep -E 'executable|sharedlib' | grep "charset=binary" | grep -v ".so"|grep test | grep -Ev $exclude_test_names | awk -F":" '{print $1}' | sed -n '1,40p' ` ;do sudo $i 2>&1 | tee $i.log & done if [ $1 == "curvebs" ];then sleep 360 fi -for i in `find ${test_bin_dirs} -type f -executable -exec file -i '{}' \; | grep -E 'executable|sharedlib' | grep "charset=binary" | grep -v ".so"|grep test | grep -Ev 'snapshot-server|snapshot_dummy_server|client-test|server-test|multi|topology_dummy|curve_client_workflow|curve_fake_mds' | awk -F":" '{print $1}' 
| sed -n '41,$p' ` ;do sudo $i 2>&1 | tee $i.log & done +for i in `find ${test_bin_dirs} -type f -executable -exec file -i '{}' \; | grep -E 'executable|sharedlib' | grep "charset=binary" | grep -v ".so"|grep test | grep -Ev $exclude_test_names | awk -F":" '{print $1}' | sed -n '41,$p' ` ;do sudo $i 2>&1 | tee $i.log & done count=2 @@ -108,8 +133,8 @@ do now_test=`ps -ef | grep test | grep -v 'test[0-9]' | grep -v grep | awk '{print $8}'` echo "now_test case is "$now_test - for i in `find ${test_bin_dirs} -type f -executable -exec file -i '{}' \; | grep -E 'executable|sharedlib' | grep "charset=binary" | grep -v ".so"|grep test | grep -Ev 'snapshot-server|snapshot_dummy_server|client-test|server-test|multi|topology_dummy|curve_client_workflow|curve_client_workflow|curve_fake_mds' | awk -F":" '{print $1'}`;do a=`cat $i.log | grep "FAILED ]" | wc -l`;if [ $a -gt 0 ];then f1=`cat $i.log | grep "FAILED ]"`;f1_file="${i}.log"; echo "fail test is $i"; check=1; fi;done - for i in `find ${test_bin_dirs} -type f -executable -exec file -i '{}' \; | grep -E 'executable|sharedlib' | grep "charset=binary" | grep -v ".so"|grep test | grep -Ev 'snapshot-server|snapshot_dummy_server|client-test|server-test|multi|topology_dummy|curve_client_workflow|curve_client_workflow|curve_fake_mds' | awk -F":" '{print $1'}`;do b=`cat $i.log | grep "Failure" | wc -l`;if [ $b -gt 0 ];then f2=`cat $i.log | grep "Failure"`; f2_file="${i}.log";echo "fail test is $i"; check=1; fi;done + for i in `find ${test_bin_dirs} -type f -executable -exec file -i '{}' \; | grep -E 'executable|sharedlib' | grep "charset=binary" | grep -v ".so"|grep test | grep -Ev $exclude_test_names | awk -F":" '{print $1'}`;do a=`cat $i.log | grep "FAILED ]" | wc -l`;if [ $a -gt 0 ];then f1=`cat $i.log | grep "FAILED ]"`;f1_file="${i}.log"; echo "fail test is $i"; check=1; fi;done + for i in `find ${test_bin_dirs} -type f -executable -exec file -i '{}' \; | grep -E 'executable|sharedlib' | grep "charset=binary" | grep -v 
".so"|grep test | grep -Ev $exclude_test_names | awk -F":" '{print $1'}`;do b=`cat $i.log | grep "Failure" | wc -l`;if [ $b -gt 0 ];then f2=`cat $i.log | grep "Failure"`; f2_file="${i}.log";echo "fail test is $i"; check=1; fi;done if [ $check -eq 1 ];then echo "=========================test fail,Here is the logs of failed use cases=========================" echo "=========================test fail,Here is the logs of failed use cases=========================" @@ -142,10 +167,10 @@ tar xvf ci.tar.gz if [ $1 == "curvebs" ];then ./gen-coverage_bs.py -./check_coverage.sh "curvebs" +${WORKSPACE}/scripts/ci/check_coverage.sh "curvebs" elif [ $1 == "curvefs" ];then ./gen-coverage_fs.py -./check_coverage.sh "curvefs" +${WORKSPACE}/scripts/ci/check_coverage.sh "curvefs" fi cp -r coverage ${WORKSPACE} if [ $1 == "curvebs" ];then