diff --git a/.bazelrc b/.bazelrc index 66d96a61b5..bd9e2fd766 100644 --- a/.bazelrc +++ b/.bazelrc @@ -1,7 +1,10 @@ +build --verbose_failures + build --define=with_glog=true --define=libunwind=true build --copt -DHAVE_ZLIB=1 --copt -DGFLAGS_NS=google --copt -DUSE_BTHREAD_MUTEX build --cxxopt -Wno-error=format-security build:gcc7-later --cxxopt -faligned-new build --incompatible_blacklisted_protos_requires_proto_info=false build --copt=-fdiagnostics-color=always -run --copt=-fdiagnostics-color=always \ No newline at end of file + +run --copt=-fdiagnostics-color=always diff --git a/.gitignore b/.gitignore index 60718543d4..1a66484ca7 100755 --- a/.gitignore +++ b/.gitignore @@ -147,3 +147,22 @@ tools-v2/proto/curvefs/* tools-v2/*/*.test tools-v2/__debug_bin tools-v2/vendor/ + +.test +.note +.playground +.dumpfile +metastore_test.dat +GPATH +GRTAGS +GTAGS +core.* + +test/integration/*.conf +test/integration/client/config/client.conf* +test/integration/snapshotcloneserver/config/*.conf + +.pre-commit-config.yaml + +*.deb +*.whl diff --git a/.obm.cfg b/.obm.cfg new file mode 100644 index 0000000000..93bee71c49 --- /dev/null +++ b/.obm.cfg @@ -0,0 +1,2 @@ +container_name: curve-build-playground-master +container_image: opencurvedocker/curve-base:build-debian9 diff --git a/Makefile b/Makefile index a2d112ce41..7204e6347e 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ # Copyright (C) 2021 Jingli Chen (Wine93), NetEase Inc. -.PHONY: list build dep install image +.PHONY: list build dep install image playground check test stor?="" prefix?= "$(PWD)/projects" @@ -70,3 +70,12 @@ install: image: @bash util/image.sh $(stor) $(tag) $(os) + +playground: + @bash util/playground.sh + +check: + @bash util/check.sh $(stor) + +test: + @bash util/test.sh $(stor) $(only) diff --git a/README.md b/README.md index c5ba925edb..1405e0be98 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,8 @@ **A cloud-native distributed storage system** +**A sandbox project hosted by the CNCF Foundation** + #### English | [简体中文](README_cn.md) ### 📄 [Documents](https://github.com/opencurve/curve/tree/master/docs) || 🌐 [Official Website](https://www.opencurve.io/Curve/HOME) || 🏠 [Forum](https://ask.opencurve.io/t/topic/7)
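The Makefile hunk above wires three new targets (playground, check, test) to helper scripts under util/, and the new .obm.cfg names the build container they are meant to run against. A minimal usage sketch follows; the stor values shown (bs/fs) and the exact behavior of the only= filter are assumptions, not something this diff spells out:

$ make playground              # presumably starts or enters the curve-build-playground-master container (image opencurvedocker/curve-base:build-debian9, per .obm.cfg)
$ make check stor=bs           # runs util/check.sh for the chosen storage flavor (bs/fs values assumed)
$ make test stor=fs only=...   # runs util/test.sh; only= is assumed to narrow the run to a subset of tests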
@@ -156,7 +158,7 @@ Curve supports deployment in private and public cloud environments, and can also
-One of them, CurveFS shared file storage system, can be elasticly scaled to public cloud storage, which can provide users with greater capacity elasticity, lower cost, and better performance experience. +One of them, CurveFS shared file storage system, can be elastically scaled to public cloud storage, which can provide users with greater capacity elasticity, lower cost, and better performance experience. @@ -223,6 +225,7 @@ Please refer to the [Test environment configuration](docs/cn/测试环境配置 ## Practical - [CurveBS+NFS Build NFS Server](docs/practical/curvebs_nfs.md) +- [CurveFS+MinIO S3 Gateway](https://github.com/opencurve/curve-meetup-slides/blob/main/PrePaper/2023/%E6%94%AF%E6%8C%81POSIX%E5%92%8CS3%E7%BB%9F%E4%B8%80%E5%91%BD%E5%90%8D%E7%A9%BA%E9%97%B4%E2%80%94%E2%80%94Curve%E6%96%87%E4%BB%B6%E7%B3%BB%E7%BB%9FS3%E7%BD%91%E5%85%B3%E9%83%A8%E7%BD%B2%E5%AE%9E%E8%B7%B5.md) ## Governance See [Governance](https://github.com/opencurve/community/blob/master/GOVERNANCE.md). diff --git a/README_cn.md b/README_cn.md index b9aa22b211..12d4140388 100644 --- a/README_cn.md +++ b/README_cn.md @@ -4,7 +4,9 @@
-**A cloud-native distributed storage system** +**云原生高性能分布式存储系统** + +**CNCF基金会的沙箱托管项目** #### [English](README.md) | 简体中文 ### 📄 [文档](https://github.com/opencurve/curve/tree/master/docs) || 🌐 [官网](https://www.opencurve.io/Curve/HOME) || 🏠 [论坛](https://ask.opencurve.io/t/topic/7) @@ -225,6 +227,7 @@ $ ./fio --thread --rw=randwrite --bs=4k --ioengine=nebd --nebd=cbd:pool//pfstest ## 最佳实践 - [CurveBS+NFS搭建NFS存储](docs/practical/curvebs_nfs.md) +- [CurveFS+S3网关部署实践](https://github.com/opencurve/curve-meetup-slides/blob/main/PrePaper/2023/%E6%94%AF%E6%8C%81POSIX%E5%92%8CS3%E7%BB%9F%E4%B8%80%E5%91%BD%E5%90%8D%E7%A9%BA%E9%97%B4%E2%80%94%E2%80%94Curve%E6%96%87%E4%BB%B6%E7%B3%BB%E7%BB%9FS3%E7%BD%91%E5%85%B3%E9%83%A8%E7%BD%B2%E5%AE%9E%E8%B7%B5.md) ## 行为守则 Curve 的行为守则遵循[CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md)。 diff --git a/WORKSPACE b/WORKSPACE index 5519360fa9..27eed28365 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -228,6 +228,24 @@ http_archive( sha256 = "59b862f50e710277f8ede96f083a5bb8d7c9595376146838b9580be90374ee1f", ) +# fmt +http_archive( + name = "fmt", + url = "https://github.com/fmtlib/fmt/archive/9.1.0.tar.gz", + sha256 = "5dea48d1fcddc3ec571ce2058e13910a0d4a6bab4cc09a809d8b1dd1c88ae6f2", + strip_prefix = "fmt-9.1.0", + build_file = "//:thirdparties/fmt.BUILD", +) + +# spdlog +http_archive( + name = "spdlog", + urls = ["https://github.com/gabime/spdlog/archive/refs/tags/v1.11.0.tar.gz"], + strip_prefix = "spdlog-1.11.0", + sha256 = "ca5cae8d6cac15dae0ec63b21d6ad3530070650f68076f3a4a862ca293a858bb", + build_file = "//:thirdparties/spdlog.BUILD", +) + # Bazel platform rules. http_archive( name = "platforms", @@ -248,14 +266,14 @@ new_local_repository( http_archive( name = "hedron_compile_commands", - # Replace the commit hash in both places (below) with the latest, rather than using the stale one here. + # Replace the commit hash in both places (below) with the latest, rather than using the stale one here. # Even better, set up Renovate and let it do the work for you (see "Suggestion: Updates" in the README). urls = [ "https://curve-build.nos-eastchina1.126.net/bazel-compile-commands-extractor-af9af15f7bc16fc3e407e2231abfcb62907d258f.tar.gz", "https://github.com/hedronvision/bazel-compile-commands-extractor/archive/af9af15f7bc16fc3e407e2231abfcb62907d258f.tar.gz", ], strip_prefix = "bazel-compile-commands-extractor-af9af15f7bc16fc3e407e2231abfcb62907d258f", - # When you first run this tool, it'll recommend a sha256 hash to put here with a message like: "DEBUG: Rule 'hedron_compile_commands' indicated that a canonical reproducible form can be obtained by modifying arguments sha256 = ..." + # When you first run this tool, it'll recommend a sha256 hash to put here with a message like: "DEBUG: Rule 'hedron_compile_commands' indicated that a canonical reproducible form can be obtained by modifying arguments sha256 = ..." ) load("@hedron_compile_commands//:workspace_setup.bzl", "hedron_compile_commands_setup") hedron_compile_commands_setup() diff --git a/buildfs.sh b/buildfs.sh index b34d1b86dc..ddde2cc641 100755 --- a/buildfs.sh +++ b/buildfs.sh @@ -7,12 +7,22 @@ then exit fi +if [ `gcc -dumpversion | awk -F'.' '{print $1}'` -le 6 ] +then + bazelflags='' +else + bazelflags='--copt -faligned-new' +fi + if [ "$1" = "debug" ] then DEBUG_FLAG="--compilation_mode=dbg" fi -bazel build curvefs/... 
--copt -DHAVE_ZLIB=1 ${DEBUG_FLAG} -s --define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google --copt -Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --copt -DCURVEVERSION=${curve_version} --linkopt -L/usr/local/lib +bazel build curvefs/... --copt -DHAVE_ZLIB=1 ${DEBUG_FLAG} -s \ +--define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google --copt -Wno-error=format-security --copt \ +-DUSE_BTHREAD_MUTEX --copt -DCURVEVERSION=${curve_version} --linkopt -L/usr/local/lib ${bazelflags} + if [ $? -ne 0 ] then echo "build curvefs failed" @@ -34,4 +44,5 @@ then echo "mds_test failed" exit fi -fi \ No newline at end of file +fi +echo "end compile" diff --git a/conf/mds.conf b/conf/mds.conf index f02634ffe5..6d3c54c825 100644 --- a/conf/mds.conf +++ b/conf/mds.conf @@ -156,7 +156,7 @@ mds.topology.CreateCopysetRpcRetryTimes=20 # 请求chunkserver上创建copyset重试间隔 mds.topology.CreateCopysetRpcRetrySleepTimeMs=1000 # Topology模块刷新metric时间间隔 -mds.topology.UpdateMetricIntervalSec=60 +mds.topology.UpdateMetricIntervalSec=10 #和mds.chunkserver.failure.tolerance设置有关,一个zone 标准配置20台节点,如果允许3台节点failover, #那么剩余17台机器需要承载原先20台机器的空间,17/20=0.85,即使用量超过这个值即不再往这个池分配, #具体分为来两种情况, 当不使用chunkfilepool,物理池限制使用百分比,当使用 chunkfilepool 进行chunkfilepool分配时需预留failover空间, @@ -237,3 +237,16 @@ mds.throttle.iopsPerGB=30 mds.throttle.bpsMinInMB=120 mds.throttle.bpsMaxInMB=260 mds.throttle.bpsPerGBInMB=0.3 + +# +## poolset rules +# +# for backward compatibility, rules are applied to select a poolset when creating a file +# +# for example +# mds.poolset.rules=/dir1/:poolset1;/dir2/:poolset2;/dir1/sub/:sub +# +# when a create file request doesn't specify a poolset, the rules above are used to select one +# - if filename is /dir1/file, then poolset1 is selected +# - if filename is /dir1/sub/file, then sub is selected +mds.poolset.rules= diff --git a/curvefs/conf/client.conf b/curvefs/conf/client.conf index 01dc2a46ee..d5c1a4f0fd 100644 --- a/curvefs/conf/client.conf +++ b/curvefs/conf/client.conf @@ -77,17 +77,7 @@ rpc.healthCheckIntervalSec=0 #### fuseClient # TODO(xuchaojie): add unit -fuseClient.attrTimeOut=1.0 -fuseClient.entryTimeOut=1.0 fuseClient.listDentryLimit=65536 -fuseClient.flushPeriodSec=5 -fuseClient.maxNameLength=255 -fuseClient.iCacheLruSize=65536 -fuseClient.dCacheLruSize=1000000 -fuseClient.enableICacheMetrics=true -fuseClient.enableDCacheMetrics=true -fuseClient.lruTimeOutSec=60 -fuseClient.cto=true fuseClient.downloadMaxRetryTimes=3 ### kvcache opt @@ -114,6 +104,61 @@ fuseClient.maxDataSize=1024 fuseClient.refreshDataIntervalSec=30 fuseClient.warmupThreadsNum=10 +# the write throttle bps of fuseClient, default no limit +fuseClient.throttle.avgWriteBytes=0 +# the write burst bps of fuseClient, default no limit +fuseClient.throttle.burstWriteBytes=0 +# the times that write burst bps can continue, default 180s +fuseClient.throttle.burstWriteBytesSecs=180 + +# the write throttle iops of fuseClient, default no limit +fuseClient.throttle.avgWriteIops=0 +# the write burst iops of fuseClient, default no limit +fuseClient.throttle.burstWriteIops=0 +# the times that write burst Iops can continue, default 180s +fuseClient.throttle.burstWriteIopsSecs=180 + +# the read throttle bps of fuseClient, default no limit +fuseClient.throttle.avgReadBytes=0 +# the read burst bps of fuseClient, default no limit +fuseClient.throttle.burstReadBytes=0 +# the times that read burst bps can continue, default 180s +fuseClient.throttle.burstReadBytesSecs=180 + +# the read throttle iops of fuseClient, default no limit
+fuseClient.throttle.avgReadIops=0 +# the read burst Iops of fuseClient, default no limit +fuseClient.throttle.burstReadIops=0 +# the times that read burst Iops can continue, default 180s +fuseClient.throttle.burstReadIopsSecs=180 + +#### filesystem metadata +# { +# fs.disableXattr: +# if you want to get curvefs specified xattr, +# you can mount another fs with |fs.disableXattr| is true +# +# fs.lookupCache.negativeTimeoutSec: +# entry which not found will be cached if |timeout| > 0 +fs.cto=true +fs.maxNameLength=255 +fs.disableXattr=true +fs.accessLogging=true +fs.kernelCache.attrTimeoutSec=3600 +fs.kernelCache.dirAttrTimeoutSec=3600 +fs.kernelCache.entryTimeoutSec=3600 +fs.kernelCache.dirEntryTimeoutSec=3600 +fs.lookupCache.negativeTimeoutSec=0 +fs.lookupCache.minUses=1 +fs.lookupCache.lruSize=100000 +fs.dirCache.lruSize=5000000 +fs.openFile.lruSize=65536 +fs.attrWatcher.lruSize=5000000 +fs.rpc.listDentryLimit=65536 +fs.deferSync.delay=3 +fs.deferSync.deferDirMtime=false +# } + #### volume volume.bigFileSize=1048576 volume.volBlockSize=4096 @@ -135,8 +180,6 @@ volume.blockGroup.allocateOnce=4 #### s3 # this is for test. if s3.fakeS3=true, all data will be discarded s3.fakeS3=false -# the max size that fuse send -s3.fuseMaxSize=131072 s3.pageSize=65536 # prefetch blocks that disk cache use s3.prefetchBlocks=1 @@ -151,8 +194,11 @@ s3.baseSleepUs=500 s3.threadScheduleInterval=3 # data cache flush wait time s3.cacheFlushIntervalSec=5 +# write cache < 8,388,608 (8MB) is not allowed s3.writeCacheMaxByte=838860800 s3.readCacheMaxByte=209715200 +# file cache read thread num +s3.readCacheThreads=5 # http = 0, https = 1 s3.http_scheme=0 s3.verify_SSL=False diff --git a/curvefs/conf/metaserver.conf b/curvefs/conf/metaserver.conf index 3c413f6164..9c158f0a89 100644 --- a/curvefs/conf/metaserver.conf +++ b/curvefs/conf/metaserver.conf @@ -128,16 +128,25 @@ copyset.trash.scan_periodsec=120 # this config item should be tuned according cpu/memory/disk service.max_inflight_request=5000 -### apply queue options for each copyset -### apply queue is used to isolate raft threads, each worker has its own queue -### when a task can be applied it's been pushed into a corresponding worker queue by certain rules -# number of apply queue workers for each, each worker will start a indepent thread -applyqueue.worker_count=3 +# +# Concurrent apply queue +### concurrent apply queue options for each copyset +### concurrent apply queue is used to isolate raft threads, each worker has its own queue +### when a task can be applied it's been pushed into a corresponding read/write worker queue by certain rules -# apply queue depth for each copyset +# worker_count: number of apply queue workers for each, each worker will start a indepent thread +# queue_depth: apply queue depth for each copyset # all tasks in queue must be done when do raft snapshot, and raft apply and raft snapshot are executed in same thread # so, if queue depth is too large, it will cause other tasks to wait too long for apply -applyqueue.queue_depth=1 +# write apply queue workers count +applyqueue.write_worker_count=3 +# write apply queue depth +applyqueue.write_queue_depth=1 +# read apply queue workers count +applyqueue.read_worker_count=2 +# read apply queue depth +applyqueue.read_queue_depth=1 + # number of worker threads that created by brpc::Server # if set to |auto|, threads create by brpc::Server is equal to `getconf _NPROCESSORS_ONLN` + 1 diff --git a/curvefs/conf/tools.conf b/curvefs/conf/tools.conf index 52cbf70590..d911b3e67b 100644 --- 
a/curvefs/conf/tools.conf +++ b/curvefs/conf/tools.conf @@ -35,6 +35,9 @@ s3.bucket_name=bucket s3.blocksize=4194304 s3.chunksize=67108864 s3.useVirtualAddressing=false +# s3 objectPrefix, if set 0, means no prefix, if set 1, means inode prefix +# if set 2 and other values mean hash prefix +s3.objectPrefix=0 # statistic info in xattr, hardlink will not be supported when enable enableSumInDir=true diff --git a/curvefs/docker/debian9/Dockerfile b/curvefs/docker/debian9/Dockerfile index 166fec558c..1c61148e82 100644 --- a/curvefs/docker/debian9/Dockerfile +++ b/curvefs/docker/debian9/Dockerfile @@ -4,5 +4,5 @@ COPY curvefs /curvefs COPY libmemcached.so libmemcached.so.11 libhashkit.so.2 /usr/lib/ RUN mkdir -p /etc/curvefs /core /etc/curve && chmod a+x /entrypoint.sh \ && cp /curvefs/tools/sbin/curvefs_tool /usr/bin \ - && cp curvefs/tools-v2/sbin/curve /usr/bin/ + && cp /curvefs/tools-v2/sbin/curve /usr/bin/ ENTRYPOINT ["/entrypoint.sh"] diff --git a/curvefs/docker/debian9/entrypoint.sh b/curvefs/docker/debian9/entrypoint.sh index e95e09ed6d..0ca397dace 100755 --- a/curvefs/docker/debian9/entrypoint.sh +++ b/curvefs/docker/debian9/entrypoint.sh @@ -8,6 +8,7 @@ g_args="" g_prefix="" g_binary="" g_start_args="" +g_preexec="/curvefs/tools-v2/sbin/daemon" ############################ BASIC FUNCTIONS function msg() { @@ -119,6 +120,7 @@ function main() { prepare create_directory [[ $(command -v crontab) ]] && cron + [[ ! -z $g_preexec ]] && $g_preexec & if [ $g_role == "etcd" ]; then exec $g_binary $g_start_args >>$g_prefix/logs/etcd.log 2>&1 elif [ $g_role == "monitor" ]; then diff --git a/curvefs/monitor/grafana/provisioning/dashboards/client.json b/curvefs/monitor/grafana/provisioning/dashboards/client.json index bda858e2e5..a160f44f88 100644 --- a/curvefs/monitor/grafana/provisioning/dashboards/client.json +++ b/curvefs/monitor/grafana/provisioning/dashboards/client.json @@ -3,7 +3,10 @@ "list": [ { "builtIn": 1, - "datasource": "-- Grafana --", + "datasource": { + "type": "datasource", + "uid": "grafana" + }, "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", @@ -20,15 +23,17 @@ }, "description": "Curvefs client", "editable": true, - "gnetId": null, + "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 3, - "iteration": 1657691847424, "links": [], + "liveNow": false, "panels": [ { - "collapsed": false, - "datasource": null, + "collapsed": true, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, @@ -36,257 +41,15 @@ "y": 0 }, "id": 6, - "panels": [], - "title": "process usage", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 1 - }, - "hiddenSeries": false, - "id": 4, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.0.6", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "exemplar": true, - "expr": "process_memory_resident{instance=~\"$instance\"}", - "interval": "", - "legendFormat": "process_memory_resident {{instance}}", 
- "refId": "process_memory_resident" - }, - { - "datasource": { - "type": "prometheus" - }, - "exemplar": true, - "expr": "process_memory_virtual{instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "process_memory_virtual {{instance}}", - "refId": "process_memory_virtual" - }, - { - "datasource": { - "type": "prometheus" - }, - "exemplar": true, - "expr": "process_memory_shared{instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "process_memory_shared {{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "process memory usage", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:63", - "format": "decbytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:64", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 1 - }, - "hiddenSeries": false, - "id": 2, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "rightSide": false, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.0.6", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "exemplar": true, - "expr": "process_cpu_usage{instance=~\"$instance\"}", - "interval": "", - "legendFormat": "process_cpu_usage {{instance}}", - "refId": "process_cpu_usage" - }, - { - "datasource": { - "type": "prometheus" - }, - "exemplar": true, - "expr": "process_cpu_usage_system{instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "process_cpu_usage_system {{instance}}", - "refId": "process_cpu_usage_system" - }, - { - "datasource": { - "type": "prometheus" - }, - "exemplar": true, - "expr": "process_cpu_usage_user{instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "process_cpu_usage_user {{instance}}", - "refId": "process_cpu_usage_user" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "process cpu usage", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:495", - "format": "percentunit", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:496", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "collapsed": true, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 9 - }, - "id": 85, "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, - "fieldConfig": { - "defaults": { - "unit": "µs" - }, - "overrides": [] + "datasource": { + "type": "prometheus", + "uid": 
"PBFA97CFB590B2093" }, "fill": 1, "fillGradient": 0, @@ -294,10 +57,10 @@ "h": 8, "w": 12, "x": 0, - "y": 10 + "y": 1 }, "hiddenSeries": false, - "id": 107, + "id": 4, "interval": "1s", "legend": { "alignAsTable": true, @@ -316,7 +79,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "9.4.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -326,18 +89,44 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_read_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "process_memory_resident{instance=~\"$instance\"}", "interval": "", - "legendFormat": "", + "legendFormat": "process_memory_resident {{instance}}", + "refId": "process_memory_resident" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "process_memory_virtual{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "process_memory_virtual {{instance}}", + "refId": "process_memory_virtual" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "process_memory_shared{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "process_memory_shared {{instance}}", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "read_latency", + "title": "process memory usage", "tooltip": { "shared": true, "sort": 0, @@ -345,33 +134,26 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "format": "µs", - "label": null, + "$$hashKey": "object:63", + "format": "decbytes", "logBase": 1, - "max": null, - "min": null, "show": true }, { + "$$hashKey": "object:64", "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -379,12 +161,9 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, - "fieldConfig": { - "defaults": { - "unit": "µs" - }, - "overrides": [] + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "fill": 1, "fillGradient": 0, @@ -392,10 +171,10 @@ "h": 8, "w": 12, "x": 12, - "y": 10 + "y": 1 }, "hiddenSeries": false, - "id": 108, + "id": 2, "interval": "1s", "legend": { "alignAsTable": true, @@ -403,6 +182,7 @@ "current": false, "max": true, "min": true, + "rightSide": false, "show": true, "total": false, "values": true @@ -414,7 +194,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "9.4.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -424,18 +204,44 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_write_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "process_cpu_usage{instance=~\"$instance\"}", "interval": "", - "legendFormat": "", - "refId": "A" + "legendFormat": "process_cpu_usage {{instance}}", + "refId": "process_cpu_usage" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "process_cpu_usage_system{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "process_cpu_usage_system {{instance}}", + "refId": 
"process_cpu_usage_system" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "process_cpu_usage_user{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "process_cpu_usage_user {{instance}}", + "refId": "process_cpu_usage_user" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "write_latency", + "title": "process cpu usage", "tooltip": { "shared": true, "sort": 0, @@ -443,41 +249,64 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "format": "µs", - "label": null, + "$$hashKey": "object:495", + "format": "percentunit", "logBase": 1, - "max": null, - "min": null, "show": true }, { + "$$hashKey": "object:496", "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } - }, + } + ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], + "title": "process usage", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 1 + }, + "id": 85, + "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -490,10 +319,10 @@ "h": 8, "w": 12, "x": 0, - "y": 18 + "y": 2 }, "hiddenSeries": false, - "id": 87, + "id": 107, "interval": "1s", "legend": { "alignAsTable": true, @@ -512,7 +341,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "9.4.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -522,18 +351,20 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_mk_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_read_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "mkdir_latency", + "title": "read_latency", "tooltip": { "shared": true, "sort": 0, @@ -541,33 +372,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -575,7 +397,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -588,10 +413,10 @@ "h": 8, "w": 12, "x": 12, - "y": 18 + "y": 2 }, "hiddenSeries": false, - "id": 91, + "id": 108, "interval": "1s", "legend": { "alignAsTable": true, @@ -610,7 +435,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "9.4.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -620,18 +445,20 @@ "steppedLine": false, "targets": [ { + 
"datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_rm_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_write_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "rmdir_latency", + "title": "write_latency", "tooltip": { "shared": true, "sort": 0, @@ -639,33 +466,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -673,7 +491,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -686,10 +507,10 @@ "h": 8, "w": 12, "x": 0, - "y": 26 + "y": 10 }, "hiddenSeries": false, - "id": 88, + "id": 87, "interval": "1s", "legend": { "alignAsTable": true, @@ -708,7 +529,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "9.4.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -718,18 +539,20 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_create_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_mk_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "create_latency", + "title": "mkdir_latency", "tooltip": { "shared": true, "sort": 0, @@ -737,33 +560,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -771,7 +585,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -784,10 +601,10 @@ "h": 8, "w": 12, "x": 12, - "y": 26 + "y": 10 }, "hiddenSeries": false, - "id": 92, + "id": 91, "interval": "1s", "legend": { "alignAsTable": true, @@ -806,7 +623,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "9.4.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -816,18 +633,20 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_unlink_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_rm_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - 
"title": "unlink_latency", + "title": "rmdir_latency", "tooltip": { "shared": true, "sort": 0, @@ -835,33 +654,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -869,7 +679,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -882,10 +695,10 @@ "h": 8, "w": 12, "x": 0, - "y": 34 + "y": 18 }, "hiddenSeries": false, - "id": 89, + "id": 88, "interval": "1s", "legend": { "alignAsTable": true, @@ -904,7 +717,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "9.4.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -914,18 +727,20 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_open_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_create_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "open_latency", + "title": "create_latency", "tooltip": { "shared": true, "sort": 0, @@ -933,33 +748,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -967,7 +773,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -980,10 +789,10 @@ "h": 8, "w": 12, "x": 12, - "y": 34 + "y": 18 }, "hiddenSeries": false, - "id": 90, + "id": 92, "interval": "1s", "legend": { "alignAsTable": true, @@ -1002,7 +811,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "9.4.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -1012,18 +821,20 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_release_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_unlink_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "release_latency", + "title": "unlink_latency", "tooltip": { "shared": true, "sort": 0, @@ -1031,33 +842,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true 
} ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -1065,7 +867,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -1078,10 +883,10 @@ "h": 8, "w": 12, "x": 0, - "y": 42 + "y": 26 }, "hiddenSeries": false, - "id": 93, + "id": 89, "interval": "1s", "legend": { "alignAsTable": true, @@ -1100,7 +905,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "9.4.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -1110,18 +915,20 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_open_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_open_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "opendir_latency", + "title": "open_latency", "tooltip": { "shared": true, "sort": 0, @@ -1129,33 +936,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -1163,7 +961,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -1176,10 +977,10 @@ "h": 8, "w": 12, "x": 12, - "y": 42 + "y": 26 }, "hiddenSeries": false, - "id": 94, + "id": 90, "interval": "1s", "legend": { "alignAsTable": true, @@ -1198,7 +999,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.0.6", + "pluginVersion": "9.4.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -1208,18 +1009,20 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_release_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_release_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "releasedir_latency", + "title": "release_latency", "tooltip": { "shared": true, "sort": 0, @@ -1227,33 +1030,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -1261,7 +1055,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -1274,10 +1071,10 @@ "h": 8, "w": 12, "x": 0, - "y": 50 + "y": 34 }, "hiddenSeries": false, - "id": 99, + "id": 
93, "interval": "1s", "legend": { "alignAsTable": true, @@ -1306,18 +1103,20 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_read_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_open_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "readdir_latency", + "title": "opendir_latency", "tooltip": { "shared": true, "sort": 0, @@ -1325,33 +1124,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -1359,7 +1149,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -1372,10 +1165,10 @@ "h": 8, "w": 12, "x": 12, - "y": 50 + "y": 34 }, "hiddenSeries": false, - "id": 100, + "id": 94, "interval": "1s", "legend": { "alignAsTable": true, @@ -1404,18 +1197,20 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_read_link_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_release_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "readlink_latency", + "title": "releasedir_latency", "tooltip": { "shared": true, "sort": 0, @@ -1423,33 +1218,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -1457,7 +1243,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -1470,10 +1259,10 @@ "h": 8, "w": 12, "x": 0, - "y": 58 + "y": 42 }, "hiddenSeries": false, - "id": 95, + "id": 99, "interval": "1s", "legend": { "alignAsTable": true, @@ -1502,18 +1291,20 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_lookup_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_read_dir_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "lookup_latency", + "title": "readdir_latency", "tooltip": { "shared": true, "sort": 0, @@ -1521,33 +1312,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": 
null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -1555,7 +1337,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -1568,10 +1353,10 @@ "h": 8, "w": 12, "x": 12, - "y": 58 + "y": 42 }, "hiddenSeries": false, - "id": 96, + "id": 100, "interval": "1s", "legend": { "alignAsTable": true, @@ -1600,18 +1385,20 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_mk_nod_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_read_link_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "mknod_latency", + "title": "readlink_latency", "tooltip": { "shared": true, "sort": 0, @@ -1619,33 +1406,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -1653,7 +1431,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -1666,10 +1447,10 @@ "h": 8, "w": 12, "x": 0, - "y": 66 + "y": 50 }, "hiddenSeries": false, - "id": 97, + "id": 95, "interval": "1s", "legend": { "alignAsTable": true, @@ -1698,18 +1479,20 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_link_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_lookup_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "link_latency", + "title": "lookup_latency", "tooltip": { "shared": true, "sort": 0, @@ -1717,33 +1500,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -1751,7 +1525,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -1764,10 +1541,10 @@ "h": 8, "w": 12, "x": 12, - "y": 66 + "y": 50 }, "hiddenSeries": false, - "id": 98, + "id": 96, "interval": "1s", "legend": { "alignAsTable": true, @@ -1796,18 +1573,20 @@ "steppedLine": false, "targets": [ { + 
"datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_symlink_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_mk_nod_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "symlink_latency", + "title": "mknod_latency", "tooltip": { "shared": true, "sort": 0, @@ -1815,33 +1594,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -1849,7 +1619,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -1862,10 +1635,10 @@ "h": 8, "w": 12, "x": 0, - "y": 74 + "y": 58 }, "hiddenSeries": false, - "id": 101, + "id": 97, "interval": "1s", "legend": { "alignAsTable": true, @@ -1894,18 +1667,20 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_get_attr_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_link_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "getattr_latency", + "title": "link_latency", "tooltip": { "shared": true, "sort": 0, @@ -1913,33 +1688,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -1947,7 +1713,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -1960,10 +1729,10 @@ "h": 8, "w": 12, "x": 12, - "y": 74 + "y": 58 }, "hiddenSeries": false, - "id": 102, + "id": 98, "interval": "1s", "legend": { "alignAsTable": true, @@ -1992,18 +1761,20 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_set_attr_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_symlink_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "setattr_latency", + "title": "symlink_latency", "tooltip": { "shared": true, "sort": 0, @@ -2011,33 +1782,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true 
}, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -2045,7 +1807,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -2058,10 +1823,10 @@ "h": 8, "w": 12, "x": 0, - "y": 82 + "y": 66 }, "hiddenSeries": false, - "id": 103, + "id": 101, "interval": "1s", "legend": { "alignAsTable": true, @@ -2090,18 +1855,20 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "{__name__=~\"curvefs_client_op_get_xattr_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "expr": "{__name__=~\"curvefs_client_op_get_attr_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "getxattr_latency", + "title": "getattr_latency", "tooltip": { "shared": true, "sort": 0, @@ -2109,33 +1876,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -2143,7 +1901,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -2156,10 +1917,198 @@ "h": 8, "w": 12, "x": 12, - "y": 82 + "y": 66 }, "hiddenSeries": false, - "id": 104, + "id": 102, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.0.6", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "{__name__=~\"curvefs_client_op_set_attr_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "setattr_latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "µs", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "µs" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 74 + }, + "hiddenSeries": false, + "id": 103, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": 
false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.0.6", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "{__name__=~\"curvefs_client_op_get_xattr_lat_[[quantile:regex]]\", instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "getxattr_latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "µs", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "µs" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 74 + }, + "hiddenSeries": false, + "id": 104, "interval": "1s", "legend": { "alignAsTable": true, @@ -2188,6 +2137,10 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "{__name__=~\"curvefs_client_op_list_xattr_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", @@ -2196,9 +2149,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "listxattr_latency", "tooltip": { "shared": true, @@ -2207,33 +2158,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -2241,7 +2183,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -2254,7 +2199,7 @@ "h": 8, "w": 12, "x": 0, - "y": 90 + "y": 82 }, "hiddenSeries": false, "id": 105, @@ -2286,6 +2231,10 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "{__name__=~\"curvefs_client_op_fsync_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", @@ -2294,9 +2243,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "fsync_latency", "tooltip": { "shared": true, @@ -2305,33 +2252,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -2339,7 
+2277,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -2352,7 +2293,7 @@ "h": 8, "w": 12, "x": 12, - "y": 90 + "y": 82 }, "hiddenSeries": false, "id": 106, @@ -2384,6 +2325,10 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "{__name__=~\"curvefs_client_op_flush_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", @@ -2392,9 +2337,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "flush_latency", "tooltip": { "shared": true, @@ -2403,33 +2346,24 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -2437,7 +2371,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -2450,7 +2387,7 @@ "h": 8, "w": 12, "x": 0, - "y": 98 + "y": 90 }, "hiddenSeries": false, "id": 109, @@ -2482,6 +2419,10 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "{__name__=~\"curvefs_client_op_rename_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "interval": "", @@ -2490,9 +2431,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "rename_latency", "tooltip": { "shared": true, @@ -2501,47 +2440,50 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "µs", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "op latency", "type": "row" }, { "collapsed": true, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 10 + "y": 2 }, "id": 176, "panels": [ @@ -2550,7 +2492,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -2563,7 +2508,7 @@ "h": 8, "w": 24, "x": 0, - "y": 11 + "y": 43 }, "hiddenSeries": false, "id": 177, @@ -2595,6 +2540,10 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "{__name__=~\"curvefs_client_op_[[op:regex]]_inflight_num\", instance=~\"$instance\"}", "interval": "", @@ -2603,9 +2552,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "op _inflight_num", "tooltip": { "shared": true, @@ -2614,47 +2561,50 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, 
"values": [] }, "yaxes": [ { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "op inflight number", "type": "row" }, { "collapsed": true, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 11 + "y": 3 }, "id": 47, "panels": [ @@ -2663,7 +2613,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -2676,7 +2629,7 @@ "h": 8, "w": 12, "x": 0, - "y": 12 + "y": 44 }, "hiddenSeries": false, "id": 48, @@ -2709,7 +2662,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_.*[[fs:regex]]_user_read_qps\", instance=~\"$instance\"}", @@ -2719,9 +2673,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "read_qps", "tooltip": { "shared": true, @@ -2730,9 +2682,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -2759,7 +2709,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -2772,7 +2725,7 @@ "h": 8, "w": 12, "x": 12, - "y": 12 + "y": 44 }, "hiddenSeries": false, "id": 49, @@ -2805,7 +2758,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_.*[[fs:regex]]_user_write_qps\", instance=~\"$instance\"}", @@ -2815,9 +2769,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_qps", "tooltip": { "shared": true, @@ -2826,9 +2778,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -2855,7 +2805,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "binBps" @@ -2868,7 +2821,7 @@ "h": 8, "w": 12, "x": 0, - "y": 20 + "y": 52 }, "hiddenSeries": false, "id": 50, @@ -2901,7 +2854,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_read_bps\", instance=~\"$instance\"}", @@ -2911,9 +2865,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "read_bps", "tooltip": { "shared": true, @@ -2922,9 +2874,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -2951,7 +2901,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "binBps" @@ -2964,7 +2917,7 @@ "h": 8, "w": 12, "x": 12, - "y": 20 + "y": 52 }, "hiddenSeries": false, "id": 51, @@ -2997,7 
+2950,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_write_bps\", instance=~\"$instance\"}", @@ -3007,9 +2961,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_bps", "tooltip": { "shared": true, @@ -3018,9 +2970,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -3047,7 +2997,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -3060,7 +3013,7 @@ "h": 8, "w": 12, "x": 0, - "y": 28 + "y": 60 }, "hiddenSeries": false, "id": 52, @@ -3093,7 +3046,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_read_eps\", instance=~\"$instance\"}", @@ -3103,9 +3057,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "read_eps", "tooltip": { "shared": true, @@ -3114,9 +3066,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -3143,14 +3093,17 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, - "y": 28 + "y": 60 }, "hiddenSeries": false, "id": 53, @@ -3183,7 +3136,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs.*[[fs:regex]]_adaptor_write_eps\", instance=~\"$instance\"}", @@ -3193,9 +3147,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_eps", "tooltip": { "shared": true, @@ -3204,9 +3156,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -3233,7 +3183,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "reqps" @@ -3246,7 +3199,7 @@ "h": 8, "w": 12, "x": 0, - "y": 36 + "y": 68 }, "hiddenSeries": false, "id": 54, @@ -3279,7 +3232,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_read_rps\", instance=~\"$instance\"}", @@ -3289,9 +3243,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "read_rps", "tooltip": { "shared": true, @@ -3300,9 +3252,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -3329,7 +3279,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "reqps" @@ -3342,7 +3295,7 @@ "h": 8, "w": 12, "x": 12, - "y": 36 + "y": 68 }, "hiddenSeries": false, "id": 43, @@ -3375,7 +3328,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": 
"{__name__=~\"curvefs.*[[fs:regex]]_user_write_rps\", instance=~\"$instance\"}", @@ -3385,9 +3339,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_rps", "tooltip": { "shared": true, @@ -3396,9 +3348,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -3425,7 +3375,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "bytes" @@ -3438,7 +3391,7 @@ "h": 8, "w": 12, "x": 0, - "y": 44 + "y": 76 }, "hiddenSeries": false, "id": 56, @@ -3471,7 +3424,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_read_io_size\", instance=~\"$instance\"}", @@ -3481,9 +3435,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "read_io_size", "tooltip": { "shared": true, @@ -3492,9 +3444,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -3521,7 +3471,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "bytes" @@ -3534,7 +3487,7 @@ "h": 8, "w": 12, "x": 12, - "y": 44 + "y": 76 }, "hiddenSeries": false, "id": 57, @@ -3567,7 +3520,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_write_io_size\", instance=~\"$instance\"}", @@ -3577,9 +3531,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_io_size", "tooltip": { "shared": true, @@ -3588,9 +3540,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -3617,7 +3567,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -3630,7 +3583,7 @@ "h": 8, "w": 12, "x": 0, - "y": 52 + "y": 84 }, "hiddenSeries": false, "id": 58, @@ -3662,6 +3615,10 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_read_lat_[[quantile:regex]]\", instance=~\"$instance\"}", "hide": false, @@ -3671,9 +3628,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "read_latency", "tooltip": { "shared": true, @@ -3682,9 +3637,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -3711,7 +3664,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -3724,7 +3680,7 @@ "h": 8, "w": 12, "x": 12, - "y": 52 + "y": 84 }, "hiddenSeries": false, "id": 59, @@ -3756,6 +3712,10 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "{__name__=~\"curvefs.*[[fs:regex]]_user_write_lat_[[quantile:regex]]\", instance=~\"$instance\"}", 
"hide": false, @@ -3765,9 +3725,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_latency", "tooltip": { "shared": true, @@ -3776,9 +3734,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -3801,17 +3757,29 @@ } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "client interface performance", "type": "row" }, { "collapsed": true, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 12 + "y": 4 }, "id": 8, "panels": [ @@ -3820,7 +3788,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -3833,7 +3804,7 @@ "h": 8, "w": 12, "x": 0, - "y": 13 + "y": 45 }, "hiddenSeries": false, "id": 10, @@ -3866,7 +3837,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_qps\", job=\"client\", instance=~\"$instance\"}", @@ -3876,9 +3848,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "read_qps", "tooltip": { "shared": true, @@ -3887,9 +3857,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -3916,7 +3884,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -3929,7 +3900,7 @@ "h": 8, "w": 12, "x": 12, - "y": 13 + "y": 45 }, "hiddenSeries": false, "id": 11, @@ -3962,7 +3933,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_qps\", instance=~\"$instance\"}", @@ -3972,9 +3944,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_qps", "tooltip": { "shared": true, @@ -3983,9 +3953,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -4012,7 +3980,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "binBps" @@ -4025,7 +3996,7 @@ "h": 8, "w": 12, "x": 0, - "y": 21 + "y": 53 }, "hiddenSeries": false, "id": 12, @@ -4058,7 +4029,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_bps\", instance=~\"$instance\"}", @@ -4068,9 +4040,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "read_bps", "tooltip": { "shared": true, @@ -4079,9 +4049,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -4108,7 +4076,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "binBps" @@ -4121,7 +4092,7 @@ "h": 8, "w": 
12, "x": 12, - "y": 21 + "y": 53 }, "hiddenSeries": false, "id": 13, @@ -4154,7 +4125,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_bps\", instance=~\"$instance\"}", @@ -4164,9 +4136,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_bps", "tooltip": { "shared": true, @@ -4175,9 +4145,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -4204,7 +4172,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -4217,7 +4188,7 @@ "h": 8, "w": 12, "x": 0, - "y": 29 + "y": 61 }, "hiddenSeries": false, "id": 14, @@ -4250,7 +4221,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_eps\", instance=~\"$instance\"}", @@ -4260,9 +4232,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "read_eps", "tooltip": { "shared": true, @@ -4271,9 +4241,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -4300,14 +4268,17 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, - "y": 29 + "y": 61 }, "hiddenSeries": false, "id": 15, @@ -4340,7 +4311,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_eps\", instance=~\"$instance\"}", @@ -4350,9 +4322,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_eps", "tooltip": { "shared": true, @@ -4361,9 +4331,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -4390,7 +4358,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "reqps" @@ -4403,7 +4374,7 @@ "h": 8, "w": 12, "x": 0, - "y": 37 + "y": 69 }, "hiddenSeries": false, "id": 42, @@ -4436,7 +4407,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_rps\", instance=~\"$instance\"}", @@ -4446,9 +4418,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "read_rps", "tooltip": { "shared": true, @@ -4457,9 +4427,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -4486,7 +4454,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "reqps" @@ -4499,7 +4470,7 @@ "h": 8, "w": 12, "x": 12, - "y": 37 + "y": 69 }, "hiddenSeries": false, "id": 55, @@ -4532,7 +4503,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": 
"prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_rps\", instance=~\"$instance\"}", @@ -4542,9 +4514,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_rps", "tooltip": { "shared": true, @@ -4553,9 +4523,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -4582,7 +4550,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "bytes" @@ -4595,7 +4566,7 @@ "h": 8, "w": 12, "x": 0, - "y": 45 + "y": 77 }, "hiddenSeries": false, "id": 60, @@ -4628,7 +4599,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_size\", instance=~\"$instance\"}", @@ -4638,9 +4610,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "read_io_size", "tooltip": { "shared": true, @@ -4649,9 +4619,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -4678,7 +4646,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "bytes" @@ -4691,7 +4662,7 @@ "h": 8, "w": 12, "x": 12, - "y": 45 + "y": 77 }, "hiddenSeries": false, "id": 61, @@ -4724,7 +4695,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_size\", instance=~\"$instance\"}", @@ -4734,9 +4706,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_io_size", "tooltip": { "shared": true, @@ -4745,9 +4715,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -4774,7 +4742,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -4787,7 +4758,7 @@ "h": 8, "w": 12, "x": 0, - "y": 53 + "y": 85 }, "hiddenSeries": false, "id": 26, @@ -4819,6 +4790,10 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", "hide": false, @@ -4828,9 +4803,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "read_latency", "tooltip": { "shared": true, @@ -4839,9 +4812,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -4868,7 +4839,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -4881,7 +4855,7 @@ "h": 8, "w": 12, "x": 12, - "y": 53 + "y": 85 }, "hiddenSeries": false, "id": 27, @@ -4913,6 +4887,10 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, 
"expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", "hide": false, @@ -4922,9 +4900,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_latency", "tooltip": { "shared": true, @@ -4933,9 +4909,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -4958,17 +4932,29 @@ } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "s3_adaptor r/w performance", "type": "row" }, { "collapsed": true, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 13 + "y": 5 }, "id": 72, "panels": [ @@ -4977,7 +4963,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -4990,7 +4979,7 @@ "h": 8, "w": 12, "x": 0, - "y": 14 + "y": 46 }, "hiddenSeries": false, "id": 73, @@ -5023,7 +5012,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_disk_cache_qps\", instance=~\"$instance\"}", @@ -5033,9 +5023,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "read_diskcache_qps", "tooltip": { "shared": true, @@ -5044,9 +5032,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -5073,7 +5059,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -5086,7 +5075,7 @@ "h": 8, "w": 12, "x": 12, - "y": 14 + "y": 46 }, "hiddenSeries": false, "id": 74, @@ -5119,7 +5108,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_disk_cache_qps\", instance=~\"$instance\"}", @@ -5129,9 +5119,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_diskcache_qps", "tooltip": { "shared": true, @@ -5140,9 +5128,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -5169,7 +5155,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "binBps" @@ -5182,7 +5171,7 @@ "h": 8, "w": 12, "x": 0, - "y": 22 + "y": 54 }, "hiddenSeries": false, "id": 75, @@ -5215,7 +5204,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_disk_cache_bps\", instance=~\"$instance\"}", @@ -5225,9 +5215,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "read_diskcache_bps", "tooltip": { "shared": true, @@ -5236,9 +5224,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -5265,7 +5251,10 @@ "bars": false, "dashLength": 10, "dashes": false, - 
"datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "binBps" @@ -5278,7 +5267,7 @@ "h": 8, "w": 12, "x": 12, - "y": 22 + "y": 54 }, "hiddenSeries": false, "id": 76, @@ -5311,7 +5300,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_disk_cache_bps\", instance=~\"$instance\"}", @@ -5321,9 +5311,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_diskcache_bps", "tooltip": { "shared": true, @@ -5332,9 +5320,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -5361,7 +5347,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -5374,7 +5363,7 @@ "h": 8, "w": 12, "x": 0, - "y": 30 + "y": 62 }, "hiddenSeries": false, "id": 77, @@ -5406,6 +5395,10 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_disk_cache_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", "hide": false, @@ -5415,9 +5408,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "read_diskcache_latency", "tooltip": { "shared": true, @@ -5426,9 +5417,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -5455,7 +5444,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -5468,7 +5460,7 @@ "h": 8, "w": 12, "x": 12, - "y": 30 + "y": 62 }, "hiddenSeries": false, "id": 78, @@ -5500,6 +5492,10 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_disk_cache_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", "hide": false, @@ -5509,9 +5505,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_diskcache_latency", "tooltip": { "shared": true, @@ -5520,9 +5514,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -5545,604 +5537,633 @@ } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "s3_adaptor r/w diskcache performance", "type": "row" }, { - "collapsed": true, - "datasource": null, + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 14 + "y": 6 }, "id": 69, - "panels": [ + "panels": [], + "targets": [ { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "fieldConfig": { - "defaults": { - "unit": "short" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 39 - }, - "hiddenSeries": false, - "id": 62, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - 
"max": true, - "min": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.0.6", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_s3_qps\", instance=~\"$instance\"}", - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "read_s3_qps", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "refId": "A" + } + ], + "title": "s3_adaptor r/w s3 performance", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "short" }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 7 + }, + "hiddenSeries": false, + "id": 62, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "fieldConfig": { - "defaults": { - "unit": "short" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 39 - }, - "hiddenSeries": false, - "id": 63, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, - "percentage": false, - "pluginVersion": "8.0.6", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_s3_qps\", instance=~\"$instance\"}", - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "write_s3_qps", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" + "exemplar": true, + "expr": 
"{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_s3_qps\", instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "read_s3_qps", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "short", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "short" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 7 + }, + "hiddenSeries": false, + "id": 63, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_s3_qps\", instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "write_s3_qps", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "short", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "binBps" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 15 + }, + "hiddenSeries": false, + "id": 66, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "exemplar": true, + "expr": 
"{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_s3_bps\", instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "read_s3_bps", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "binBps", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "binBps" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 15 + }, + "hiddenSeries": false, + "id": 65, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_s3_bps\", instance=~\"$instance\"}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "write_s3_bps", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "binBps", + "logBase": 1, + "show": true }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "fieldConfig": { - "defaults": { - "unit": "binBps" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 47 - }, - "hiddenSeries": false, - "id": 66, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.0.6", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_s3_bps\", instance=~\"$instance\"}", - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "read_s3_bps", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "binBps", - "logBase": 1, - "show": true - 
}, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "µs" }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 23 + }, + "hiddenSeries": false, + "id": 64, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "fieldConfig": { - "defaults": { - "unit": "binBps" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 47 - }, - "hiddenSeries": false, - "id": 65, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.0.6", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_s3_bps\", instance=~\"$instance\"}", - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "write_s3_bps", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "binBps", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_s3_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "read_s3_latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "µs", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, 
+ "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "unit": "µs" }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 23 + }, + "hiddenSeries": false, + "id": 67, + "interval": "1s", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "fieldConfig": { - "defaults": { - "unit": "µs" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 55 - }, - "hiddenSeries": false, - "id": 64, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.0.6", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_read_s3_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "read_s3_latency", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "µs", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "exemplar": true, + "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_s3_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "write_s3_latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:212", + "format": "µs", + "logBase": 1, + "show": true }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "fieldConfig": { - "defaults": { - "unit": "µs" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 55 - }, - "hiddenSeries": false, - "id": 67, - "interval": "1s", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - 
"linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.0.6", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "exemplar": true, - "expr": "{__name__=~\"curvefs_s3.*[[fs:regex]]_adaptor_write_s3_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", - "hide": false, - "interval": "", - "legendFormat": "", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "write_s3_latency", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "µs", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:213", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "$$hashKey": "object:213", + "format": "short", + "logBase": 1, + "show": true } ], - "title": "s3_adaptor r/w s3 performance", - "type": "row" + "yaxis": { + "align": false + } }, { "collapsed": true, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 15 + "y": 31 }, "id": 80, "panels": [ @@ -6151,7 +6172,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -6164,7 +6188,7 @@ "h": 8, "w": 12, "x": 0, - "y": 64 + "y": 96 }, "hiddenSeries": false, "id": 81, @@ -6197,7 +6221,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_disk_cache.*[[fs:regex]]_write_s3_qps\", instance=~\"$instance\"}", @@ -6207,9 +6232,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_s3_qps", "tooltip": { "shared": true, @@ -6218,9 +6241,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -6247,7 +6268,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "binBps" @@ -6260,7 +6284,7 @@ "h": 8, "w": 12, "x": 12, - "y": 64 + "y": 96 }, "hiddenSeries": false, "id": 82, @@ -6293,7 +6317,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_disk_cache.*[[fs:regex]]_write_s3_bps\", instance=~\"$instance\"}", @@ -6303,9 +6328,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_s3_bps", "tooltip": { "shared": true, @@ -6314,9 +6337,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -6343,7 +6364,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -6356,7 +6380,7 @@ "h": 8, "w": 12, "x": 0, - "y": 72 + "y": 104 }, "hiddenSeries": false, "id": 83, @@ -6388,6 +6412,10 @@ 
"steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "{__name__=~\"curvefs_disk_cache.*[[fs:regex]]_write_s3_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", "hide": false, @@ -6397,9 +6425,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_s3_latency", "tooltip": { "shared": true, @@ -6408,9 +6434,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -6433,17 +6457,29 @@ } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "diskcache r/w s3 performance", "type": "row" }, { "collapsed": true, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 16 + "y": 32 }, "id": 111, "panels": [ @@ -6452,7 +6488,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -6465,7 +6504,7 @@ "h": 8, "w": 12, "x": 0, - "y": 81 + "y": 113 }, "hiddenSeries": false, "id": 112, @@ -6498,7 +6537,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs.*_file_manager_num\", instance=~\"$instance\"}", @@ -6508,9 +6548,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "file_manager_num", "tooltip": { "shared": true, @@ -6519,9 +6557,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -6548,7 +6584,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -6561,7 +6600,7 @@ "h": 8, "w": 12, "x": 12, - "y": 81 + "y": 113 }, "hiddenSeries": false, "id": 113, @@ -6594,7 +6633,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs.*_chunk_manager_num\", instance=~\"$instance\"}", @@ -6604,9 +6644,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "chunk_manager_num", "tooltip": { "shared": true, @@ -6615,9 +6653,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -6640,17 +6676,29 @@ } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "manager metric", "type": "row" }, { "collapsed": true, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 17 + "y": 33 }, "id": 121, "panels": [ @@ -6659,7 +6707,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -6672,7 +6723,7 @@ "h": 8, "w": 12, "x": 0, - "y": 18 + "y": 50 }, "hiddenSeries": false, "id": 122, @@ -6705,7 +6756,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": 
true, "expr": "{__name__=~\"diskcache_cache_count\", instance=~\"$instance\"}", @@ -6715,9 +6767,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "diskcache_cache_count", "tooltip": { "shared": true, @@ -6726,9 +6776,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -6755,7 +6803,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "bytes" @@ -6768,7 +6819,7 @@ "h": 8, "w": 12, "x": 12, - "y": 18 + "y": 50 }, "hiddenSeries": false, "id": 123, @@ -6801,7 +6852,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"diskcache_cache_bytes\", instance=~\"$instance\"}", @@ -6811,9 +6863,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "diskcache_cache_bytes", "tooltip": { "shared": true, @@ -6822,9 +6872,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -6851,7 +6899,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "none" @@ -6864,7 +6915,7 @@ "h": 8, "w": 12, "x": 0, - "y": 26 + "y": 58 }, "hiddenSeries": false, "id": 124, @@ -6897,7 +6948,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"icache_cache_count\", instance=~\"$instance\"}", @@ -6907,9 +6959,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "icache_cache_count", "tooltip": { "shared": true, @@ -6918,9 +6968,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -6947,7 +6995,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "none" @@ -6960,7 +7011,7 @@ "h": 8, "w": 12, "x": 12, - "y": 26 + "y": 58 }, "hiddenSeries": false, "id": 125, @@ -6993,7 +7044,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"inode_s3_chunk_info_size\", instance=~\"$instance\"}", @@ -7003,9 +7055,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "inode_s3_chunk_info_size", "tooltip": { "shared": true, @@ -7014,9 +7064,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -7043,7 +7091,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -7056,7 +7107,7 @@ "h": 8, "w": 12, "x": 0, - "y": 34 + "y": 66 }, "hiddenSeries": false, "id": 126, @@ -7089,7 +7140,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"dcache_cache_count\", instance=~\"$instance\"}", @@ -7099,9 +7151,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - 
"timeShift": null, "title": "dcache_cache_count", "tooltip": { "shared": true, @@ -7110,9 +7160,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -7139,7 +7187,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -7152,7 +7203,7 @@ "h": 8, "w": 12, "x": 12, - "y": 34 + "y": 66 }, "hiddenSeries": false, "id": 127, @@ -7185,7 +7236,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"dcache_cache_bytes\", instance=~\"$instance\"}", @@ -7195,9 +7247,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "dcache_cache_bytes", "tooltip": { "shared": true, @@ -7206,9 +7256,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -7231,17 +7279,29 @@ } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "lru cache", "type": "row" }, { "collapsed": true, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 18 + "y": 34 }, "id": 115, "panels": [ @@ -7250,7 +7310,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -7263,7 +7326,7 @@ "h": 8, "w": 12, "x": 0, - "y": 115 + "y": 147 }, "hiddenSeries": false, "id": 116, @@ -7296,7 +7359,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs.*_read_data_cache_num\", instance=~\"$instance\"}", @@ -7306,9 +7370,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "read_data_cache_num", "tooltip": { "shared": true, @@ -7317,9 +7379,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -7346,7 +7406,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -7359,7 +7422,7 @@ "h": 8, "w": 12, "x": 12, - "y": 115 + "y": 147 }, "hiddenSeries": false, "id": 117, @@ -7392,7 +7455,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs.*_write_data_cache_num\", instance=~\"$instance\"}", @@ -7402,9 +7466,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_data_cache_num", "tooltip": { "shared": true, @@ -7413,9 +7475,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -7442,7 +7502,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -7455,7 +7518,7 @@ "h": 8, "w": 12, "x": 0, - "y": 123 + "y": 155 }, "hiddenSeries": false, "id": 118, @@ -7488,7 +7551,8 @@ "targets": [ { "datasource": { - "type": 
"prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs.*_read_data_cache_byte\", instance=~\"$instance\"}", @@ -7498,9 +7562,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "read_data_cache_byte", "tooltip": { "shared": true, @@ -7509,9 +7571,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -7538,7 +7598,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -7551,7 +7614,7 @@ "h": 8, "w": 12, "x": 12, - "y": 123 + "y": 155 }, "hiddenSeries": false, "id": 119, @@ -7584,7 +7647,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs.*_write_data_cache_byte\", instance=~\"$instance\"}", @@ -7594,9 +7658,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "write_data_cache_byte", "tooltip": { "shared": true, @@ -7605,9 +7667,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -7634,7 +7694,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "bytes" @@ -7647,7 +7710,7 @@ "h": 8, "w": 12, "x": 0, - "y": 131 + "y": 163 }, "hiddenSeries": false, "id": 178, @@ -7680,7 +7743,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_disk_cache_[[fs:regex]]_diskcache_usedbytes\", instance=~\"$instance\"}", @@ -7690,9 +7754,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "diskcache_usedbytes", "tooltip": { "shared": true, @@ -7701,9 +7763,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -7726,17 +7786,29 @@ } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "diskcache cache num/byte", "type": "row" }, { "collapsed": true, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 19 + "y": 35 }, "id": 32, "panels": [ @@ -7745,7 +7817,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -7758,7 +7833,7 @@ "h": 6, "w": 12, "x": 0, - "y": 140 + "y": 172 }, "hiddenSeries": false, "id": 36, @@ -7791,7 +7866,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*mount_fs_qps\", instance=~\"$instance\"}", @@ -7801,9 +7877,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "mountfs_qps", "tooltip": { "shared": true, @@ -7812,9 +7886,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -7841,7 +7913,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": 
null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -7854,7 +7929,7 @@ "h": 6, "w": 12, "x": 12, - "y": 140 + "y": 172 }, "hiddenSeries": false, "id": 128, @@ -7887,7 +7962,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*_mount_fs_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -7897,9 +7973,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "mountfs_latency", "tooltip": { "shared": true, @@ -7908,9 +7982,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -7937,7 +8009,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -7950,7 +8025,7 @@ "h": 6, "w": 12, "x": 0, - "y": 146 + "y": 178 }, "hiddenSeries": false, "id": 129, @@ -7983,7 +8058,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*umount_fs_qps\", instance=~\"$instance\"}", @@ -7993,9 +8069,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "umountfs_qps", "tooltip": { "shared": true, @@ -8004,9 +8078,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -8033,7 +8105,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -8046,7 +8121,7 @@ "h": 6, "w": 12, "x": 12, - "y": 146 + "y": 178 }, "hiddenSeries": false, "id": 130, @@ -8079,7 +8154,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*umount_fs_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -8089,9 +8165,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "umountfs_latency", "tooltip": { "shared": true, @@ -8100,9 +8174,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -8129,7 +8201,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -8142,7 +8217,7 @@ "h": 6, "w": 12, "x": 0, - "y": 152 + "y": 184 }, "hiddenSeries": false, "id": 131, @@ -8175,7 +8250,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*get_fs_info_qps\", instance=~\"$instance\"}", @@ -8185,9 +8261,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "get_fsinfo_qps", "tooltip": { "shared": true, @@ -8196,9 +8270,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -8225,7 +8297,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": 
"PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -8238,7 +8313,7 @@ "h": 6, "w": 12, "x": 12, - "y": 152 + "y": 184 }, "hiddenSeries": false, "id": 132, @@ -8271,7 +8346,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*get_fs_info_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -8281,9 +8357,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "get_fsinfo_latency", "tooltip": { "shared": true, @@ -8292,9 +8366,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -8321,7 +8393,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -8334,7 +8409,7 @@ "h": 6, "w": 12, "x": 0, - "y": 158 + "y": 190 }, "hiddenSeries": false, "id": 133, @@ -8367,7 +8442,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*get_meta_server_info_qps\", instance=~\"$instance\"}", @@ -8377,9 +8453,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "get_metaserver_info_qps", "tooltip": { "shared": true, @@ -8388,9 +8462,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -8417,7 +8489,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -8430,7 +8505,7 @@ "h": 6, "w": 12, "x": 12, - "y": 158 + "y": 190 }, "hiddenSeries": false, "id": 134, @@ -8463,7 +8538,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*get_meta_server_info_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -8473,9 +8549,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "get_metaserver_info_latency", "tooltip": { "shared": true, @@ -8484,9 +8558,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -8513,7 +8585,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -8526,7 +8601,7 @@ "h": 6, "w": 12, "x": 0, - "y": 164 + "y": 196 }, "hiddenSeries": false, "id": 135, @@ -8559,7 +8634,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*get_meta_server_list_in_copysets_qps\", instance=~\"$instance\"}", @@ -8569,9 +8645,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "get_metaserver_list_in_copysets_qps", "tooltip": { "shared": true, @@ -8580,9 +8654,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -8609,7 +8681,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + 
"type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -8622,7 +8697,7 @@ "h": 6, "w": 12, "x": 12, - "y": 164 + "y": 196 }, "hiddenSeries": false, "id": 136, @@ -8655,7 +8730,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*get_meta_server_list_in_copysets_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -8665,9 +8741,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "get_metaserver_list_in_copysets_latency", "tooltip": { "shared": true, @@ -8676,9 +8750,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -8705,7 +8777,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -8718,7 +8793,7 @@ "h": 6, "w": 12, "x": 0, - "y": 170 + "y": 202 }, "hiddenSeries": false, "id": 137, @@ -8751,7 +8826,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*create_partition_qps\", instance=~\"$instance\"}", @@ -8761,9 +8837,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "create_partition_qps", "tooltip": { "shared": true, @@ -8772,9 +8846,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -8801,7 +8873,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -8814,7 +8889,7 @@ "h": 6, "w": 12, "x": 12, - "y": 170 + "y": 202 }, "hiddenSeries": false, "id": 138, @@ -8847,7 +8922,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*create_partition_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -8857,9 +8933,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "create_partition_latency", "tooltip": { "shared": true, @@ -8868,9 +8942,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -8897,7 +8969,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -8910,7 +8985,7 @@ "h": 6, "w": 12, "x": 0, - "y": 176 + "y": 208 }, "hiddenSeries": false, "id": 139, @@ -8943,7 +9018,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*get_copyset_of_partitions_qps\", instance=~\"$instance\"}", @@ -8953,9 +9029,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "get_copyset_of_partitions_qps", "tooltip": { "shared": true, @@ -8964,9 +9038,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -8993,7 +9065,10 @@ "bars": false, "dashLength": 10, "dashes": false, - 
"datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -9006,7 +9081,7 @@ "h": 6, "w": 12, "x": 12, - "y": 176 + "y": 208 }, "hiddenSeries": false, "id": 140, @@ -9038,7 +9113,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*get_copyset_of_partitions_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -9048,9 +9124,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "get_copyset_of_partitions_latency", "tooltip": { "shared": true, @@ -9059,9 +9133,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -9088,7 +9160,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -9101,7 +9176,7 @@ "h": 6, "w": 12, "x": 0, - "y": 182 + "y": 214 }, "hiddenSeries": false, "id": 141, @@ -9133,7 +9208,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*list_partition_qps\", instance=~\"$instance\"}", @@ -9143,9 +9219,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "list_partition_qps", "tooltip": { "shared": true, @@ -9154,9 +9228,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -9183,7 +9255,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -9196,7 +9271,7 @@ "h": 6, "w": 12, "x": 12, - "y": 182 + "y": 214 }, "hiddenSeries": false, "id": 142, @@ -9229,7 +9304,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*list_partition_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -9239,9 +9315,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "list_partition_latency", "tooltip": { "shared": true, @@ -9250,9 +9324,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -9279,7 +9351,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -9292,7 +9367,7 @@ "h": 6, "w": 12, "x": 0, - "y": 188 + "y": 220 }, "hiddenSeries": false, "id": 143, @@ -9325,7 +9400,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*alloc_s3_chunk_id_qps\", instance=~\"$instance\"}", @@ -9335,9 +9411,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "alloc_s3_chunkid_qps", "tooltip": { "shared": true, @@ -9346,9 +9420,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -9375,7 +9447,10 @@ "bars": false, "dashLength": 10, "dashes": false, 
- "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -9388,7 +9463,7 @@ "h": 6, "w": 12, "x": 12, - "y": 188 + "y": 220 }, "hiddenSeries": false, "id": 144, @@ -9421,7 +9496,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*alloc_s3_chunk_id_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -9431,9 +9507,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "alloc_s3_chunkid_latency", "tooltip": { "shared": true, @@ -9442,9 +9516,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -9471,7 +9543,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -9484,7 +9559,7 @@ "h": 6, "w": 12, "x": 0, - "y": 194 + "y": 226 }, "hiddenSeries": false, "id": 145, @@ -9517,7 +9592,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*refresh_session_qps\", instance=~\"$instance\"}", @@ -9527,9 +9603,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "refresh_session_qps", "tooltip": { "shared": true, @@ -9538,9 +9612,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -9567,7 +9639,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -9580,7 +9655,7 @@ "h": 6, "w": 12, "x": 12, - "y": 194 + "y": 226 }, "hiddenSeries": false, "id": 146, @@ -9613,7 +9688,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*refresh_session_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -9623,9 +9699,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "refresh_session_latency", "tooltip": { "shared": true, @@ -9634,9 +9708,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -9663,7 +9735,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -9676,7 +9751,7 @@ "h": 6, "w": 12, "x": 0, - "y": 200 + "y": 232 }, "hiddenSeries": false, "id": 147, @@ -9709,7 +9784,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*get_latest_tx_id_qps\", instance=~\"$instance\"}", @@ -9719,9 +9795,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "get_latest_txid_qps", "tooltip": { "shared": true, @@ -9730,9 +9804,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -9759,7 +9831,10 @@ "bars": false, "dashLength": 10, "dashes": false, - 
"datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -9772,7 +9847,7 @@ "h": 6, "w": 12, "x": 12, - "y": 200 + "y": 232 }, "hiddenSeries": false, "id": 148, @@ -9805,7 +9880,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*get_latest_tx_id_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -9815,9 +9891,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "get_latest_txid_latency", "tooltip": { "shared": true, @@ -9826,9 +9900,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -9855,7 +9927,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -9868,7 +9943,7 @@ "h": 6, "w": 12, "x": 0, - "y": 206 + "y": 238 }, "hiddenSeries": false, "id": 149, @@ -9901,7 +9976,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*commit_tx_qps\", instance=~\"$instance\"}", @@ -9911,9 +9987,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "commit_tx_qps", "tooltip": { "shared": true, @@ -9922,9 +9996,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -9951,7 +10023,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -9964,7 +10039,7 @@ "h": 6, "w": 12, "x": 12, - "y": 206 + "y": 238 }, "hiddenSeries": false, "id": 150, @@ -9997,7 +10072,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_mds_client.*commit_tx_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -10007,9 +10083,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "commit_tx_latency", "tooltip": { "shared": true, @@ -10018,9 +10092,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -10043,17 +10115,29 @@ } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "mds client latency", "type": "row" }, { "collapsed": true, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 20 + "y": 36 }, "id": 34, "panels": [ @@ -10062,7 +10146,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -10075,7 +10162,7 @@ "h": 6, "w": 12, "x": 0, - "y": 213 + "y": 245 }, "hiddenSeries": false, "id": 155, @@ -10108,7 +10195,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*create_inode_qps\", 
instance=~\"$instance\"}", @@ -10118,9 +10206,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "create_inode_qps", "tooltip": { "shared": true, @@ -10129,9 +10215,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -10158,7 +10242,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -10171,7 +10258,7 @@ "h": 6, "w": 12, "x": 12, - "y": 213 + "y": 245 }, "hiddenSeries": false, "id": 152, @@ -10204,7 +10291,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*create_inode_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -10214,9 +10302,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "create_inode_latency", "tooltip": { "shared": true, @@ -10225,9 +10311,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -10254,7 +10338,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -10267,7 +10354,7 @@ "h": 6, "w": 12, "x": 0, - "y": 219 + "y": 251 }, "hiddenSeries": false, "id": 153, @@ -10300,7 +10387,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*create_dentry_qps\", instance=~\"$instance\"}", @@ -10310,9 +10398,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "create_dentry_qps", "tooltip": { "shared": true, @@ -10321,9 +10407,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -10350,7 +10434,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -10363,7 +10450,7 @@ "h": 6, "w": 12, "x": 12, - "y": 219 + "y": 251 }, "hiddenSeries": false, "id": 156, @@ -10396,7 +10483,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*create_dentry_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -10406,9 +10494,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "create_dentry_latency", "tooltip": { "shared": true, @@ -10417,9 +10503,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -10446,7 +10530,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -10459,7 +10546,7 @@ "h": 6, "w": 12, "x": 0, - "y": 225 + "y": 257 }, "hiddenSeries": false, "id": 157, @@ -10492,7 +10579,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": 
"{__name__=~\"curvefs_metaserver_client.*get_inode_qps\", instance=~\"$instance\"}", @@ -10502,9 +10590,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "get_inode_qps", "tooltip": { "shared": true, @@ -10513,9 +10599,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -10542,7 +10626,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -10555,7 +10642,7 @@ "h": 6, "w": 12, "x": 12, - "y": 225 + "y": 257 }, "hiddenSeries": false, "id": 154, @@ -10588,7 +10675,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*get_inode_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -10598,9 +10686,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "get_inode_latency", "tooltip": { "shared": true, @@ -10609,9 +10695,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -10638,7 +10722,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -10651,7 +10738,7 @@ "h": 6, "w": 12, "x": 0, - "y": 231 + "y": 263 }, "hiddenSeries": false, "id": 151, @@ -10684,7 +10771,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*get_dentry_qps\", instance=~\"$instance\"}", @@ -10694,9 +10782,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "get_dentry_qps", "tooltip": { "shared": true, @@ -10705,9 +10791,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -10734,7 +10818,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -10747,7 +10834,7 @@ "h": 6, "w": 12, "x": 12, - "y": 231 + "y": 263 }, "hiddenSeries": false, "id": 158, @@ -10780,7 +10867,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*get_dentry_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -10790,9 +10878,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "get_dentry_latency", "tooltip": { "shared": true, @@ -10801,9 +10887,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -10830,7 +10914,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -10843,7 +10930,7 @@ "h": 6, "w": 12, "x": 0, - "y": 237 + "y": 269 }, "hiddenSeries": false, "id": 159, @@ -10876,7 +10963,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, 
"expr": "{__name__=~\"curvefs_metaserver_client.*list_dentry_qps\", instance=~\"$instance\"}", @@ -10886,9 +10974,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "list_dentry_qps", "tooltip": { "shared": true, @@ -10897,9 +10983,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -10926,7 +11010,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -10939,7 +11026,7 @@ "h": 6, "w": 12, "x": 12, - "y": 237 + "y": 269 }, "hiddenSeries": false, "id": 160, @@ -10972,7 +11059,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*list_dentry_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -10982,9 +11070,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "list_dentry_latency", "tooltip": { "shared": true, @@ -10993,9 +11079,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -11022,7 +11106,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -11035,7 +11122,7 @@ "h": 6, "w": 12, "x": 0, - "y": 243 + "y": 275 }, "hiddenSeries": false, "id": 161, @@ -11068,7 +11155,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*delete_dentry_qps\", instance=~\"$instance\"}", @@ -11078,9 +11166,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "delete_dentry_qps", "tooltip": { "shared": true, @@ -11089,9 +11175,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -11118,7 +11202,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -11131,7 +11218,7 @@ "h": 6, "w": 12, "x": 12, - "y": 243 + "y": 275 }, "hiddenSeries": false, "id": 162, @@ -11164,7 +11251,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*delete_dentry_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -11174,9 +11262,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "delete_dentry_latency", "tooltip": { "shared": true, @@ -11185,9 +11271,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -11214,7 +11298,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -11227,7 +11314,7 @@ "h": 6, "w": 12, "x": 0, - "y": 249 + "y": 281 }, "hiddenSeries": false, "id": 163, @@ -11260,7 +11347,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": 
"PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*delete_inode_qps\", instance=~\"$instance\"}", @@ -11270,9 +11358,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "delete_inode_qps", "tooltip": { "shared": true, @@ -11281,9 +11367,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -11310,7 +11394,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -11323,7 +11410,7 @@ "h": 6, "w": 12, "x": 12, - "y": 249 + "y": 281 }, "hiddenSeries": false, "id": 164, @@ -11356,7 +11443,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*delete_inode_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -11366,9 +11454,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "delete_inode_latency", "tooltip": { "shared": true, @@ -11377,9 +11463,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -11406,7 +11490,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -11419,7 +11506,7 @@ "h": 6, "w": 12, "x": 0, - "y": 255 + "y": 287 }, "hiddenSeries": false, "id": 165, @@ -11452,7 +11539,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*update_inode_qps\", instance=~\"$instance\"}", @@ -11462,9 +11550,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "update_inode_qps", "tooltip": { "shared": true, @@ -11473,9 +11559,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -11502,7 +11586,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -11515,7 +11602,7 @@ "h": 6, "w": 12, "x": 12, - "y": 255 + "y": 287 }, "hiddenSeries": false, "id": 166, @@ -11548,7 +11635,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*update_inode_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -11558,9 +11646,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "update_inode_latency", "tooltip": { "shared": true, @@ -11569,9 +11655,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -11598,7 +11682,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -11611,7 +11698,7 @@ "h": 6, "w": 12, "x": 0, - "y": 261 + "y": 293 }, "hiddenSeries": false, "id": 167, @@ -11644,7 +11731,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + 
"type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*batch_get_inode_attr_qps\", instance=~\"$instance\"}", @@ -11654,9 +11742,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "batch_get_inodeattr_qps", "tooltip": { "shared": true, @@ -11665,9 +11751,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -11694,7 +11778,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -11707,7 +11794,7 @@ "h": 6, "w": 12, "x": 12, - "y": 261 + "y": 293 }, "hiddenSeries": false, "id": 168, @@ -11740,7 +11827,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*batch_get_inode_attr_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -11750,9 +11838,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "batch_get_inodeattr_latency", "tooltip": { "shared": true, @@ -11761,9 +11847,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -11790,7 +11874,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -11803,7 +11890,7 @@ "h": 6, "w": 12, "x": 0, - "y": 267 + "y": 299 }, "hiddenSeries": false, "id": 169, @@ -11836,7 +11923,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*batch_get_xattr_qps\", instance=~\"$instance\"}", @@ -11846,9 +11934,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "batch_get_xattr_qps", "tooltip": { "shared": true, @@ -11857,9 +11943,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -11886,7 +11970,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -11899,7 +11986,7 @@ "h": 6, "w": 12, "x": 12, - "y": 267 + "y": 299 }, "hiddenSeries": false, "id": 170, @@ -11932,7 +12019,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*batch_get_xattr_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -11942,9 +12030,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "batch_get_xattr_latency", "tooltip": { "shared": true, @@ -11953,9 +12039,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -11982,7 +12066,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -11995,7 +12082,7 @@ "h": 6, "w": 12, "x": 0, - "y": 273 + "y": 305 }, "hiddenSeries": false, "id": 171, @@ 
-12028,7 +12115,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*append_s3_chunk_info_qps\", instance=~\"$instance\"}", @@ -12038,9 +12126,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "append_s3_chunk_info_qps", "tooltip": { "shared": true, @@ -12049,9 +12135,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -12078,7 +12162,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -12091,7 +12178,7 @@ "h": 6, "w": 12, "x": 12, - "y": 273 + "y": 305 }, "hiddenSeries": false, "id": 172, @@ -12124,7 +12211,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*append_s3_chunk_info_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -12134,9 +12222,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "append_s3_chunk_info_latency", "tooltip": { "shared": true, @@ -12145,9 +12231,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -12174,7 +12258,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "short" @@ -12187,7 +12274,7 @@ "h": 6, "w": 12, "x": 0, - "y": 279 + "y": 311 }, "hiddenSeries": false, "id": 173, @@ -12220,7 +12307,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*prepare_rename_tx_qps\", instance=~\"$instance\"}", @@ -12230,9 +12318,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "prepare_rename_tx_qps", "tooltip": { "shared": true, @@ -12241,9 +12327,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -12270,7 +12354,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "unit": "µs" @@ -12283,7 +12370,7 @@ "h": 6, "w": 12, "x": 12, - "y": 279 + "y": 311 }, "hiddenSeries": false, "id": 174, @@ -12316,7 +12403,8 @@ "targets": [ { "datasource": { - "type": "prometheus" + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, "exemplar": true, "expr": "{__name__=~\"curvefs_metaserver_client.*prepare_rename_tx_lat_[[quantile:regex]]\", job=\"client\", instance=~\"$instance\"}", @@ -12326,9 +12414,7 @@ } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, "title": "prepare_rename_tx_latency", "tooltip": { "shared": true, @@ -12337,9 +12423,7 @@ }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, @@ -12362,27 +12446,336 @@ } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "metaserver client latency", "type": "row" + }, + { + "collapsed": false, + "gridPos": { 
+ "h": 1, + "w": 24, + "x": 0, + "y": 37 + }, + "id": 180, + "panels": [], + "title": "warmup", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 38 + }, + "id": 182, + "options": { + "legend": { + "calcs": [ + "max", + "min", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "curvefs_warmup_s3_cached_bps", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "warmup_s3_bps", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 38 + }, + "id": 184, + "options": { + "legend": { + "calcs": [ + "max", + "min", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "curvefs_warmup_s3_cached_qps", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "warmup_s3_qps", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": 
"linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 46 + }, + "id": 186, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "curvefs_warmup_s3_cache_size", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "warmup_s3_size", + "type": "timeseries" } ], "refresh": "5s", - "schemaVersion": 30, + "revision": 1, + "schemaVersion": 38, "style": "dark", "tags": [], "templating": { "list": [ { - "allValue": null, "current": { "selected": false, "text": "All", "value": "$__all" }, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "definition": "{__name__=~\"bthread_count\", job=\"client\"}", - "description": null, - "error": null, "hide": 0, "includeAll": true, "label": "Addr", @@ -12400,7 +12793,6 @@ "type": "query" }, { - "allValue": null, "current": { "selected": true, "text": [ @@ -12410,10 +12802,11 @@ "$__all" ] }, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "definition": "{__name__=~\"curvefs_s3_.*_adaptor_write_bps\", job=\"client\"}", - "description": null, - "error": null, "hide": 0, "includeAll": true, "label": "fsname", @@ -12441,10 +12834,11 @@ "latency_99" ] }, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "definition": "{__name__=~\".*(latency_[0-9]*|latency)\", job=\"client\"}", - "description": null, - "error": null, "hide": 0, "includeAll": true, "label": "latency_quantile", @@ -12462,7 +12856,6 @@ "type": "query" }, { - "allValue": null, "current": { "selected": true, "text": [ @@ -12472,10 +12865,11 @@ "$__all" ] }, - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "definition": "{__name__=~\"curvefs_client_op_.*_inflight_num\", job=\"client\"}", - "description": null, - "error": null, "hide": 0, "includeAll": true, "label": "op", @@ -12502,5 +12896,6 @@ "timezone": "", "title": "client", "uid": "I2_uSSenk", - "version": 107 -} + "version": 1, + "weekStart": "" +} \ No newline at end of file diff --git a/curvefs/proto/common.proto b/curvefs/proto/common.proto index c0d227f1ec..4c72525de4 100644 --- a/curvefs/proto/common.proto +++ b/curvefs/proto/common.proto @@ -58,6 +58,7 @@ message S3Info { required string bucketname = 4; required uint64 blockSize = 5; required uint64 chunkSize = 6; + optional uint32 objectPrefix = 7; } enum PartitionStatus { diff --git a/curvefs/proto/mds.proto b/curvefs/proto/mds.proto index 8680e6b5a3..36f243bbae 100644 --- a/curvefs/proto/mds.proto +++ b/curvefs/proto/mds.proto @@ -191,6 +191,7 @@ message RefreshSessionRequest { message RefreshSessionResponse { required FSStatusCode statusCode = 1; repeated topology.PartitionTxId latestTxIdList = 2; + optional bool enableSumInDir = 3; } message DLockValue { diff --git 
a/curvefs/src/client/BUILD b/curvefs/src/client/BUILD index 9bc190a388..b296af86f8 100644 --- a/curvefs/src/client/BUILD +++ b/curvefs/src/client/BUILD @@ -43,6 +43,8 @@ cc_library( "s3/*.h", "volume/*.cpp", "volume/*.h", + "filesystem/*.cpp", + "filesystem/*.h", "warmup/*.h", "warmup/*.cpp", ], @@ -71,10 +73,14 @@ cc_library( "//curvefs/src/volume", "//curvefs/src/common:metric_utils", "//curvefs/src/common:dynamic_vlog", + "//curvefs/src/common:threading", "@com_google_absl//absl/memory", "@com_google_absl//absl/strings", + "@com_google_absl//absl/synchronization", + "@com_google_absl//absl/strings:str_format", "@com_google_absl//absl/meta:type_traits", "@com_google_absl//absl/types:optional", "@com_google_googletest//:gtest_prod", + "@spdlog//:spdlog", ], ) diff --git a/curvefs/src/client/async_request_closure.cpp b/curvefs/src/client/async_request_closure.cpp index 0d99e69fee..aac58c932b 100644 --- a/curvefs/src/client/async_request_closure.cpp +++ b/curvefs/src/client/async_request_closure.cpp @@ -28,7 +28,7 @@ #include #include "curvefs/proto/metaserver.pb.h" -#include "curvefs/src/client/error_code.h" +#include "curvefs/src/client/filesystem/error.h" #include "curvefs/src/client/inode_wrapper.h" namespace curvefs { @@ -63,7 +63,7 @@ CURVEFS_ERROR UpdateVolumeExtentClosure::Wait() { cond_.wait(lk); } - return MetaStatusCodeToCurvefsErrCode(GetStatusCode()); + return ToFSError(GetStatusCode()); } void UpdateVolumeExtentClosure::Run() { diff --git a/curvefs/src/client/async_request_closure.h b/curvefs/src/client/async_request_closure.h index 0e75cc78e2..2d83bfcd0e 100644 --- a/curvefs/src/client/async_request_closure.h +++ b/curvefs/src/client/async_request_closure.h @@ -28,12 +28,15 @@ #include -#include "curvefs/src/client/error_code.h" #include "curvefs/src/client/rpcclient/task_excutor.h" +#include "curvefs/src/client/filesystem/error.h" namespace curvefs { namespace client { +using ::curvefs::client::filesystem::CURVEFS_ERROR; +using ::curvefs::client::filesystem::ToFSError; + class InodeWrapper; namespace internal { diff --git a/curvefs/src/client/client_operator.cpp b/curvefs/src/client/client_operator.cpp index e04b598882..8d44a5150f 100644 --- a/curvefs/src/client/client_operator.cpp +++ b/curvefs/src/client/client_operator.cpp @@ -24,13 +24,14 @@ #include "src/common/uuid.h" #include "curvefs/src/client/client_operator.h" - +#include "curvefs/src/client/filesystem/error.h" namespace curvefs { namespace client { using ::curve::common::UUIDGenerator; using ::curvefs::metaserver::DentryFlag; using ::curvefs::mds::topology::PartitionTxId; +using ::curvefs::client::filesystem::ToFSError; #define LOG_ERROR(action, rc) \ LOG(ERROR) << action << " failed, retCode = " << rc \ @@ -95,7 +96,7 @@ CURVEFS_ERROR RenameOperator::GetTxId(uint32_t fsId, if (rc != MetaStatusCode::OK) { LOG_ERROR("GetTxId", rc); } - return MetaStatusCodeToCurvefsErrCode(rc); + return ToFSError(rc); } CURVEFS_ERROR RenameOperator::GetLatestTxIdWithLock() { @@ -204,7 +205,7 @@ CURVEFS_ERROR RenameOperator::PrepareRenameTx( LOG_ERROR("PrepareRenameTx", rc); } - return MetaStatusCodeToCurvefsErrCode(rc); + return ToFSError(rc); } CURVEFS_ERROR RenameOperator::PrepareTx() { @@ -418,8 +419,6 @@ CURVEFS_ERROR RenameOperator::UpdateInodeCtime() { } void RenameOperator::UpdateCache() { - dentryManager_->DeleteCache(parentId_, name_); - dentryManager_->InsertOrReplaceCache(newDentry_); SetTxId(srcPartitionId_, srcTxId_ + 1); SetTxId(dstPartitionId_, dstTxId_ + 1); } diff --git a/curvefs/src/client/common/common.cpp 
b/curvefs/src/client/common/common.cpp index 7cf6bfeb21..6567792c0c 100644 --- a/curvefs/src/client/common/common.cpp +++ b/curvefs/src/client/common/common.cpp @@ -105,6 +105,19 @@ WarmupType GetWarmupType(const std::string& type) { return ret; } +const char kCurveFsWarmupStorageDisk[] = "disk"; +const char kCurveFsWarmupStorageKvclient[] = "kvclient"; + +WarmupStorageType GetWarmupStorageType(const std::string &type) { + auto ret = WarmupStorageType::kWarmupStorageTypeUnknown; + if (type == kCurveFsWarmupStorageDisk) { + ret = WarmupStorageType::kWarmupStorageTypeDisk; + } else if (type == kCurveFsWarmupStorageKvclient) { + ret = WarmupStorageType::kWarmupStorageTypeKvClient; + } + return ret; +} + using ::curve::common::StringToUll; // if direction is true means '+', false means '-' diff --git a/curvefs/src/client/common/common.h b/curvefs/src/client/common/common.h index e89f50fd9a..a1a1d704c2 100644 --- a/curvefs/src/client/common/common.h +++ b/curvefs/src/client/common/common.h @@ -69,6 +69,9 @@ const uint32_t MAX_XATTR_VALUE_LENGTH = 64 * 1024; const char kCurveFsWarmupXAttr[] = "curvefs.warmup.op"; + +constexpr int kWarmupOpNum = 4; + enum class WarmupOpType { kWarmupOpUnknown = 0, kWarmupOpAdd = 1, @@ -83,7 +86,15 @@ enum class WarmupType { kWarmupTypeSingle = 2, }; -WarmupType GetWarmupType(const std::string& type); +WarmupType GetWarmupType(const std::string &type); + +enum class WarmupStorageType { + kWarmupStorageTypeUnknown = 0, + kWarmupStorageTypeDisk = 1, + kWarmupStorageTypeKvClient = 2, +}; + +WarmupStorageType GetWarmupStorageType(const std::string &type); enum class FileHandle : uint64_t { kDefaultValue = 0, diff --git a/curvefs/src/client/common/config.cpp b/curvefs/src/client/common/config.cpp index ab7abd49d9..62dabcb726 100644 --- a/curvefs/src/client/common/config.cpp +++ b/curvefs/src/client/common/config.cpp @@ -50,6 +50,56 @@ DEFINE_bool(enableCto, true, "acheieve cto consistency"); DEFINE_bool(useFakeS3, false, "Use fake s3 to inject more metadata for testing metaserver"); DEFINE_bool(supportKVcache, false, "use kvcache to speed up sharing"); +DEFINE_bool(access_logging, true, "enable access log"); + +/** + * use curl -L fuseclient:port/flags/fuseClientAvgWriteBytes?setvalue=true + * for dynamic parameter configuration + */ +static bool pass_uint64(const char *, uint64_t) { return true; } + +DEFINE_uint64(fuseClientAvgWriteBytes, 0, + "the write throttle bps of fuse client"); +DEFINE_validator(fuseClientAvgWriteBytes, &pass_uint64); +DEFINE_uint64(fuseClientBurstWriteBytes, 0, + "the write burst bps of fuse client"); +DEFINE_validator(fuseClientBurstWriteBytes, &pass_uint64); +DEFINE_uint64(fuseClientBurstWriteBytesSecs, 180, + "the times that write burst bps can continue"); +DEFINE_validator(fuseClientBurstWriteBytesSecs, &pass_uint64); + + +DEFINE_uint64(fuseClientAvgWriteIops, 0, + "the write throttle iops of fuse client"); +DEFINE_validator(fuseClientAvgWriteIops, &pass_uint64); +DEFINE_uint64(fuseClientBurstWriteIops, 0, + "the write burst iops of fuse client"); +DEFINE_validator(fuseClientBurstWriteIops, &pass_uint64); +DEFINE_uint64(fuseClientBurstWriteIopsSecs, 180, + "the times that write burst iops can continue"); +DEFINE_validator(fuseClientBurstWriteIopsSecs, &pass_uint64); + + +DEFINE_uint64(fuseClientAvgReadBytes, 0, + "the Read throttle bps of fuse client"); +DEFINE_validator(fuseClientAvgReadBytes, &pass_uint64); +DEFINE_uint64(fuseClientBurstReadBytes, 0, + "the Read burst bps of fuse client"); +DEFINE_validator(fuseClientBurstReadBytes, 
&pass_uint64); +DEFINE_uint64(fuseClientBurstReadBytesSecs, 180, + "the times that Read burst bps can continue"); +DEFINE_validator(fuseClientBurstReadBytesSecs, &pass_uint64); + + +DEFINE_uint64(fuseClientAvgReadIops, 0, + "the Read throttle iops of fuse client"); +DEFINE_validator(fuseClientAvgReadIops, &pass_uint64); +DEFINE_uint64(fuseClientBurstReadIops, 0, + "the Read burst iops of fuse client"); +DEFINE_validator(fuseClientBurstReadIops, &pass_uint64); +DEFINE_uint64(fuseClientBurstReadIopsSecs, 180, + "the times that Read burst iops can continue"); +DEFINE_validator(fuseClientBurstReadIopsSecs, &pass_uint64); void InitMdsOption(Configuration *conf, MdsOption *mdsOpt) { conf->GetValueFatalIfFail("mdsOpt.mdsMaxRetryMS", &mdsOpt->mdsMaxRetryMS); @@ -157,8 +207,6 @@ void InitDiskCacheOption(Configuration *conf, void InitS3Option(Configuration *conf, S3Option *s3Opt) { conf->GetValueFatalIfFail("s3.fakeS3", &FLAGS_useFakeS3); - conf->GetValueFatalIfFail("s3.fuseMaxSize", - &s3Opt->s3ClientAdaptorOpt.fuseMaxSize); conf->GetValueFatalIfFail("s3.pageSize", &s3Opt->s3ClientAdaptorOpt.pageSize); conf->GetValueFatalIfFail("s3.prefetchBlocks", @@ -175,6 +223,8 @@ void InitS3Option(Configuration *conf, S3Option *s3Opt) { &s3Opt->s3ClientAdaptorOpt.writeCacheMaxByte); conf->GetValueFatalIfFail("s3.readCacheMaxByte", &s3Opt->s3ClientAdaptorOpt.readCacheMaxByte); + conf->GetValueFatalIfFail("s3.readCacheThreads", + &s3Opt->s3ClientAdaptorOpt.readCacheThreads); conf->GetValueFatalIfFail("s3.nearfullRatio", &s3Opt->s3ClientAdaptorOpt.nearfullRatio); conf->GetValueFatalIfFail("s3.baseSleepUs", @@ -243,6 +293,55 @@ void InitKVClientManagerOpt(Configuration *conf, &config->getThreadPooln); } +void InitFileSystemOption(Configuration* c, FileSystemOption* option) { + c->GetValueFatalIfFail("fs.cto", &option->cto); + c->GetValueFatalIfFail("fs.cto", &FLAGS_enableCto); + c->GetValueFatalIfFail("fs.disableXattr", &option->disableXattr); + c->GetValueFatalIfFail("fs.maxNameLength", &option->maxNameLength); + c->GetValueFatalIfFail("fs.accessLogging", &FLAGS_access_logging); + { // kernel cache option + auto o = &option->kernelCacheOption; + c->GetValueFatalIfFail("fs.kernelCache.attrTimeoutSec", + &o->attrTimeoutSec); + c->GetValueFatalIfFail("fs.kernelCache.dirAttrTimeoutSec", + &o->dirAttrTimeoutSec); + c->GetValueFatalIfFail("fs.kernelCache.entryTimeoutSec", + &o->entryTimeoutSec); + c->GetValueFatalIfFail("fs.kernelCache.dirEntryTimeoutSec", + &o->dirEntryTimeoutSec); + } + { // lookup cache option + auto o = &option->lookupCacheOption; + c->GetValueFatalIfFail("fs.lookupCache.lruSize", + &o->lruSize); + c->GetValueFatalIfFail("fs.lookupCache.negativeTimeoutSec", + &o->negativeTimeoutSec); + c->GetValueFatalIfFail("fs.lookupCache.minUses", + &o->minUses); + } + { // dir cache option + auto o = &option->dirCacheOption; + c->GetValueFatalIfFail("fs.dirCache.lruSize", &o->lruSize); + } + { // open file option + auto o = &option->openFilesOption; + c->GetValueFatalIfFail("fs.openFile.lruSize", &o->lruSize); + } + { // attr watcher option + auto o = &option->attrWatcherOption; + c->GetValueFatalIfFail("fs.attrWatcher.lruSize", &o->lruSize); + } + { // rpc option + auto o = &option->rpcOption; + c->GetValueFatalIfFail("fs.rpc.listDentryLimit", &o->listDentryLimit); + } + { // defer sync option + auto o = &option->deferSyncOption; + c->GetValueFatalIfFail("fs.deferSync.delay", &o->delay); + c->GetValueFatalIfFail("fs.deferSync.deferDirMtime", &o->deferDirMtime); + } +} + void SetBrpcOpt(Configuration *conf) { 
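The pass-through `pass_uint64` validators registered above are what make the `curl ... ?setvalue=true` comment work: brpc's built-in `/flags` service only accepts updates for gflags that have a validator registered. The same mechanism is reachable in-process through the standard gflags API; a minimal hedged sketch follows, with the flag name taken from this hunk and the target value chosen arbitrarily.

```cpp
// Minimal sketch: raise the fuse client write throttle at runtime.
// SetCommandLineOption() invokes the registered validator (pass_uint64 here)
// and returns an empty string if the update is rejected.
#include <string>

#include <gflags/gflags.h>
#include <glog/logging.h>

DECLARE_uint64(fuseClientAvgWriteBytes);  // defined in common/config.cpp above

void RaiseWriteThrottle() {
    const std::string result = google::SetCommandLineOption(
        "fuseClientAvgWriteBytes", "104857600");  // 100 MiB/s, example value only
    LOG_IF(WARNING, result.empty())
        << "failed to update fuseClientAvgWriteBytes at runtime";
}
```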
curve::common::GflagsLoadValueFromConfIfCmdNotSet dummy; dummy.Load(conf, "defer_close_second", "rpc.defer.close.second", @@ -263,36 +362,16 @@ void InitFuseClientOption(Configuration *conf, FuseClientOption *clientOption) { InitLeaseOpt(conf, &clientOption->leaseOpt); InitRefreshDataOpt(conf, &clientOption->refreshDataOption); InitKVClientManagerOpt(conf, &clientOption->kvClientManagerOpt); + InitFileSystemOption(conf, &clientOption->fileSystemOption); - conf->GetValueFatalIfFail("fuseClient.attrTimeOut", - &clientOption->attrTimeOut); - conf->GetValueFatalIfFail("fuseClient.entryTimeOut", - &clientOption->entryTimeOut); conf->GetValueFatalIfFail("fuseClient.listDentryLimit", &clientOption->listDentryLimit); conf->GetValueFatalIfFail("fuseClient.listDentryThreads", &clientOption->listDentryThreads); - conf->GetValueFatalIfFail("fuseClient.flushPeriodSec", - &clientOption->flushPeriodSec); - conf->GetValueFatalIfFail("fuseClient.maxNameLength", - &clientOption->maxNameLength); - conf->GetValueFatalIfFail("fuseClient.iCacheLruSize", - &clientOption->iCacheLruSize); - conf->GetValueFatalIfFail("fuseClient.dCacheLruSize", - &clientOption->dCacheLruSize); - conf->GetValueFatalIfFail("fuseClient.enableICacheMetrics", - &clientOption->enableICacheMetrics); - conf->GetValueFatalIfFail("fuseClient.enableDCacheMetrics", - &clientOption->enableDCacheMetrics); - conf->GetValueFatalIfFail("fuseClient.lruTimeOutSec", - &clientOption->lruTimeOutSec); conf->GetValueFatalIfFail("client.dummyServer.startPort", &clientOption->dummyServerStartPort); conf->GetValueFatalIfFail("fuseClient.enableMultiMountPointRename", &clientOption->enableMultiMountPointRename); - conf->GetValueFatalIfFail("fuseClient.disableXattr", - &clientOption->disableXattr); - conf->GetValueFatalIfFail("fuseClient.cto", &FLAGS_enableCto); conf->GetValueFatalIfFail("fuseClient.downloadMaxRetryTimes", &clientOption->downloadMaxRetryTimes); conf->GetValueFatalIfFail("fuseClient.warmupThreadsNum", @@ -302,12 +381,33 @@ void InitFuseClientOption(Configuration *conf, FuseClientOption *clientOption) { << "Not found `fuseClient.enableSplice` in conf, use default value `" << std::boolalpha << clientOption->enableFuseSplice << '`'; - // if enableCto, attr and entry cache must invalid - if (FLAGS_enableCto) { - clientOption->attrTimeOut = 0; - clientOption->entryTimeOut = 0; - } + conf->GetValueFatalIfFail("fuseClient.throttle.avgWriteBytes", + &FLAGS_fuseClientAvgWriteBytes); + conf->GetValueFatalIfFail("fuseClient.throttle.burstWriteBytes", + &FLAGS_fuseClientBurstWriteBytes); + conf->GetValueFatalIfFail("fuseClient.throttle.burstWriteBytesSecs", + &FLAGS_fuseClientBurstWriteBytesSecs); + + conf->GetValueFatalIfFail("fuseClient.throttle.avgWriteIops", + &FLAGS_fuseClientAvgWriteIops); + conf->GetValueFatalIfFail("fuseClient.throttle.burstWriteIops", + &FLAGS_fuseClientBurstWriteIops); + conf->GetValueFatalIfFail("fuseClient.throttle.burstWriteIopsSecs", + &FLAGS_fuseClientBurstWriteIopsSecs); + + conf->GetValueFatalIfFail("fuseClient.throttle.avgReadBytes", + &FLAGS_fuseClientAvgReadBytes); + conf->GetValueFatalIfFail("fuseClient.throttle.burstReadBytes", + &FLAGS_fuseClientBurstReadBytes); + conf->GetValueFatalIfFail("fuseClient.throttle.burstReadBytesSecs", + &FLAGS_fuseClientBurstReadBytesSecs); + conf->GetValueFatalIfFail("fuseClient.throttle.avgReadIops", + &FLAGS_fuseClientAvgReadIops); + conf->GetValueFatalIfFail("fuseClient.throttle.burstReadIops", + &FLAGS_fuseClientBurstReadIops); + 
conf->GetValueFatalIfFail("fuseClient.throttle.burstReadIopsSecs", + &FLAGS_fuseClientBurstReadIopsSecs); SetBrpcOpt(conf); } @@ -315,6 +415,7 @@ void SetFuseClientS3Option(FuseClientOption *clientOption, const S3InfoOption &fsS3Opt) { clientOption->s3Opt.s3ClientAdaptorOpt.blockSize = fsS3Opt.blockSize; clientOption->s3Opt.s3ClientAdaptorOpt.chunkSize = fsS3Opt.chunkSize; + clientOption->s3Opt.s3ClientAdaptorOpt.objectPrefix = fsS3Opt.objectPrefix; clientOption->s3Opt.s3AdaptrOpt.s3Address = fsS3Opt.s3Address; clientOption->s3Opt.s3AdaptrOpt.ak = fsS3Opt.ak; clientOption->s3Opt.s3AdaptrOpt.sk = fsS3Opt.sk; @@ -329,6 +430,7 @@ void S3Info2FsS3Option(const curvefs::common::S3Info& s3, fsS3Opt->bucketName = s3.bucketname(); fsS3Opt->blockSize = s3.blocksize(); fsS3Opt->chunkSize = s3.chunksize(); + fsS3Opt->objectPrefix = s3.has_objectprefix() ? s3.objectprefix() : 0; } } // namespace common diff --git a/curvefs/src/client/common/config.h b/curvefs/src/client/common/config.h index baa8c69e28..ff0b399e93 100644 --- a/curvefs/src/client/common/config.h +++ b/curvefs/src/client/common/config.h @@ -122,7 +122,6 @@ struct DiskCacheOption { struct S3ClientAdaptorOption { uint64_t blockSize; uint64_t chunkSize; - uint32_t fuseMaxSize; uint64_t pageSize; uint32_t prefetchBlocks; uint32_t prefetchExecQueueNum; @@ -131,10 +130,12 @@ struct S3ClientAdaptorOption { uint32_t flushIntervalSec; uint64_t writeCacheMaxByte; uint64_t readCacheMaxByte; + uint32_t readCacheThreads; uint32_t nearfullRatio; uint32_t baseSleepUs; uint32_t maxReadRetryIntervalMs; uint32_t readRetryIntervalMs; + uint32_t objectPrefix; DiskCacheOption diskCacheOpt; }; @@ -173,6 +174,59 @@ struct RefreshDataOption { uint64_t maxDataSize = 1024; uint32_t refreshDataIntervalSec = 30; }; + +// { filesystem option +struct KernelCacheOption { + uint32_t entryTimeoutSec; + uint32_t dirEntryTimeoutSec; + uint32_t attrTimeoutSec; + uint32_t dirAttrTimeoutSec; +}; + +struct LookupCacheOption { + uint64_t lruSize; + uint32_t negativeTimeoutSec; + uint32_t minUses; +}; + +struct DirCacheOption { + uint64_t lruSize; + uint32_t timeoutSec; +}; + +struct AttrWatcherOption { + uint64_t lruSize; +}; + +struct OpenFilesOption { + uint64_t lruSize; + uint32_t deferSyncSecond; +}; + +struct RPCOption { + uint32_t listDentryLimit; +}; + +struct DeferSyncOption { + uint32_t delay; + bool deferDirMtime; +}; + +struct FileSystemOption { + bool cto; + bool disableXattr; + uint32_t maxNameLength; + uint32_t blockSize = 0x10000u; + KernelCacheOption kernelCacheOption; + LookupCacheOption lookupCacheOption; + DirCacheOption dirCacheOption; + OpenFilesOption openFilesOption; + AttrWatcherOption attrWatcherOption; + RPCOption rpcOption; + DeferSyncOption deferSyncOption; +}; +// } + struct FuseClientOption { MdsOption mdsOpt; MetaCacheOpt metaCacheOpt; @@ -186,22 +240,13 @@ struct FuseClientOption { LeaseOpt leaseOpt; RefreshDataOption refreshDataOption; KVClientManagerOpt kvClientManagerOpt; + FileSystemOption fileSystemOption; - double attrTimeOut; - double entryTimeOut; uint32_t listDentryLimit; uint32_t listDentryThreads; - uint32_t flushPeriodSec; - uint32_t maxNameLength; - uint64_t iCacheLruSize; - uint64_t dCacheLruSize; - bool enableICacheMetrics; - bool enableDCacheMetrics; - uint32_t lruTimeOutSec; uint32_t dummyServerStartPort; bool enableMultiMountPointRename = false; bool enableFuseSplice = false; - bool disableXattr = false; uint32_t downloadMaxRetryTimes; uint32_t warmupThreadsNum = 10; }; diff --git a/curvefs/src/client/curve_fuse_op.cpp 
b/curvefs/src/client/curve_fuse_op.cpp index fb49c9c83d..47d54cc3b1 100644 --- a/curvefs/src/client/curve_fuse_op.cpp +++ b/curvefs/src/client/curve_fuse_op.cpp @@ -29,7 +29,7 @@ #include "curvefs/src/client/curve_fuse_op.h" #include "curvefs/src/client/fuse_client.h" -#include "curvefs/src/client/error_code.h" +#include "curvefs/src/client/filesystem/error.h" #include "curvefs/src/client/common/config.h" #include "curvefs/src/client/common/common.h" #include "src/common/configuration.h" @@ -43,6 +43,8 @@ #include "curvefs/src/common/metric_utils.h" #include "curvefs/src/common/dynamic_vlog.h" #include "curvefs/src/client/warmup/warmup_manager.h" +#include "curvefs/src/client/filesystem/meta.h" +#include "curvefs/src/client/filesystem/access_log.h" using ::curve::common::Configuration; using ::curvefs::client::CURVEFS_ERROR; @@ -55,7 +57,16 @@ using ::curvefs::client::rpcclient::MDSBaseClient; using ::curvefs::client::metric::ClientOpMetric; using ::curvefs::common::LatencyUpdater; using ::curvefs::client::metric::InflightGuard; -using ::curvefs::client::common::FileHandle; +using ::curvefs::client::filesystem::EntryOut; +using ::curvefs::client::filesystem::AttrOut; +using ::curvefs::client::filesystem::FileOut; +using ::curvefs::client::filesystem::AccessLogGuard; +using ::curvefs::client::filesystem::StrFormat; +using ::curvefs::client::filesystem::InitAccessLog; +using ::curvefs::client::filesystem::Logger; +using ::curvefs::client::filesystem::StrEntry; +using ::curvefs::client::filesystem::StrAttr; +using ::curvefs::client::filesystem::StrMode; using ::curvefs::common::FLAGS_vlog_level; @@ -108,7 +119,7 @@ int GetFsInfo(const char* fsName, FsInfo* fsInfo) { } // namespace -int InitGlog(const char *confPath, const char *argv0) { +int InitLog(const char *confPath, const char *argv0) { Configuration conf; conf.SetConfigPath(confPath); if (!conf.LoadConfig()) { @@ -131,21 +142,24 @@ int InitGlog(const char *confPath, const char *argv0) { // initialize logging module google::InitGoogleLogging(argv0); + bool succ = InitAccessLog(FLAGS_log_dir); + if (!succ) { + return -1; + } return 0; } -int InitFuseClient(const char *confPath, const char* fsName, - const char *fsType, const char *mdsAddr) { +int InitFuseClient(const struct MountOption *mountOption) { g_clientOpMetric = new ClientOpMetric(); Configuration conf; - conf.SetConfigPath(confPath); + conf.SetConfigPath(mountOption->conf); if (!conf.LoadConfig()) { - LOG(ERROR) << "LoadConfig failed, confPath = " << confPath; + LOG(ERROR) << "LoadConfig failed, confPath = " << mountOption->conf; return -1; } - if (mdsAddr) - conf.SetStringValue("mdsOpt.rpcRetryOpt.addrs", mdsAddr); + if (mountOption->mdsAddr) + conf.SetStringValue("mdsOpt.rpcRetryOpt.addrs", mountOption->mdsAddr); conf.PrintConfig(); @@ -153,11 +167,12 @@ int InitFuseClient(const char *confPath, const char* fsName, curvefs::client::common::InitFuseClientOption(&conf, g_fuseClientOption); std::shared_ptr fsInfo = std::make_shared(); - if (GetFsInfo(fsName, fsInfo.get()) != 0) { + if (GetFsInfo(mountOption->fsName, fsInfo.get()) != 0) { return -1; } - std::string fsTypeStr = (fsType == nullptr) ? "" : fsType; + std::string fsTypeStr = + (mountOption->fsType == nullptr) ? 
"" : mountOption->fsType; std::string fsTypeMds; if (fsInfo->fstype() == FSType::TYPE_S3) { fsTypeMds = "s3"; @@ -187,6 +202,11 @@ int InitFuseClient(const char *confPath, const char* fsName, return -1; } + ret = g_ClientInstance->SetMountStatus(mountOption); + if (ret != CURVEFS_ERROR::OK) { + return -1; + } + return 0; } @@ -200,96 +220,17 @@ void UnInitFuseClient() { delete g_clientOpMetric; } -void FuseOpInit(void *userdata, struct fuse_conn_info *conn) { - CURVEFS_ERROR ret = g_ClientInstance->FuseOpInit(userdata, conn); - if (ret != CURVEFS_ERROR::OK) { - LOG(FATAL) << "FuseOpInit failed, ret = " << ret; - } - EnableSplice(conn); - LOG(INFO) << "Fuse op init success!"; -} - -void FuseOpDestroy(void *userdata) { - g_ClientInstance->FuseOpDestroy(userdata); -} - -void FuseReplyErrByErrCode(fuse_req_t req, CURVEFS_ERROR errcode) { - switch (errcode) { - case CURVEFS_ERROR::OK: - fuse_reply_err(req, 0); - break; - case CURVEFS_ERROR::NO_SPACE: - fuse_reply_err(req, ENOSPC); - break; - case CURVEFS_ERROR::NOTEXIST: - fuse_reply_err(req, ENOENT); - break; - case CURVEFS_ERROR::NOPERMISSION: - fuse_reply_err(req, EACCES); - break; - case CURVEFS_ERROR::INVALIDPARAM: - fuse_reply_err(req, EINVAL); - break; - case CURVEFS_ERROR::NOTEMPTY: - fuse_reply_err(req, ENOTEMPTY); - break; - case CURVEFS_ERROR::NOTSUPPORT: - fuse_reply_err(req, EOPNOTSUPP); - break; - case CURVEFS_ERROR::NAMETOOLONG: - fuse_reply_err(req, ENAMETOOLONG); - break; - case CURVEFS_ERROR::OUT_OF_RANGE: - fuse_reply_err(req, ERANGE); - break; - case CURVEFS_ERROR::NODATA: - fuse_reply_err(req, ENODATA); - break; - case CURVEFS_ERROR::EXISTS: - fuse_reply_err(req, EEXIST); - break; - default: - fuse_reply_err(req, EIO); - break; - } -} - -void FuseOpLookup(fuse_req_t req, fuse_ino_t parent, const char *name) { - InflightGuard guard(&g_clientOpMetric->opLookup.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opLookup.latency); - fuse_entry_param e; - CURVEFS_ERROR ret = g_ClientInstance->FuseOpLookup(req, parent, name, &e); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opLookup.ecount << 1; - FuseReplyErrByErrCode(req, ret); - return; - } - fuse_reply_entry(req, &e); -} - -void FuseOpGetAttr(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { - InflightGuard guard(&g_clientOpMetric->opGetAttr.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opGetAttr.latency); - struct stat attr; - CURVEFS_ERROR ret = g_ClientInstance->FuseOpGetAttr(req, ino, fi, &attr); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opGetAttr.ecount << 1; - FuseReplyErrByErrCode(req, ret); - return; - } - fuse_reply_attr(req, &attr, g_fuseClientOption->attrTimeOut); -} - int AddWarmupTask(curvefs::client::common::WarmupType type, fuse_ino_t key, - const std::string &path) { + const std::string &path, + curvefs::client::common::WarmupStorageType storageType) { int ret = 0; bool result = true; switch (type) { case curvefs::client::common::WarmupType::kWarmupTypeList: - result = g_ClientInstance->PutWarmFilelistTask(key); + result = g_ClientInstance->PutWarmFilelistTask(key, storageType); break; case curvefs::client::common::WarmupType::kWarmupTypeSingle: - result = g_ClientInstance->PutWarmFileTask(key, path); + result = g_ClientInstance->PutWarmFileTask(key, path, storageType); break; default: // not support add warmup type (warmup single file/dir or filelist) @@ -323,16 +264,23 @@ int Warmup(fuse_ino_t key, const std::string& name, const std::string& value) { std::vector opTypePath; 
curve::common::SplitString(value, "\n", &opTypePath); - if (opTypePath.size() != 3) { + if (opTypePath.size() != curvefs::client::common::kWarmupOpNum) { LOG(ERROR) << name << " has invalid xattr value " << value; return ERANGE; } + auto storageType = + curvefs::client::common::GetWarmupStorageType(opTypePath[3]); + if (storageType == + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeUnknown) { + LOG(ERROR) << name << " not support storage type: " << value; + return ERANGE; + } int ret = 0; switch (curvefs::client::common::GetWarmupOpType(opTypePath[0])) { case curvefs::client::common::WarmupOpType::kWarmupOpAdd: ret = AddWarmupTask(curvefs::client::common::GetWarmupType(opTypePath[1]), - key, opTypePath[2]); + key, opTypePath[2], storageType); if (ret != 0) { LOG(ERROR) << name << " has invalid xattr value " << value; } @@ -344,355 +292,593 @@ int Warmup(fuse_ino_t key, const std::string& name, const std::string& value) { return ret; } -void FuseOpSetXattr(fuse_req_t req, fuse_ino_t ino, const char* name, - const char* value, size_t size, int flags) { - std::string xattrValue(value, size); - VLOG(9) << "FuseOpSetXattr" - << " ino " << ino << " name " << name << " value " << xattrValue - << " flags " << flags; - if (strcmp(name, curvefs::client::common::kCurveFsWarmupXAttr) == 0) { - // warmup - int code = Warmup(ino, name, xattrValue); - fuse_reply_err(req, code); - } else { - // set xattr - CURVEFS_ERROR ret = g_ClientInstance->FuseOpSetXattr(req, ino, name, - value, size, flags); - FuseReplyErrByErrCode(req, ret); - } +namespace { - VLOG(9) << "FuseOpSetXattr done"; -} +struct CodeGuard { + explicit CodeGuard(CURVEFS_ERROR* rc, bvar::Adder* ecount) + : rc_(rc), ecount_(ecount) {} -void FuseOpGetXattr(fuse_req_t req, fuse_ino_t ino, const char *name, - size_t size) { - if (strcmp(name, curvefs::client::common::kCurveFsWarmupXAttr) == 0) { - // warmup - std::string data; - QueryWarmupTask(ino, &data); - if (size == 0) { - fuse_reply_xattr(req, data.length()); - } else { - fuse_reply_buf(req, data.data(), data.length()); + ~CodeGuard() { + if (*rc_ != CURVEFS_ERROR::OK) { + (*ecount_) << 1; } - return; - } - InflightGuard guard(&g_clientOpMetric->opGetXattr.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opGetXattr.latency); - std::string buf; - CURVEFS_ERROR ret = g_ClientInstance->FuseOpGetXattr(req, ino, name, - &buf, size); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opGetXattr.ecount << 1; - FuseReplyErrByErrCode(req, ret); - return; } - if (size == 0) { - fuse_reply_xattr(req, buf.length()); - } else { - fuse_reply_buf(req, buf.data(), buf.length()); - } + CURVEFS_ERROR* rc_; + bvar::Adder* ecount_; +}; + +FuseClient* Client() { + return g_ClientInstance; } -void FuseOpListXattr(fuse_req_t req, fuse_ino_t ino, size_t size) { - InflightGuard guard(&g_clientOpMetric->opListXattr.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opListXattr.latency); - std::unique_ptr buf(new char[size]); - std::memset(buf.get(), 0, size); - size_t xattrSize = 0; - CURVEFS_ERROR ret = g_ClientInstance->FuseOpListXattr(req, ino, buf.get(), - size, &xattrSize); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opListXattr.ecount << 1; - FuseReplyErrByErrCode(req, ret); - return; - } +const char* warmupXAttr = ::curvefs::client::common::kCurveFsWarmupXAttr; - if (size == 0) { - fuse_reply_xattr(req, xattrSize); - } else { - fuse_reply_buf(req, buf.get(), xattrSize); - } +bool IsWamupReq(const char* name) { + return strcmp(name, warmupXAttr) == 0; } -void 
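With this change the `curvefs.warmup.op` xattr value carries four newline-separated fields (`kWarmupOpNum`) instead of three: the op, the warmup type, the path, and the storage type (`disk` or `kvclient`, per `GetWarmupStorageType()`). A hedged sketch of triggering a single-file warmup from outside the client follows; the field order matches the `SplitString()` parsing above, but the exact `add`/`single` token spellings are assumptions, since their string constants are defined elsewhere in `common/common.cpp`.

```cpp
// Sketch of driving the warmup interface through setxattr(2). Field order
// (op \n type \n path \n storage) mirrors Warmup() above; the "add" and
// "single" token spellings are assumptions -- the authoritative constants
// live in curvefs/src/client/common/common.cpp.
#include <sys/xattr.h>

#include <cstdio>
#include <string>

int main() {
    const std::string target = "/mnt/curvefs/dataset/file.bin";  // hypothetical mount path
    const std::string value = std::string("add") + "\n" +        // WarmupOpType token
                              "single" + "\n" +                  // WarmupType token
                              target + "\n" +                    // file to warm up
                              "kvclient";                        // WarmupStorageType token
    if (setxattr(target.c_str(), "curvefs.warmup.op",
                 value.data(), value.size(), 0) != 0) {
        std::perror("setxattr");
        return 1;
    }
    return 0;
}
```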
FuseOpReadDir(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off, - struct fuse_file_info *fi) { - InflightGuard guard(&g_clientOpMetric->opReadDir.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opReadDir.latency); - char *buffer = nullptr; - size_t rSize = 0; - CURVEFS_ERROR ret = g_ClientInstance->FuseOpReadDirPlus(req, ino, - size, off, fi, &buffer, &rSize, false); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opReadDir.ecount << 1; - FuseReplyErrByErrCode(req, ret); - return; - } - fuse_reply_buf(req, buffer, rSize); -} +void TriggerWarmup(fuse_req_t req, + fuse_ino_t ino, + const char* name, + const char* value, + size_t size) { + auto fs = Client()->GetFileSystem(); -void FuseOpReadDirPlus(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off, - struct fuse_file_info *fi) { - InflightGuard guard(&g_clientOpMetric->opReadDir.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opReadDir.latency); - char *buffer = nullptr; - size_t rSize = 0; - CURVEFS_ERROR ret = g_ClientInstance->FuseOpReadDirPlus(req, ino, - size, off, fi, &buffer, &rSize, true); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opReadDir.ecount << 1; - FuseReplyErrByErrCode(req, ret); - return; - } - fuse_reply_buf(req, buffer, rSize); + std::string xattr(value, size); + int code = Warmup(ino, name, xattr); + fuse_reply_err(req, code); } -void FuseOpOpen(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { - InflightGuard guard(&g_clientOpMetric->opOpen.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opOpen.latency); - CURVEFS_ERROR ret = g_ClientInstance->FuseOpOpen(req, ino, fi); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opOpen.ecount << 1; - FuseReplyErrByErrCode(req, ret); - return; +void QueryWarmup(fuse_req_t req, fuse_ino_t ino, size_t size) { + auto fs = Client()->GetFileSystem(); + + std::string data; + QueryWarmupTask(ino, &data); + if (size == 0) { + return fs->ReplyXattr(req, data.length()); } - fuse_reply_open(req, fi); + return fs->ReplyBuffer(req, data.data(), data.length()); } -void FuseOpRead(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off, - struct fuse_file_info *fi) { - InflightGuard guard(&g_clientOpMetric->opRead.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opRead.latency); - std::unique_ptr buffer(new char[size]); - size_t rSize = 0; - CURVEFS_ERROR ret = g_ClientInstance->FuseOpRead(req, ino, size, off, fi, - buffer.get(), &rSize); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opRead.ecount << 1; - FuseReplyErrByErrCode(req, ret); - return; - } +void ReadThrottleAdd(size_t size) { Client()->Add(true, size); } +void WriteThrottleAdd(size_t size) { Client()->Add(false, size); } - struct fuse_bufvec bufvec = FUSE_BUFVEC_INIT(rSize); - bufvec.buf[0].mem = buffer.get(); +#define MetricGuard(REQUEST) \ + InflightGuard iGuard(&g_clientOpMetric->op##REQUEST.inflightOpNum); \ + CodeGuard cGuard(&rc, &g_clientOpMetric->op##REQUEST.ecount); \ + LatencyUpdater updater(&g_clientOpMetric->op##REQUEST.latency) - fuse_reply_data(req, &bufvec, FUSE_BUF_SPLICE_MOVE); -} +} // namespace -void FuseOpWrite(fuse_req_t req, fuse_ino_t ino, const char *buf, size_t size, - off_t off, struct fuse_file_info *fi) { - InflightGuard guard(&g_clientOpMetric->opWrite.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opWrite.latency); - size_t wSize = 0; - CURVEFS_ERROR ret = - g_ClientInstance->FuseOpWrite(req, ino, buf, size, off, fi, &wSize); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opWrite.ecount << 
1; - FuseReplyErrByErrCode(req, ret); - return; +void FuseOpInit(void *userdata, struct fuse_conn_info *conn) { + CURVEFS_ERROR rc; + auto client = Client(); + AccessLogGuard log([&](){ + return StrFormat("init : %s", StrErr(rc)); + }); + + rc = client->FuseOpInit(userdata, conn); + if (rc != CURVEFS_ERROR::OK) { + LOG(FATAL) << "FuseOpInit() failed, retCode = " << rc; + } else { + EnableSplice(conn); + LOG(INFO) << "FuseOpInit() success, retCode = " << rc; } - fuse_reply_write(req, wSize); } -void FuseOpCreate(fuse_req_t req, fuse_ino_t parent, const char *name, - mode_t mode, struct fuse_file_info *fi) { - InflightGuard guard(&g_clientOpMetric->opCreate.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opCreate.latency); - fuse_entry_param e; - CURVEFS_ERROR ret = - g_ClientInstance->FuseOpCreate(req, parent, name, mode, fi, &e); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opCreate.ecount << 1; - FuseReplyErrByErrCode(req, ret); - return; - } - fuse_reply_create(req, &e, fi); +void FuseOpDestroy(void *userdata) { + auto client = Client(); + AccessLogGuard log([&](){ + return StrFormat("destory : OK"); + }); + client->FuseOpDestroy(userdata); } -void FuseOpMkNod(fuse_req_t req, fuse_ino_t parent, const char *name, - mode_t mode, dev_t rdev) { - InflightGuard guard(&g_clientOpMetric->opMkNod.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opMkNod.latency); - fuse_entry_param e; - CURVEFS_ERROR ret = - g_ClientInstance->FuseOpMkNod(req, parent, name, mode, rdev, &e); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opMkNod.ecount << 1; - FuseReplyErrByErrCode(req, ret); - return; +void FuseOpLookup(fuse_req_t req, fuse_ino_t parent, const char* name) { + CURVEFS_ERROR rc; + EntryOut entryOut; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(Lookup); + AccessLogGuard log([&](){ + return StrFormat("lookup (%d,%s): %s%s", + parent, name, StrErr(rc), StrEntry(entryOut)); + }); + + rc = client->FuseOpLookup(req, parent, name, &entryOut); + if (rc != CURVEFS_ERROR::OK) { + return fs->ReplyError(req, rc); } - fuse_reply_entry(req, &e); + return fs->ReplyEntry(req, &entryOut); } -void FuseOpMkDir(fuse_req_t req, fuse_ino_t parent, const char *name, +void FuseOpGetAttr(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { + CURVEFS_ERROR rc; + AttrOut attrOut; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(GetAttr); + AccessLogGuard log([&](){ + return StrFormat("getattr (%d): %s%s", + ino, StrErr(rc), StrAttr(attrOut)); + }); + + rc = client->FuseOpGetAttr(req, ino, fi, &attrOut); + if (rc != CURVEFS_ERROR::OK) { + return fs->ReplyError(req, rc); + } + return fs->ReplyAttr(req, &attrOut); +} + +void FuseOpSetAttr(fuse_req_t req, + fuse_ino_t ino, + struct stat* attr, + int to_set, + struct fuse_file_info* fi) { + CURVEFS_ERROR rc; + AttrOut attrOut; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(SetAttr); + AccessLogGuard log([&](){ + return StrFormat("setattr (%d,0x%X): %s%s", + ino, to_set, StrErr(rc), StrAttr(attrOut)); + }); + + rc = client->FuseOpSetAttr(req, ino, attr, to_set, fi, &attrOut); + if (rc != CURVEFS_ERROR::OK) { + return fs->ReplyError(req, rc); + } + return fs->ReplyAttr(req, &attrOut); +} + +void FuseOpReadLink(fuse_req_t req, fuse_ino_t ino) { + CURVEFS_ERROR rc; + std::string link; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(ReadLink); + AccessLogGuard log([&](){ + return StrFormat("readlink (%d): %s %s", ino, 
StrErr(rc), link.c_str()); + }); + + rc = client->FuseOpReadLink(req, ino, &link); + if (rc != CURVEFS_ERROR::OK) { + return fs->ReplyError(req, rc); + } + return fs->ReplyReadlink(req, link); +} + +void FuseOpMkNod(fuse_req_t req, + fuse_ino_t parent, + const char* name, + mode_t mode, + dev_t rdev) { + CURVEFS_ERROR rc; + EntryOut entryOut; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(MkNod); + AccessLogGuard log([&](){ + return StrFormat("mknod (%d,%s,%s:0%04o): %s%s", + parent, name, StrMode(mode), mode, + StrErr(rc), StrEntry(entryOut)); + }); + + rc = client->FuseOpMkNod(req, parent, name, mode, rdev, &entryOut); + if (rc != CURVEFS_ERROR::OK) { + return fs->ReplyError(req, rc); + } + return fs->ReplyEntry(req, &entryOut); +} + +void FuseOpMkDir(fuse_req_t req, + fuse_ino_t parent, + const char* name, mode_t mode) { - InflightGuard guard(&g_clientOpMetric->opMkDir.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opMkDir.latency); - fuse_entry_param e; - CURVEFS_ERROR ret = - g_ClientInstance->FuseOpMkDir(req, parent, name, mode, &e); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opMkDir.ecount << 1; - FuseReplyErrByErrCode(req, ret); - return; + CURVEFS_ERROR rc; + EntryOut entryOut; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(MkDir); + AccessLogGuard log([&](){ + return StrFormat("mkdir (%d,%s,%s:0%04o): %s%s", + parent, name, StrMode(mode), mode, + StrErr(rc), StrEntry(entryOut)); + }); + + rc = client->FuseOpMkDir(req, parent, name, mode, &entryOut); + if (rc != CURVEFS_ERROR::OK) { + return fs->ReplyError(req, rc); } - fuse_reply_entry(req, &e); + return fs->ReplyEntry(req, &entryOut); } void FuseOpUnlink(fuse_req_t req, fuse_ino_t parent, const char *name) { - InflightGuard guard(&g_clientOpMetric->opUnlink.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opUnlink.latency); - CURVEFS_ERROR ret = g_ClientInstance->FuseOpUnlink(req, parent, name); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opUnlink.ecount << 1; - } - FuseReplyErrByErrCode(req, ret); + CURVEFS_ERROR rc; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(Unlink); + AccessLogGuard log([&](){ + return StrFormat("unlink (%d,%s): %s", parent, name, StrErr(rc)); + }); + + rc = client->FuseOpUnlink(req, parent, name); + return fs->ReplyError(req, rc); } void FuseOpRmDir(fuse_req_t req, fuse_ino_t parent, const char *name) { - InflightGuard guard(&g_clientOpMetric->opRmDir.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opRmDir.latency); - CURVEFS_ERROR ret = g_ClientInstance->FuseOpRmDir(req, parent, name); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opRmDir.ecount << 1; + CURVEFS_ERROR rc; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(RmDir); + AccessLogGuard log([&](){ + return StrFormat("rmdir (%d,%s): %s", parent, name, StrErr(rc)); + }); + + rc = client->FuseOpRmDir(req, parent, name); + return fs->ReplyError(req, rc); +} + +void FuseOpSymlink(fuse_req_t req, + const char *link, + fuse_ino_t parent, + const char* name) { + CURVEFS_ERROR rc; + EntryOut entryOut; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(Symlink); + AccessLogGuard log([&](){ + return StrFormat("symlink (%d,%s,%s): %s%s", + parent, name, link, StrErr(rc), StrEntry(entryOut)); + }); + + rc = client->FuseOpSymlink(req, link, parent, name, &entryOut); + if (rc != CURVEFS_ERROR::OK) { + return fs->ReplyError(req, rc); + } + return 
fs->ReplyEntry(req, &entryOut); +} + +void FuseOpRename(fuse_req_t req, + fuse_ino_t parent, + const char *name, + fuse_ino_t newparent, + const char *newname, + unsigned int flags) { + CURVEFS_ERROR rc; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(Rename); + AccessLogGuard log([&](){ + return StrFormat("rename (%d,%s,%d,%s,%d): %s", + parent, name, newparent, newname, flags, StrErr(rc)); + }); + + rc = client->FuseOpRename(req, parent, name, newparent, newname, flags); + return fs->ReplyError(req, rc); +} + +void FuseOpLink(fuse_req_t req, + fuse_ino_t ino, + fuse_ino_t newparent, + const char *newname) { + CURVEFS_ERROR rc; + EntryOut entryOut; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(Link); + AccessLogGuard log([&](){ + return StrFormat( + "link (%d,%d,%s): %s%s", + ino, newparent, newname, StrErr(rc), StrEntry(entryOut)); + }); + + rc = client->FuseOpLink(req, ino, newparent, newname, &entryOut); + if (rc != CURVEFS_ERROR::OK) { + return fs->ReplyError(req, rc); } - FuseReplyErrByErrCode(req, ret); + return fs->ReplyEntry(req, &entryOut); } -void FuseOpOpenDir(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { - InflightGuard guard(&g_clientOpMetric->opOpenDir.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opOpenDir.latency); - CURVEFS_ERROR ret = g_ClientInstance->FuseOpOpenDir(req, ino, fi); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opOpenDir.ecount << 1; - FuseReplyErrByErrCode(req, ret); +void FuseOpOpen(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { + CURVEFS_ERROR rc; + FileOut fileOut; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(Open); + AccessLogGuard log([&](){ + return StrFormat("open (%d): %s [fh:%d]", ino, StrErr(rc), fi->fh); + }); + + rc = client->FuseOpOpen(req, ino, fi, &fileOut); + if (rc != CURVEFS_ERROR::OK) { + fs->ReplyError(req, rc); return; } - fuse_reply_open(req, fi); + return fs->ReplyOpen(req, &fileOut); } -void FuseOpReleaseDir(fuse_req_t req, fuse_ino_t ino, - struct fuse_file_info *fi) { - InflightGuard guard(&g_clientOpMetric->opReleaseDir.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opReleaseDir.latency); - CURVEFS_ERROR ret = g_ClientInstance->FuseOpReleaseDir(req, ino, fi); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opReleaseDir.ecount << 1; +void FuseOpRead(fuse_req_t req, + fuse_ino_t ino, + size_t size, + off_t off, + struct fuse_file_info* fi) { + CURVEFS_ERROR rc; + size_t rSize = 0; + std::unique_ptr buffer(new char[size]); + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(Read); + AccessLogGuard log([&](){ + return StrFormat("read (%d,%d,%d,%d): %s (%d)", + ino, size, off, fi->fh, StrErr(rc), rSize); + }); + + ReadThrottleAdd(size); + rc = client->FuseOpRead(req, ino, size, off, fi, buffer.get(), &rSize); + if (rc != CURVEFS_ERROR::OK) { + return fs->ReplyError(req, rc); } - FuseReplyErrByErrCode(req, ret); + struct fuse_bufvec bufvec = FUSE_BUFVEC_INIT(rSize); + bufvec.buf[0].mem = buffer.get(); + return fs->ReplyData(req, &bufvec, FUSE_BUF_SPLICE_MOVE); } -void FuseOpRename(fuse_req_t req, fuse_ino_t parent, const char *name, - fuse_ino_t newparent, const char *newname, - unsigned int flags) { - // TODO(Wine93): the flag RENAME_EXCHANGE and RENAME_NOREPLACE - // is only used in linux interface renameat(), not required by posix, - // we can ignore it now - InflightGuard guard(&g_clientOpMetric->opRename.inflightOpNum); - LatencyUpdater 
updater(&g_clientOpMetric->opRename.latency); - CURVEFS_ERROR ret = CURVEFS_ERROR::OK; - if (flags != 0) { - ret = CURVEFS_ERROR::INVALIDPARAM; - } else { - ret = g_ClientInstance->FuseOpRename(req, parent, name, newparent, - newname); - } +void FuseOpWrite(fuse_req_t req, + fuse_ino_t ino, + const char* buf, + size_t size, + off_t off, + struct fuse_file_info *fi) { + CURVEFS_ERROR rc; + FileOut fileOut; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(Read); + AccessLogGuard log([&](){ + return StrFormat("write (%d,%d,%d,%d): %s (%d)", + ino, size, off, fi->fh, StrErr(rc), fileOut.nwritten); + }); + + WriteThrottleAdd(size); + rc = client->FuseOpWrite(req, ino, buf, size, off, fi, &fileOut); + if (rc != CURVEFS_ERROR::OK) { + return fs->ReplyError(req, rc); + } + return fs->ReplyWrite(req, &fileOut); +} + +void FuseOpFlush(fuse_req_t req, + fuse_ino_t ino, + struct fuse_file_info *fi) { + CURVEFS_ERROR rc; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(Flush); + AccessLogGuard log([&](){ + return StrFormat("flush (%d,%d): %s", ino, fi->fh, StrErr(rc)); + }); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opRename.ecount << 1; - } - FuseReplyErrByErrCode(req, ret); + rc = client->FuseOpFlush(req, ino, fi); + return fs->ReplyError(req, rc); } -void FuseOpSetAttr(fuse_req_t req, fuse_ino_t ino, struct stat *attr, - int to_set, struct fuse_file_info *fi) { - InflightGuard guard(&g_clientOpMetric->opSetAttr.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opSetAttr.latency); - struct stat attrOut; - CURVEFS_ERROR ret = - g_ClientInstance->FuseOpSetAttr(req, ino, attr, to_set, fi, &attrOut); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opSetAttr.ecount << 1; - FuseReplyErrByErrCode(req, ret); - return; - } - fuse_reply_attr(req, &attrOut, g_fuseClientOption->attrTimeOut); +void FuseOpRelease(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { + CURVEFS_ERROR rc; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(Release); + AccessLogGuard log([&](){ + return StrFormat("release (%d,%d): %s", ino, fi->fh, StrErr(rc)); + }); + + rc = client->FuseOpRelease(req, ino, fi); + return fs->ReplyError(req, rc); +} + +void FuseOpFsync(fuse_req_t req, + fuse_ino_t ino, + int datasync, + struct fuse_file_info* fi) { + CURVEFS_ERROR rc; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(Fsync); + AccessLogGuard log([&](){ + return StrFormat("fsync (%d,%d): %s", ino, datasync, StrErr(rc)); + }); + + rc = client->FuseOpFsync(req, ino, datasync, fi); + return fs->ReplyError(req, rc); } -void FuseOpSymlink(fuse_req_t req, const char *link, fuse_ino_t parent, - const char *name) { - InflightGuard guard(&g_clientOpMetric->opSymlink.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opSymlink.latency); - fuse_entry_param e; - CURVEFS_ERROR ret = - g_ClientInstance->FuseOpSymlink(req, link, parent, name, &e); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opSymlink.ecount << 1; - FuseReplyErrByErrCode(req, ret); - return; - } - fuse_reply_entry(req, &e); -} +void FuseOpOpenDir(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { + CURVEFS_ERROR rc; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(OpenDir); + AccessLogGuard log([&](){ + return StrFormat("opendir (%d): %s [fh:%d]", ino, StrErr(rc), fi->fh); + }); + + rc = client->FuseOpOpenDir(req, ino, fi); + if (rc != CURVEFS_ERROR::OK) { + return fs->ReplyError(req, rc); 
+ } + return fs->ReplyOpen(req, fi); +} + +void FuseOpReadDir(fuse_req_t req, + fuse_ino_t ino, + size_t size, + off_t off, + struct fuse_file_info* fi) { + CURVEFS_ERROR rc; + char *buffer; + size_t rSize; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(ReadDir); + AccessLogGuard log([&](){ + return StrFormat("readdir (%d,%d,%d): %s (%d)", + ino, size, off, StrErr(rc), rSize); + }); + + rc = client->FuseOpReadDir(req, ino, size, off, fi, &buffer, &rSize, false); + if (rc != CURVEFS_ERROR::OK) { + return fs->ReplyError(req, rc); + } + return fs->ReplyBuffer(req, buffer, rSize); +} + +void FuseOpReadDirPlus(fuse_req_t req, + fuse_ino_t ino, + size_t size, + off_t off, + struct fuse_file_info* fi) { + CURVEFS_ERROR rc; + char *buffer; + size_t rSize; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(ReadDir); + AccessLogGuard log([&](){ + return StrFormat("readdirplus (%d,%d,%d): %s (%d)", + ino, size, off, StrErr(rc), rSize); + }); + + rc = client->FuseOpReadDir(req, ino, size, off, fi, &buffer, &rSize, true); + if (rc != CURVEFS_ERROR::OK) { + return fs->ReplyError(req, rc); + } + + return fs->ReplyBuffer(req, buffer, rSize); +} + +void FuseOpReleaseDir(fuse_req_t req, + fuse_ino_t ino, + struct fuse_file_info *fi) { + CURVEFS_ERROR rc; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(ReleaseDir); + AccessLogGuard log([&](){ + return StrFormat("releasedir (%d,%d): %s", ino, fi->fh, StrErr(rc)); + }); -void FuseOpLink(fuse_req_t req, fuse_ino_t ino, fuse_ino_t newparent, - const char *newname) { - InflightGuard guard(&g_clientOpMetric->opLink.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opLink.latency); - fuse_entry_param e; - CURVEFS_ERROR ret = - g_ClientInstance->FuseOpLink(req, ino, newparent, newname, &e); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opLink.ecount << 1; - FuseReplyErrByErrCode(req, ret); - return; - } - fuse_reply_entry(req, &e); + rc = client->FuseOpReleaseDir(req, ino, fi); + return fs->ReplyError(req, rc); } -void FuseOpReadLink(fuse_req_t req, fuse_ino_t ino) { - InflightGuard guard(&g_clientOpMetric->opReadLink.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opReadLink.latency); - std::string linkStr; - CURVEFS_ERROR ret = g_ClientInstance->FuseOpReadLink(req, ino, &linkStr); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opReadLink.ecount << 1; - FuseReplyErrByErrCode(req, ret); - return; - } - fuse_reply_readlink(req, linkStr.c_str()); -} +void FuseOpStatFs(fuse_req_t req, fuse_ino_t ino) { + CURVEFS_ERROR rc; + struct statvfs stbuf; + auto client = Client(); + auto fs = client->GetFileSystem(); + AccessLogGuard log([&](){ + return StrFormat("statfs (%d): %s", ino, StrErr(rc)); + }); + + rc = client->FuseOpStatFs(req, ino, &stbuf); + if (rc != CURVEFS_ERROR::OK) { + return fs->ReplyError(req, rc); + } + return fs->ReplyStatfs(req, &stbuf); +} + +void FuseOpSetXattr(fuse_req_t req, + fuse_ino_t ino, + const char* name, + const char* value, + size_t size, + int flags) { + CURVEFS_ERROR rc; + auto client = Client(); + auto fs = client->GetFileSystem(); + AccessLogGuard log([&](){ + return StrFormat("setxattr (%d,%s,%d,%d): %s", + ino, name, size, flags, StrErr(rc)); + }); + + if (IsWamupReq(name)) { + return TriggerWarmup(req, ino, name, value, size); + } + rc = client->FuseOpSetXattr(req, ino, name, value, size, flags); + return fs->ReplyError(req, rc); +} + +void FuseOpGetXattr(fuse_req_t req, + fuse_ino_t ino, + const char *name, + 
size_t size) { + CURVEFS_ERROR rc; + std::string value; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(GetXattr); + AccessLogGuard log([&](){ + return StrFormat("getxattr (%d,%s,%d): %s (%d)", + ino, name, size, StrErr(rc), value.size()); + }); -void FuseOpRelease(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { - InflightGuard guard(&g_clientOpMetric->opRelease.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opRelease.latency); - CURVEFS_ERROR ret = g_ClientInstance->FuseOpRelease(req, ino, fi); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opRelease.ecount << 1; + if (IsWamupReq(name)) { + return QueryWarmup(req, ino, size); } - FuseReplyErrByErrCode(req, ret); -} -void FuseOpFsync(fuse_req_t req, fuse_ino_t ino, int datasync, - struct fuse_file_info *fi) { - InflightGuard guard(&g_clientOpMetric->opFsync.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opFsync.latency); - CURVEFS_ERROR ret = g_ClientInstance->FuseOpFsync(req, ino, datasync, fi); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opFsync.ecount << 1; + rc = Client()->FuseOpGetXattr(req, ino, name, &value, size); + if (rc != CURVEFS_ERROR::OK) { + return fs->ReplyError(req, rc); + } else if (size == 0) { + return fs->ReplyXattr(req, value.length()); } - FuseReplyErrByErrCode(req, ret); + return fs->ReplyBuffer(req, value.data(), value.length()); } -void FuseOpFlush(fuse_req_t req, fuse_ino_t ino, - struct fuse_file_info *fi) { - InflightGuard guard(&g_clientOpMetric->opFlush.inflightOpNum); - LatencyUpdater updater(&g_clientOpMetric->opFlush.latency); - CURVEFS_ERROR ret = g_ClientInstance->FuseOpFlush(req, ino, fi); - if (ret != CURVEFS_ERROR::OK) { - g_clientOpMetric->opFlush.ecount << 1; - } - FuseReplyErrByErrCode(req, ret); +void FuseOpListXattr(fuse_req_t req, fuse_ino_t ino, size_t size) { + CURVEFS_ERROR rc; + size_t xattrSize = 0; + std::unique_ptr buf(new char[size]); + std::memset(buf.get(), 0, size); + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(ListXattr); + AccessLogGuard log([&](){ + return StrFormat("listxattr (%d,%s): %s (%d)", + ino, size, StrErr(rc), xattrSize); + }); + + rc = Client()->FuseOpListXattr(req, ino, buf.get(), size, &xattrSize); + if (rc != CURVEFS_ERROR::OK) { + return fs->ReplyError(req, rc); + } else if (size == 0) { + return fs->ReplyXattr(req, xattrSize); + } + return fs->ReplyBuffer(req, buf.get(), xattrSize); +} + +void FuseOpCreate(fuse_req_t req, + fuse_ino_t parent, + const char* name, + mode_t mode, + struct fuse_file_info* fi) { + CURVEFS_ERROR rc; + EntryOut entryOut; + auto client = Client(); + auto fs = client->GetFileSystem(); + MetricGuard(Create); + AccessLogGuard log([&](){ + return StrFormat("create (%d,%s): %s%s [fh:%d]", + parent, name, StrErr(rc), StrEntry(entryOut), fi->fh); + }); + + rc = client->FuseOpCreate(req, parent, name, mode, fi, &entryOut); + if (rc != CURVEFS_ERROR::OK) { + return fs->ReplyError(req, rc); + } + return fs->ReplyCreate(req, &entryOut, fi); } void FuseOpBmap(fuse_req_t req, @@ -700,15 +886,8 @@ void FuseOpBmap(fuse_req_t req, size_t /*blocksize*/, uint64_t /*idx*/) { // TODO(wuhanqing): implement for volume storage - FuseReplyErrByErrCode(req, CURVEFS_ERROR::NOTSUPPORT); -} + auto client = Client(); + auto fs = client->GetFileSystem(); -void FuseOpStatFs(fuse_req_t req, fuse_ino_t ino) { - struct statvfs stbuf; - CURVEFS_ERROR ret = g_ClientInstance->FuseOpStatFs(req, ino, &stbuf); - if (ret != CURVEFS_ERROR::OK) { - 
FuseReplyErrByErrCode(req, ret); - return; - } - fuse_reply_statfs(req, &stbuf); + return fs->ReplyError(req, CURVEFS_ERROR::NOTSUPPORT); } diff --git a/curvefs/src/client/curve_fuse_op.h b/curvefs/src/client/curve_fuse_op.h index b3a5944134..231cf26526 100644 --- a/curvefs/src/client/curve_fuse_op.h +++ b/curvefs/src/client/curve_fuse_op.h @@ -37,10 +37,9 @@ extern "C" { #endif -int InitGlog(const char *confPath, const char *argv0); +int InitLog(const char *confPath, const char *argv0); -int InitFuseClient(const char *confPath, const char *fsName, - const char* fsType, const char* mdsAddr); +int InitFuseClient(const struct MountOption *mountOption); void UnInitFuseClient(); diff --git a/curvefs/src/client/dentry_cache_manager.cpp b/curvefs/src/client/dentry_cache_manager.cpp index 5bd4e68896..afb5e49eef 100644 --- a/curvefs/src/client/dentry_cache_manager.cpp +++ b/curvefs/src/client/dentry_cache_manager.cpp @@ -44,31 +44,13 @@ namespace client { using curve::common::WriteLockGuard; using NameLockGuard = ::curve::common::GenericNameLockGuard; - -void DentryCacheManagerImpl::InsertOrReplaceCache(const Dentry &dentry) { - std::string key = GetDentryCacheKey(dentry.parentinodeid(), dentry.name()); - if (!curvefs::client::common::FLAGS_enableCto) { - NameLockGuard lock(nameLock_, key); - dCache_->Put(key, dentry); - } -} - -void DentryCacheManagerImpl::DeleteCache(uint64_t parentId, - const std::string &name) { - std::string key = GetDentryCacheKey(parentId, name); - NameLockGuard lock(nameLock_, key); - dCache_->Remove(key); -} +using ::curvefs::client::filesystem::ToFSError; CURVEFS_ERROR DentryCacheManagerImpl::GetDentry(uint64_t parent, const std::string &name, Dentry *out) { std::string key = GetDentryCacheKey(parent, name); NameLockGuard lock(nameLock_, key); - bool ok = dCache_->Get(key, out); - if (ok) { - return CURVEFS_ERROR::OK; - } MetaStatusCode ret = metaClient_->GetDentry(fsId_, parent, name, out); if (ret != MetaStatusCode::OK) { @@ -76,10 +58,7 @@ CURVEFS_ERROR DentryCacheManagerImpl::GetDentry(uint64_t parent, << "metaClient_ GetDentry failed, MetaStatusCode = " << ret << ", MetaStatusCode_Name = " << MetaStatusCode_Name(ret) << ", parent = " << parent << ", name = " << name; - return MetaStatusCodeToCurvefsErrCode(ret); - } - if (!curvefs::client::common::FLAGS_enableCto) { - dCache_->Put(key, *out); + return ToFSError(ret); } return CURVEFS_ERROR::OK; } @@ -94,12 +73,9 @@ CURVEFS_ERROR DentryCacheManagerImpl::CreateDentry(const Dentry &dentry) { << ", MetaStatusCode_Name = " << MetaStatusCode_Name(ret) << ", parent = " << dentry.parentinodeid() << ", name = " << dentry.name(); - return MetaStatusCodeToCurvefsErrCode(ret); + return ToFSError(ret); } - if (!curvefs::client::common::FLAGS_enableCto) { - dCache_->Put(key, dentry); - } return CURVEFS_ERROR::OK; } @@ -108,14 +84,13 @@ CURVEFS_ERROR DentryCacheManagerImpl::DeleteDentry(uint64_t parent, FsFileType type) { std::string key = GetDentryCacheKey(parent, name); NameLockGuard lock(nameLock_, key); - dCache_->Remove(key); MetaStatusCode ret = metaClient_->DeleteDentry(fsId_, parent, name, type); if (ret != MetaStatusCode::OK && ret != MetaStatusCode::NOT_FOUND) { LOG(ERROR) << "metaClient_ DeleteInode failed, MetaStatusCode = " << ret << ", MetaStatusCode_Name = " << MetaStatusCode_Name(ret) << ", parent = " << parent << ", name = " << name; - return MetaStatusCodeToCurvefsErrCode(ret); + return ToFSError(ret); } return CURVEFS_ERROR::OK; } @@ -149,7 +124,7 @@ CURVEFS_ERROR DentryCacheManagerImpl::ListDentry(uint64_t 
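`curve_fuse_op.h` now exposes `InitLog()` and an `InitFuseClient()` that takes the whole `struct MountOption` rather than four separate strings, which lets the client record mount status via `SetMountStatus()`. A hedged sketch of the updated call site follows; the `MountOption` fields used here (`conf`, `fsName`, `fsType`, `mdsAddr`) are exactly the ones read by the new `InitFuseClient()` above, while the surrounding wiring is illustrative since the fuse `main()` is outside this excerpt.

```cpp
// Hedged sketch of the new init sequence. MountOption's full definition and
// the real fuse main() are not part of this excerpt; only fields referenced
// by InitFuseClient() above are set here.
#include <cstring>

#include "curvefs/src/client/curve_fuse_op.h"

int StartClient(char* confPath, char* fsName, char* fsType,
                char* mdsAddr, char* argv0) {
    struct MountOption mountOption;
    std::memset(&mountOption, 0, sizeof(mountOption));
    mountOption.conf = confPath;      // e.g. client.conf path
    mountOption.fsName = fsName;
    mountOption.fsType = fsType;      // e.g. "s3"
    mountOption.mdsAddr = mdsAddr;    // may be nullptr: fall back to the conf value

    if (InitLog(mountOption.conf, argv0) != 0) {   // renamed from InitGlog()
        return -1;
    }
    if (InitFuseClient(&mountOption) != 0) {       // previously took 4 strings
        UnInitFuseClient();
        return -1;
    }
    // ... fuse session setup and loop would follow here ...
    UnInitFuseClient();
    return 0;
}
```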
parent, << ", MetaStatusCode_Name = " << MetaStatusCode_Name(ret) << ", parent = " << parent << ", last = " << last << ", count = " << limit << ", onlyDir = " << onlyDir; - return MetaStatusCodeToCurvefsErrCode(ret); + return ToFSError(ret); } if (!onlyDir) { diff --git a/curvefs/src/client/dentry_cache_manager.h b/curvefs/src/client/dentry_cache_manager.h index 15d0a80811..84f9f20f53 100644 --- a/curvefs/src/client/dentry_cache_manager.h +++ b/curvefs/src/client/dentry_cache_manager.h @@ -33,20 +33,18 @@ #include #include "curvefs/src/client/rpcclient/metaserver_client.h" -#include "curvefs/src/client/error_code.h" #include "src/common/concurrent/concurrent.h" -#include "src/common/lru_cache.h" #include "src/common/concurrent/name_lock.h" +#include "curvefs/src/client/filesystem/error.h" using ::curvefs::metaserver::Dentry; -using ::curve::common::TimedLRUCache; -using ::curve::common::CacheMetrics; namespace curvefs { namespace client { using rpcclient::MetaServerClient; using rpcclient::MetaServerClientImpl; +using ::curvefs::client::filesystem::CURVEFS_ERROR; static const char* kDentryKeyDelimiter = ":"; @@ -59,13 +57,6 @@ class DentryCacheManager { fsId_ = fsId; } - virtual CURVEFS_ERROR Init(uint64_t cacheSize, bool enableCacheMetrics, - uint32_t cacheTimeOutSec) = 0; - - virtual void InsertOrReplaceCache(const Dentry& dentry) = 0; - - virtual void DeleteCache(uint64_t parentId, const std::string& name) = 0; - virtual CURVEFS_ERROR GetDentry(uint64_t parent, const std::string &name, Dentry *out) = 0; @@ -86,30 +77,11 @@ class DentryCacheManager { class DentryCacheManagerImpl : public DentryCacheManager { public: DentryCacheManagerImpl() - : metaClient_(std::make_shared()), - dCache_(nullptr) {} + : metaClient_(std::make_shared()) {} explicit DentryCacheManagerImpl( const std::shared_ptr &metaClient) - : metaClient_(metaClient), - dCache_(nullptr) {} - - CURVEFS_ERROR Init(uint64_t cacheSize, bool enableCacheMetrics, - uint32_t cacheTimeOutSec) override { - if (enableCacheMetrics) { - dCache_ = std::make_shared< - TimedLRUCache>(cacheTimeOutSec, cacheSize, - std::make_shared("dcache")); - } else { - dCache_ = std::make_shared< - TimedLRUCache>(cacheTimeOutSec, cacheSize); - } - return CURVEFS_ERROR::OK; - } - - void InsertOrReplaceCache(const Dentry& dentry) override; - - void DeleteCache(uint64_t parentId, const std::string& name) override; + : metaClient_(metaClient) {} CURVEFS_ERROR GetDentry(uint64_t parent, const std::string &name, Dentry *out) override; @@ -130,8 +102,6 @@ class DentryCacheManagerImpl : public DentryCacheManager { private: std::shared_ptr metaClient_; - // key is parentId + name - std::shared_ptr> dCache_; curve::common::GenericNameLock nameLock_; }; diff --git a/curvefs/src/client/error_code.cpp b/curvefs/src/client/error_code.cpp deleted file mode 100644 index d2f7c53d93..0000000000 --- a/curvefs/src/client/error_code.cpp +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright (c) 2021 NetEase Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -/* - * Project: curve - * Created Date: Thur May 27 2021 - * Author: xuchaojie - */ - -#include "curvefs/src/client/error_code.h" - -#include -#include - -namespace curvefs { -namespace client { - -using ::curvefs::metaserver::MetaStatusCode; - -static const std::map err2Msg = { - {CURVEFS_ERROR::OK, "OK"}, - {CURVEFS_ERROR::INTERNAL, "internal error"}, - {CURVEFS_ERROR::UNKNOWN, "unknown"}, - {CURVEFS_ERROR::EXISTS, "inode or dentry already exist"}, - {CURVEFS_ERROR::NOTEXIST, "inode or dentry not exist"}, - {CURVEFS_ERROR::NO_SPACE, "no space to alloc"}, - {CURVEFS_ERROR::BAD_FD, "bad fd"}, - {CURVEFS_ERROR::INVALIDPARAM, "invalid param"}, - {CURVEFS_ERROR::NOPERMISSION, "no permission"}, - {CURVEFS_ERROR::NOTEMPTY, "dir not empty"}, - {CURVEFS_ERROR::NOFLUSH, "no flush"}, - {CURVEFS_ERROR::NOTSUPPORT, "not support"}, - {CURVEFS_ERROR::NAMETOOLONG, "name too long"}, - {CURVEFS_ERROR::MOUNT_POINT_EXIST, "mount point already exist"}, - {CURVEFS_ERROR::MOUNT_FAILED, "mount failed"}, -}; - -std::ostream &operator<<(std::ostream &os, CURVEFS_ERROR code) { - os << static_cast(code) << "[" << [code]() { - auto it = err2Msg.find(code); - if (it != err2Msg.end()) { - return it->second; - } - - return std::string{"Unknown"}; - }() << "]"; - - return os; -} - -CURVEFS_ERROR MetaStatusCodeToCurvefsErrCode( - MetaStatusCode code) { - CURVEFS_ERROR ret = CURVEFS_ERROR::UNKNOWN; - switch (code) { - case MetaStatusCode::OK: - ret = CURVEFS_ERROR::OK; - break; - - case MetaStatusCode::NOT_FOUND: - ret = CURVEFS_ERROR::NOTEXIST; - break; - - case MetaStatusCode::PARAM_ERROR: - ret = CURVEFS_ERROR::INVALIDPARAM; - break; - - case MetaStatusCode::INODE_EXIST: - case MetaStatusCode::DENTRY_EXIST: - ret = CURVEFS_ERROR::EXISTS; - break; - - case MetaStatusCode::SYM_LINK_EMPTY: - case MetaStatusCode::RPC_ERROR: - ret = CURVEFS_ERROR::INTERNAL; - break; - - default: - ret = CURVEFS_ERROR::UNKNOWN; - } - return ret; -} - -} // namespace client -} // namespace curvefs diff --git a/curvefs/src/client/filesystem/README.md b/curvefs/src/client/filesystem/README.md new file mode 100644 index 0000000000..077f9225f2 --- /dev/null +++ b/curvefs/src/client/filesystem/README.md @@ -0,0 +1,78 @@ +CurveFS Client Metadata Cache Design +=== + +``` ++----------------+ ++ Kernel Cache | ++----------------+ + | ^ +(1) | | (4) + v | ++----------------+ ++ CurveFS Fuse | ++----------------+ + | ^ +(2) | | (3) + v | ++----------------+ ++ MetaServer | ++----------------+ +``` + +(1) kernel cache timeout +--- + +* requests are always sent to fuse layer, except `lookup` and `getattr` request which affected by caching. +* if entry or attribute cache timeout, vfs layer will launch request to fuse layer. + +(2) revalidate cache +--- + +* for `open` and `opendir` request, fuse layer should revalidate cache by comparing mtime, if modified, fuse layer should: + * `open`: return **ESTALE** to trigger vfs layer to invoke the `open` again with ignoring cache. + * `opendir`: drop all directory cache. +* others, proxy request to metaserver directly. + + +(3) retrive fresh metadata +--- + +* for `readdir` request, fuse layer should cache direcoty entries and their attributes. +* others, no caching. + +(4) reply with timeout +--- + +* reply entry or attribute to vfs layer with corresponding cache timeout. +* four type timeouts provided: `entryTimeout`, `dirEntryTimeout`, `attrTimeout`, `dirAtttTimeout`. +* fuse layer should remeber the `mtime` of attribute while reply it to vfs layer. 
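Step (2) above is the subtlest part of the design, so a hedged illustration may help: on `open`, the fuse layer compares the inode's current mtime with the mtime it remembered when it last replied to the kernel, and returns ESTALE when they differ so that VFS retries the open without its cached attributes (for `opendir` it instead drops the directory cache). Only `AttrWatcher::GetMtime()` comes from this patch (see `attr_watcher.h` below); the surrounding `FileSystem` method, the `TimeSpec` comparison, and the `STALE` error value are assumptions.

```cpp
// Illustrative sketch of the open-time revalidation described in step (2);
// the FileSystem wrapper, its members and CURVEFS_ERROR::STALE are
// assumptions, only AttrWatcher::GetMtime() is declared by this patch.
CURVEFS_ERROR FileSystem::Revalidate(Ino ino, const InodeAttr& attr) {
    TimeSpec remembered;
    if (!attrWatcher_->GetMtime(ino, &remembered)) {
        return CURVEFS_ERROR::OK;  // nothing remembered, nothing to revalidate
    }

    TimeSpec current(attr.mtime(), attr.mtime_ns());
    if (current != remembered) {
        // File changed behind the kernel's cache: answering ESTALE makes
        // VFS retry the open while ignoring its cached entry/attr.
        return CURVEFS_ERROR::STALE;
    }
    return CURVEFS_ERROR::OK;
}
```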
+ +Cache Layer Level +=== + +``` ++-------------------------------------------------------+ +| Kernel Cache | (L1 Cache) ++-------------------------------------------------------+ + ^ (update length) + | ++--------------------------------------------------------+ +| Open File Cache | (L2 Cache) +| (only length and chunk) | ++--------------------------------------------------------+ + ^ ^ + | (reply entry/attr) | (reply entry/attr) ++-----------------+ | +| Directory Cache | (L3 Cache) | ++-----------------+ | + ^ | + | | ++-----------------+ +------------------------------------+ +| readdir | | lookup、getattr、setattr、create... | ++-----------------+ +------------------------------------+ + ^ ^ + | | ++--------------------------------------------------------+ +| MetaServer | ++--------------------------------------------------------+ +``` \ No newline at end of file diff --git a/curvefs/src/client/filesystem/access_log.h b/curvefs/src/client/filesystem/access_log.h new file mode 100644 index 0000000000..1e2067c9b1 --- /dev/null +++ b/curvefs/src/client/filesystem/access_log.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-03-17 + * Author: Jingli Chen (Wine93) + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include "absl/strings/str_format.h" +#include "curvefs/src/client/common/config.h" + +#ifndef CURVEFS_SRC_CLIENT_FILESYSTEM_ACCESS_LOG_H_ +#define CURVEFS_SRC_CLIENT_FILESYSTEM_ACCESS_LOG_H_ + +namespace curvefs { +namespace client { +namespace common { + +DECLARE_bool(access_logging); + +} +namespace filesystem { + +using ::absl::StrFormat; +using ::curvefs::client::common::FLAGS_access_logging; +using MessageHandler = std::function; + +static std::shared_ptr Logger; + +bool InitAccessLog(const std::string& prefix) { + std::string filename = StrFormat("%s/access.%d.log", prefix, getpid()); + Logger = spdlog::daily_logger_mt("fuse_access", filename, 0, 0); + spdlog::flush_every(std::chrono::seconds(1)); + return true; +} + +struct AccessLogGuard { + explicit AccessLogGuard(MessageHandler handler) + : enable(FLAGS_access_logging), + handler(handler) { + if (!enable) { + return; + } + + timer.start(); + } + + ~AccessLogGuard() { + if (!enable) { + return; + } + + timer.stop(); + Logger->info("{0} <{1:.6f}>", handler(), timer.u_elapsed() / 1e6); + } + + bool enable; + MessageHandler handler; + butil::Timer timer; +}; + +} // namespace filesystem +} // namespace client +} // namespace curvefs + +#endif // CURVEFS_SRC_CLIENT_FILESYSTEM_ACCESS_LOG_H_ diff --git a/curvefs/src/client/filesystem/attr_watcher.cpp b/curvefs/src/client/filesystem/attr_watcher.cpp new file mode 100644 index 0000000000..9515533e4a --- /dev/null +++ b/curvefs/src/client/filesystem/attr_watcher.cpp @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2023 NetEase Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-03-09 + * Author: Jingli Chen (Wine93) + */ + +#include "curvefs/src/client/filesystem/utils.h" +#include "curvefs/src/client/filesystem/attr_watcher.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +AttrWatcher::AttrWatcher(AttrWatcherOption option, + std::shared_ptr openFiles, + std::shared_ptr dirCache) + : modifiedAt_(std::make_shared(option.lruSize)), + openFiles_(openFiles), + dirCache_(dirCache) {} + +void AttrWatcher::RemeberMtime(const InodeAttr& attr) { + WriteLockGuard lk(rwlock_); + modifiedAt_->Put(attr.inodeid(), AttrMtime(attr)); +} + +bool AttrWatcher::GetMtime(Ino ino, TimeSpec* time) { + ReadLockGuard lk(rwlock_); + return modifiedAt_->Get(ino, time); +} + +void AttrWatcher::UpdateDirEntryAttr(Ino ino, const InodeAttr& attr) { + std::shared_ptr entries; + for (const auto parent : attr.parent()) { + bool yes = dirCache_->Get(parent, &entries); + if (!yes) { + continue; + } + + entries->UpdateAttr(ino, attr); + + VLOG(1) << "Write back attribute to dir entry cache: ino = " << ino + << ", attr = " << attr.ShortDebugString(); + } +} + +void AttrWatcher::UpdateDirEntryLength(Ino ino, const InodeAttr& open) { + std::shared_ptr entries; + for (const auto parent : open.parent()) { + bool yes = dirCache_->Get(parent, &entries); + if (!yes) { + continue; + } + + entries->UpdateLength(ino, open); + + VLOG(1) << "Write back file length to dir entry cache: ino = " << ino + << ", attr = " << open.ShortDebugString(); + } +} + +AttrWatcherGuard::AttrWatcherGuard(std::shared_ptr watcher, + InodeAttr* attr, + ReplyType type, + bool writeBack) + : watcher(watcher), attr(attr), type(type), writeBack(writeBack) { + InodeAttr open; + Ino ino = attr->inodeid(); + bool yes = watcher->openFiles_->GetFileAttr(ino, &open); + if (!yes) { + return; + } + + attr->set_length(open.length()); + attr->set_mtime(open.mtime()); + attr->set_mtime_ns(open.mtime_ns()); + if (AttrCtime(open) > AttrCtime(*attr)) { + attr->set_ctime(open.ctime()); + attr->set_ctime_ns(open.ctime_ns()); + } +} + +AttrWatcherGuard::~AttrWatcherGuard() { + switch (type) { + case ReplyType::ATTR: + watcher->RemeberMtime(*attr); + if (writeBack) { + watcher->UpdateDirEntryAttr(attr->inodeid(), *attr); + } + break; + + case ReplyType::ONLY_LENGTH: + if (writeBack) { + watcher->UpdateDirEntryLength(attr->inodeid(), *attr); + } + break; + } +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/src/client/filesystem/attr_watcher.h b/curvefs/src/client/filesystem/attr_watcher.h new file mode 100644 index 0000000000..e3cd163b8a --- /dev/null +++ b/curvefs/src/client/filesystem/attr_watcher.h @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-03-09 + * Author: Jingli Chen (Wine93) + */ + +#ifndef CURVEFS_SRC_CLIENT_FILESYSTEM_ATTR_WATCHER_H_ +#define CURVEFS_SRC_CLIENT_FILESYSTEM_ATTR_WATCHER_H_ + +#include + +#include "src/common/lru_cache.h" +#include "curvefs/src/client/common/config.h" +#include "curvefs/src/client/filesystem/meta.h" +#include "curvefs/src/client/filesystem/openfile.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +using ::curve::common::LRUCache; +using ::curve::common::RWLock; +using ::curve::common::ReadLockGuard; +using ::curve::common::WriteLockGuard; +using ::curvefs::client::common::AttrWatcherOption; + +class AttrWatcher { + public: + using LRUType = LRUCache; + + public: + AttrWatcher(AttrWatcherOption option, + std::shared_ptr openFiles, + std::shared_ptr dirCache); + + void RemeberMtime(const InodeAttr& attr); + + bool GetMtime(Ino ino, TimeSpec* time); + + void UpdateDirEntryAttr(Ino ino, const InodeAttr& attr); + + void UpdateDirEntryLength(Ino ino, const InodeAttr& open); + + private: + friend class AttrWatcherGuard; + + private: + RWLock rwlock_; + std::shared_ptr modifiedAt_; + std::shared_ptr openFiles_; + std::shared_ptr dirCache_; +}; + + +enum class ReplyType { + ATTR, + ONLY_LENGTH +}; + +/* + * each attribute reply to kernel, the watcher will: + * before reply: + * 1) set attibute length if the corresponding file is opened + * after reply: + * 1) remeber attribute modified time. + * 2) write back attribute to dir entry cache if |writeBack| is true, + * because the dir-entry attribute maybe stale. + */ +struct AttrWatcherGuard { + public: + AttrWatcherGuard(std::shared_ptr watcher, + InodeAttr* attr, + ReplyType type, + bool writeBack); + + ~AttrWatcherGuard(); + + private: + std::shared_ptr watcher; + InodeAttr* attr; + ReplyType type; + bool writeBack; +}; + +} // namespace filesystem +} // namespace client +} // namespace curvefs + +#endif // CURVEFS_SRC_CLIENT_FILESYSTEM_ATTR_WATCHER_H_ diff --git a/curvefs/src/client/filesystem/defer_sync.cpp b/curvefs/src/client/filesystem/defer_sync.cpp new file mode 100644 index 0000000000..a69cdeb3ba --- /dev/null +++ b/curvefs/src/client/filesystem/defer_sync.cpp @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
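A sketch (not part of the patch) of the reply-side usage the comment above describes; the elided `shared_ptr` template argument is assumed to be `AttrWatcher`.

```
// Sketch only: guard an attribute reply so the opened-file length is patched
// in before the reply and the mtime is remembered afterwards.
#include <memory>

#include "curvefs/src/client/filesystem/attr_watcher.h"

namespace fs = curvefs::client::filesystem;

void ReplyAttrSketch(std::shared_ptr<fs::AttrWatcher> watcher,
                     fs::InodeAttr attr) {
    {
        fs::AttrWatcherGuard guard(watcher, &attr, fs::ReplyType::ATTR,
                                   /*writeBack=*/true);
        // ... build the stat from |attr| and call fuse_reply_attr() here ...
    }   // dtor: remember mtime and write the attr back into the dir-entry cache
}
```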
+ */ + +/* + * Project: Curve + * Created Date: 2023-03-06 + * Author: Jingli Chen (Wine93) + */ + +#include +#include + +#include "curvefs/src/client/filesystem/defer_sync.h" +#include "curvefs/src/client/filesystem/utils.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +DeferSync::DeferSync(DeferSyncOption option) + : option_(option), + mutex_(), + running_(false), + thread_(), + sleeper_(), + inodes_() { +} + +void DeferSync::Start() { + if (!running_.exchange(true)) { + thread_ = std::thread(&DeferSync::SyncTask, this); + LOG(INFO) << "Defer sync thread start success"; + } +} + +void DeferSync::Stop() { + if (running_.exchange(false)) { + LOG(INFO) << "Stop defer sync thread..."; + sleeper_.interrupt(); + thread_.join(); + LOG(INFO) << "Defer sync thread stopped"; + } +} + +void DeferSync::SyncTask() { + std::vector> inodes; + for ( ;; ) { + bool running = sleeper_.wait_for(std::chrono::seconds(option_.delay)); + + { + LockGuard lk(mutex_); + inodes.swap(inodes_); + } + for (const auto& inode : inodes) { + UniqueLock lk(inode->GetUniqueLock()); + inode->Async(nullptr, true); + } + inodes.clear(); + + if (!running) { + break; + } + } +} + +void DeferSync::Push(const std::shared_ptr& inode) { + LockGuard lk(mutex_); + inodes_.emplace_back(inode); +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/src/client/filesystem/defer_sync.h b/curvefs/src/client/filesystem/defer_sync.h new file mode 100644 index 0000000000..0bd59bb9bc --- /dev/null +++ b/curvefs/src/client/filesystem/defer_sync.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Project: Curve + * Created Date: 2023-03-06 + * Author: Jingli Chen (Wine93) + */ + +#ifndef CURVEFS_SRC_CLIENT_FILESYSTEM_DEFER_SYNC_H_ +#define CURVEFS_SRC_CLIENT_FILESYSTEM_DEFER_SYNC_H_ + +#include +#include +#include + +#include "src/common/interruptible_sleeper.h" +#include "curvefs/src/client/common/config.h" +#include "curvefs/src/client/filesystem/meta.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +using ::curvefs::client::common::DeferSyncOption; + +using ::curve::common::Mutex; +using ::curve::common::LockGuard; +using ::curve::common::InterruptibleSleeper; + +class DeferSync { + public: + explicit DeferSync(DeferSyncOption option); + + void Start(); + + void Stop(); + + void Push(const std::shared_ptr& inode); + + private: + void SyncTask(); + + private: + DeferSyncOption option_; + Mutex mutex_; + std::atomic running_; + std::thread thread_; + InterruptibleSleeper sleeper_; + std::vector> inodes_; +}; + +} // namespace filesystem +} // namespace client +} // namespace curvefs + +#endif // CURVEFS_SRC_CLIENT_FILESYSTEM_DEFER_SYNC_H_ diff --git a/curvefs/src/client/filesystem/dir_cache.cpp b/curvefs/src/client/filesystem/dir_cache.cpp new file mode 100644 index 0000000000..3beed645fe --- /dev/null +++ b/curvefs/src/client/filesystem/dir_cache.cpp @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
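A sketch (not part of the patch) of the `DeferSync` lifecycle declared above; the elided `shared_ptr` element type is assumed to be `InodeWrapper`, matching `inode->Async()` in the implementation.

```
// Sketch only: start the background flusher, hand it a dirty inode, stop it.
#include <memory>

#include "curvefs/src/client/filesystem/defer_sync.h"

namespace fs = curvefs::client::filesystem;

void DeferSyncSketch(const curvefs::client::common::DeferSyncOption& option,
                     const std::shared_ptr<curvefs::client::InodeWrapper>& inode) {
    fs::DeferSync deferSync(option);
    deferSync.Start();      // spawns SyncTask()
    deferSync.Push(inode);  // queued; flushed asynchronously after |delay| seconds
    deferSync.Stop();       // interrupts the sleeper and joins the thread
                            // (pending inodes are still flushed before exit)
}
```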
+ */ + +/* + * Project: Curve + * Created Date: 2023-03-07 + * Author: Jingli Chen (Wine93) + */ + +#include + +#include "curvefs/src/client/filesystem/dir_cache.h" +#include "curvefs/src/client/filesystem/utils.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +DirEntryList::DirEntryList() + : rwlock_(), + mtime_(), + entries_(), + attrs_() {} + +size_t DirEntryList::Size() { + ReadLockGuard lk(rwlock_); + return entries_.size(); +} + +void DirEntryList::Add(const DirEntry& dirEntry) { + WriteLockGuard lk(rwlock_); + entries_.push_back(std::move(dirEntry)); + attrs_[dirEntry.ino] = &entries_.back(); +} + +bool DirEntryList::Get(Ino ino, DirEntry* dirEntry) { + ReadLockGuard lk(rwlock_); + auto iter = attrs_.find(ino); + if (iter == attrs_.end()) { + return false; + } + *dirEntry = *iter->second; + return true; +} + +bool DirEntryList::UpdateAttr(Ino ino, const InodeAttr& attr) { + WriteLockGuard lk(rwlock_); + auto iter = attrs_.find(ino); + if (iter == attrs_.end()) { + return false; + } + + DirEntry* dirEntry = iter->second; + dirEntry->attr = std::move(attr); + return true; +} + +bool DirEntryList::UpdateLength(Ino ino, const InodeAttr& open) { + WriteLockGuard lk(rwlock_); + auto iter = attrs_.find(ino); + if (iter == attrs_.end()) { + return false; + } + + InodeAttr* attr = &iter->second->attr; + attr->set_length(open.length()); + attr->set_mtime(open.mtime()); + attr->set_mtime_ns(open.mtime_ns()); + if (AttrCtime(open) > AttrCtime(*attr)) { + attr->set_ctime(open.ctime()); + attr->set_ctime_ns(open.ctime_ns()); + } + return true; +} + +void DirEntryList::Iterate(IterateHandler handler) { + ReadLockGuard lk(rwlock_); + for (auto iter = entries_.begin(); iter != entries_.end(); iter++) { + handler(&(*iter)); + } +} + +void DirEntryList::Clear() { + WriteLockGuard lk(rwlock_); + entries_.clear(); + attrs_.clear(); +} + +void DirEntryList::SetMtime(TimeSpec mtime) { + WriteLockGuard lk(rwlock_); + mtime_ = mtime; +} + +TimeSpec DirEntryList::GetMtime() { + ReadLockGuard lk(rwlock_); + return mtime_; +} + +DirCache::DirCache(DirCacheOption option) + : rwlock_(), + nentries_(0), + option_(option) { + lru_ = std::make_shared(0); // control size by ourself + mq_ = std::make_shared("dircache", 10000); + mq_->Subscribe([&](const std::shared_ptr& entries){ + entries->Clear(); + }); + + LOG(INFO) << "Using directory lru cache, capacity = " << option_.lruSize; +} + +void DirCache::Start() { + mq_->Start(); +} + +void DirCache::Stop() { + WriteLockGuard lk(rwlock_); + Evit(option_.lruSize); + mq_->Stop(); +} + +void DirCache::Delete(Ino parent, + std::shared_ptr entries, + bool evit) { + nentries_ -= entries->Size(); + mq_->Publish(entries); // clear entries in background + lru_->Remove(parent); + + VLOG(1) << "Delete directory cache (evit=" << evit << "): " + << "parent = " << parent + << ", mtime = " << entries->GetMtime() + << ", size = " << entries->Size() + << ", nentries = " << nentries_; +} + +void DirCache::Evit(size_t size) { + Ino parent; + std::shared_ptr entries; + while (nentries_ + size >= option_.lruSize) { + bool yes = lru_->GetLast(&parent, &entries); + if (!yes) { + break; + } + Delete(parent, entries, true); + } +} + +void DirCache::Put(Ino parent, std::shared_ptr entries) { + WriteLockGuard lk(rwlock_); + Evit(entries->Size()); // it guarantee put entries success + lru_->Put(parent, entries); + nentries_ += entries->Size(); + + VLOG(1) << "Insert directory cache: parent = " << parent + << ", mtime = " << entries->GetMtime() + << ", size = " << 
entries->Size() + << ", nentries = " << nentries_; +} + +bool DirCache::Get(Ino parent, std::shared_ptr* entries) { + ReadLockGuard lk(rwlock_); + return lru_->Get(parent, entries); +} + +void DirCache::Drop(Ino parent) { + WriteLockGuard lk(rwlock_); + std::shared_ptr entries; + bool yes = lru_->Get(parent, &entries); + if (yes) { + Delete(parent, entries, false); + } +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/src/client/filesystem/dir_cache.h b/curvefs/src/client/filesystem/dir_cache.h new file mode 100644 index 0000000000..6aa57fe5d5 --- /dev/null +++ b/curvefs/src/client/filesystem/dir_cache.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-03-07 + * Author: Jingli Chen (Wine93) + */ + +#ifndef CURVEFS_SRC_CLIENT_FILESYSTEM_DIR_CACHE_H_ +#define CURVEFS_SRC_CLIENT_FILESYSTEM_DIR_CACHE_H_ + +#include +#include +#include + +#include "src/common/lru_cache.h" +#include "src/common/concurrent/concurrent.h" +#include "curvefs/src/client/common/config.h" +#include "curvefs/src/client/filesystem/meta.h" +#include "curvefs/src/client/filesystem/message_queue.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +using ::curve::common::LRUCache; +using ::curve::common::RWLock; +using ::curve::common::ReadLockGuard; +using ::curve::common::WriteLockGuard; +using ::curvefs::client::common::DirCacheOption; + +class DirEntryList { + public: + using IterateHandler = std::function; + + public: + DirEntryList(); + + size_t Size(); + + void Add(const DirEntry& dirEntry); + + void Iterate(IterateHandler handler); + + bool Get(Ino ino, DirEntry* dirEntry); + + bool UpdateAttr(Ino ino, const InodeAttr& attr); + + bool UpdateLength(Ino ino, const InodeAttr& open); + + void Clear(); + + void SetMtime(TimeSpec mtime); + + TimeSpec GetMtime(); + + private: + RWLock rwlock_; + TimeSpec mtime_; + std::list entries_; + std::map attrs_; +}; + +class DirCache { + public: + using LRUType = LRUCache>; + using MessageType = std::shared_ptr; + using MessageQueueType = MessageQueue; + + public: + explicit DirCache(DirCacheOption option); + + void Start(); + + void Stop(); + + void Put(Ino parent, std::shared_ptr entries); + + bool Get(Ino parent, std::shared_ptr* entries); + + void Drop(Ino parent); + + private: + void Delete(Ino parent, std::shared_ptr entries, bool evit); + + void Evit(size_t size); + + private: + RWLock rwlock_; + size_t nentries_; + DirCacheOption option_; + std::shared_ptr lru_; + std::shared_ptr mq_; +}; + +} // namespace filesystem +} // namespace client +} // namespace curvefs + +#endif // CURVEFS_SRC_CLIENT_FILESYSTEM_DIR_CACHE_H_ diff --git a/curvefs/src/client/filesystem/error.cpp b/curvefs/src/client/filesystem/error.cpp new file mode 100644 index 0000000000..f8a677497b --- /dev/null +++ b/curvefs/src/client/filesystem/error.cpp @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2023 NetEase Inc. 
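A sketch (not part of the patch) of the readdir-side flow around `DirCache`; the elided template arguments are assumed to be `DirEntryList` as in the implementation above, and the metaserver fetch is only indicated by a comment.

```
// Sketch only: serve a directory listing from the cache, refill it on a miss.
#include <memory>

#include "curvefs/src/client/filesystem/dir_cache.h"

namespace fs = curvefs::client::filesystem;

void ReaddirSketch(fs::DirCache* dirCache, fs::Ino parent,
                   const fs::InodeAttr& parentAttr) {
    std::shared_ptr<fs::DirEntryList> entries;
    if (!dirCache->Get(parent, &entries)) {
        entries = std::make_shared<fs::DirEntryList>();
        // ... fill |entries| from the metaserver (ListDentry + attributes) ...
        entries->SetMtime(fs::TimeSpec(parentAttr.mtime(), parentAttr.mtime_ns()));
        dirCache->Put(parent, entries);  // may evict least recently used directories
    }
    entries->Iterate([](fs::DirEntry* entry) {
        // ... emit one entry to the kernel buffer ...
    });
}
```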
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-03-06 + * Author: Jingli Chen (Wine93) + */ + +#include +#include +#include + +#include "curvefs/src/client/filesystem/error.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +static const std::map> errors = { + { CURVEFS_ERROR::OK, { 0, "OK"} }, + { CURVEFS_ERROR::INTERNAL, { EIO, "internal error" } }, + { CURVEFS_ERROR::UNKNOWN, { -1, "unknown" } }, + { CURVEFS_ERROR::EXISTS, { EEXIST, "inode or dentry already exist" } }, + { CURVEFS_ERROR::NOTEXIST, { ENOENT, "inode or dentry not exist" } }, + { CURVEFS_ERROR::NO_SPACE, { ENOSPC, "no space to alloc" } }, + { CURVEFS_ERROR::BAD_FD, { EBADF, "bad file number" } }, + { CURVEFS_ERROR::INVALIDPARAM , { EINVAL , "invalid argument" } }, + { CURVEFS_ERROR::NOPERMISSION, { EACCES, "permission denied" } }, + { CURVEFS_ERROR::NOTEMPTY, { ENOTEMPTY, "directory not empty" } }, + { CURVEFS_ERROR::NOFLUSH, { -1, "no flush" } }, + { CURVEFS_ERROR::NOTSUPPORT, { EOPNOTSUPP, "operation not supported" } }, + { CURVEFS_ERROR::NAMETOOLONG, { ENAMETOOLONG, "file name too long" } }, + { CURVEFS_ERROR::MOUNT_POINT_EXIST, { -1, "mount point already exist" } }, + { CURVEFS_ERROR::MOUNT_FAILED, { -1, "mount failed" } }, + { CURVEFS_ERROR::OUT_OF_RANGE, { ERANGE, "out of range" } }, + { CURVEFS_ERROR::NODATA, { ENODATA, "no data available" } }, + { CURVEFS_ERROR::IO_ERROR, { EIO, "I/O error" } }, + { CURVEFS_ERROR::STALE, { ESTALE, "stale file handler" } }, + { CURVEFS_ERROR::NOSYS, { ENOSYS, "invalid system call" } }, +}; + +std::string StrErr(CURVEFS_ERROR code) { + auto it = errors.find(code); + if (it != errors.end()) { + return it->second.second; + } + return "unknown"; +} + +int SysErr(CURVEFS_ERROR code) { + int syscode = -1; + auto it = errors.find(code); + if (it != errors.end()) { + syscode = it->second.first; + } + return (syscode == -1) ? 
EIO : syscode; +} + +std::ostream &operator<<(std::ostream &os, CURVEFS_ERROR code) { + os << static_cast(code) << "[" << [code]() { + auto it = errors.find(code); + if (it != errors.end()) { + return it->second.second; + } + + return std::string{"Unknown"}; + }() << "]"; + + return os; +} + +CURVEFS_ERROR ToFSError(MetaStatusCode code) { + static std::map errs = { + { MetaStatusCode::OK, CURVEFS_ERROR::OK }, + { MetaStatusCode::NOT_FOUND, CURVEFS_ERROR::NOTEXIST }, + { MetaStatusCode::PARAM_ERROR, CURVEFS_ERROR::INVALIDPARAM }, + { MetaStatusCode::INODE_EXIST, CURVEFS_ERROR::EXISTS }, + { MetaStatusCode::DENTRY_EXIST, CURVEFS_ERROR::EXISTS }, + { MetaStatusCode::SYM_LINK_EMPTY, CURVEFS_ERROR::INTERNAL }, + { MetaStatusCode::RPC_ERROR, CURVEFS_ERROR::INTERNAL }, + }; + + auto it = errs.find(code); + if (it != errs.end()) { + return it->second; + } + return CURVEFS_ERROR::UNKNOWN; +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/src/client/error_code.h b/curvefs/src/client/filesystem/error.h similarity index 66% rename from curvefs/src/client/error_code.h rename to curvefs/src/client/filesystem/error.h index a9eabee53b..1f837ca9d6 100644 --- a/curvefs/src/client/error_code.h +++ b/curvefs/src/client/filesystem/error.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 NetEase Inc. + * Copyright (c) 2023 NetEase Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,22 +14,27 @@ * limitations under the License. */ - /* - * Project: curve - * Created Date: Thur May 27 2021 - * Author: xuchaojie + * Project: Curve + * Created Date: 2023-03-06 + * Author: Jingli Chen (Wine93) */ -#ifndef CURVEFS_SRC_CLIENT_ERROR_CODE_H_ -#define CURVEFS_SRC_CLIENT_ERROR_CODE_H_ +#include +#include +#include #include "curvefs/proto/metaserver.pb.h" +#ifndef CURVEFS_SRC_CLIENT_FILESYSTEM_ERROR_H_ +#define CURVEFS_SRC_CLIENT_FILESYSTEM_ERROR_H_ + namespace curvefs { namespace client { +namespace filesystem { + +using ::curvefs::metaserver::MetaStatusCode; -// notice : the error code should be negative. enum class CURVEFS_ERROR { OK = 0, INTERNAL = -1, @@ -49,15 +54,21 @@ enum class CURVEFS_ERROR { OUT_OF_RANGE = -15, NODATA = -16, IO_ERROR = -17, + CACHETOOSMALL = -18, + STALE = -19, + NOSYS = -20, }; +std::string StrErr(CURVEFS_ERROR code); + +int SysErr(CURVEFS_ERROR code); std::ostream &operator<<(std::ostream &os, CURVEFS_ERROR code); -CURVEFS_ERROR MetaStatusCodeToCurvefsErrCode( - ::curvefs::metaserver::MetaStatusCode code); +CURVEFS_ERROR ToFSError(MetaStatusCode code); +} // namespace filesystem } // namespace client } // namespace curvefs -#endif // CURVEFS_SRC_CLIENT_ERROR_CODE_H_ +#endif // CURVEFS_SRC_CLIENT_FILESYSTEM_ERROR_H_ diff --git a/curvefs/src/client/filesystem/filesystem.cpp b/curvefs/src/client/filesystem/filesystem.cpp new file mode 100644 index 0000000000..795c896a5b --- /dev/null +++ b/curvefs/src/client/filesystem/filesystem.cpp @@ -0,0 +1,353 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
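A sketch (not part of the patch) showing how the helpers above turn an internal status into the errno handed back through `fuse_reply_err` and into a printable message.

```
// Sketch only: MetaStatusCode -> CURVEFS_ERROR -> errno / message.
#include <iostream>

#include "curvefs/src/client/filesystem/error.h"

namespace fs = curvefs::client::filesystem;

void ErrorSketch() {
    fs::CURVEFS_ERROR rc =
        fs::ToFSError(curvefs::metaserver::MetaStatusCode::NOT_FOUND);
    int err = fs::SysErr(rc);      // ENOENT; codes without a mapping fall back to EIO
    std::cout << rc << std::endl;  // numeric code plus "[inode or dentry not exist]"
    // fuse_reply_err(req, err) would be called with |err| in the real reply path.
    (void)err;
}
```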
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-03-08 + * Author: Jingli Chen (Wine93) + */ + +#include + +#include "curvefs/src/client/filesystem/filesystem.h" +#include "curvefs/src/client/filesystem/utils.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +FileSystem::FileSystem(FileSystemOption option, ExternalMember member) + : option_(option), member(member) { + deferSync_ = std::make_shared(option.deferSyncOption); + negative_ = std::make_shared(option.lookupCacheOption); + dirCache_ = std::make_shared(option.dirCacheOption); + openFiles_ = std::make_shared(option_.openFilesOption, + deferSync_); + attrWatcher_ = std::make_shared(option_.attrWatcherOption, + openFiles_, dirCache_); + handlerManager_ = std::make_shared(); + rpc_ = std::make_shared(option.rpcOption, member); +} + +void FileSystem::Run() { + deferSync_->Start(); + dirCache_->Start(); +} + +void FileSystem::Destory() { + openFiles_->CloseAll(); + deferSync_->Stop(); + dirCache_->Stop(); +} + +void FileSystem::Attr2Stat(InodeAttr* attr, struct stat* stat) { + std::memset(stat, 0, sizeof(struct stat)); + stat->st_ino = attr->inodeid(); // inode number + stat->st_mode = attr->mode(); // permission mode + stat->st_nlink = attr->nlink(); // number of links + stat->st_uid = attr->uid(); // user ID of owner + stat->st_gid = attr->gid(); // group ID of owner + stat->st_size = attr->length(); // total size, in bytes + stat->st_rdev = attr->rdev(); // device ID (if special file) + stat->st_atim.tv_sec = attr->atime(); // time of last access + stat->st_atim.tv_nsec = attr->atime_ns(); + stat->st_mtim.tv_sec = attr->mtime(); // time of last modification + stat->st_mtim.tv_nsec = attr->mtime_ns(); + stat->st_ctim.tv_sec = attr->ctime(); // time of last status change + stat->st_ctim.tv_nsec = attr->ctime_ns(); + stat->st_blksize = option_.blockSize; // blocksize for file system I/O + stat->st_blocks = 0; // number of 512B blocks allocated + if (IsS3File(*attr)) { + stat->st_blocks = (attr->length() + 511) / 512; + } +} + +void FileSystem::Entry2Param(EntryOut* entryOut, + fuse_entry_param* e) { + std::memset(e, 0, sizeof(fuse_entry_param)); + e->ino = entryOut->attr.inodeid(); + e->generation = 0; + Attr2Stat(&entryOut->attr, &e->attr); + e->entry_timeout = entryOut->entryTimeout; + e->attr_timeout = entryOut->attrTimeout; +} + +void FileSystem::SetEntryTimeout(EntryOut* entryOut) { + auto option = option_.kernelCacheOption; + if (IsDir(entryOut->attr)) { + entryOut->entryTimeout = option.dirEntryTimeoutSec; + entryOut->attrTimeout = option.dirAttrTimeoutSec; + } else { + entryOut->entryTimeout = option.entryTimeoutSec; + entryOut->attrTimeout = option.attrTimeoutSec; + } +} + +void FileSystem::SetAttrTimeout(AttrOut* attrOut) { + auto option = option_.kernelCacheOption; + if (IsDir(attrOut->attr)) { + attrOut->attrTimeout = option.dirAttrTimeoutSec; + } else { + attrOut->attrTimeout = option.attrTimeoutSec; + } +} + +// fuse reply* +void FileSystem::ReplyError(Request req, CURVEFS_ERROR code) { + fuse_reply_err(req, SysErr(code)); +} + +void FileSystem::ReplyEntry(Request req, + EntryOut* entryOut) { + AttrWatcherGuard watcher(attrWatcher_, &entryOut->attr, + ReplyType::ATTR, true); + fuse_entry_param e; + SetEntryTimeout(entryOut); + Entry2Param(entryOut, &e); + fuse_reply_entry(req, &e); +} + +void FileSystem::ReplyAttr(Request req, + AttrOut* attrOut) { + AttrWatcherGuard watcher(attrWatcher_, 
&attrOut->attr, + ReplyType::ATTR, true); + struct stat stat; + SetAttrTimeout(attrOut); + Attr2Stat(&attrOut->attr, &stat); + fuse_reply_attr(req, &stat, attrOut->attrTimeout); +} + +void FileSystem::ReplyReadlink(Request req, const std::string& link) { + fuse_reply_readlink(req, link.c_str()); +} + +void FileSystem::ReplyOpen(Request req, FileInfo* fi) { + fuse_reply_open(req, fi); +} + +void FileSystem::ReplyOpen(Request req, FileOut* fileOut) { + AttrWatcherGuard watcher(attrWatcher_, &fileOut->attr, + ReplyType::ONLY_LENGTH, true); + fuse_reply_open(req, fileOut->fi); +} + +void FileSystem::ReplyData(Request req, + struct fuse_bufvec *bufv, + enum fuse_buf_copy_flags flags) { + fuse_reply_data(req, bufv, flags); +} + +void FileSystem::ReplyWrite(Request req, FileOut* fileOut) { + AttrWatcherGuard watcher(attrWatcher_, &fileOut->attr, + ReplyType::ONLY_LENGTH, true); + fuse_reply_write(req, fileOut->nwritten); +} + +void FileSystem::ReplyBuffer(Request req, const char *buf, size_t size) { + fuse_reply_buf(req, buf, size); +} + +void FileSystem::ReplyStatfs(Request req, const struct statvfs *stbuf) { + fuse_reply_statfs(req, stbuf); +} + +void FileSystem::ReplyXattr(Request req, size_t size) { + fuse_reply_xattr(req, size); +} + +void FileSystem::ReplyCreate(Request req, EntryOut* entryOut, FileInfo* fi) { + AttrWatcherGuard watcher(attrWatcher_, &entryOut->attr, + ReplyType::ATTR, true); + fuse_entry_param e; + SetEntryTimeout(entryOut); + Entry2Param(entryOut, &e); + fuse_reply_create(req, &e, fi); +} + +void FileSystem::AddDirEntry(Request req, + DirBufferHead* buffer, + DirEntry* dirEntry) { + struct stat stat; + std::memset(&stat, 0, sizeof(stat)); + stat.st_ino = dirEntry->ino; + + // add a directory entry to the buffer + size_t oldsize = buffer->size; + const char* name = dirEntry->name.c_str(); + buffer->size += fuse_add_direntry(req, NULL, 0, name, NULL, 0); + buffer->p = static_cast(realloc(buffer->p, buffer->size)); + fuse_add_direntry(req, + buffer->p + oldsize, // char* buf + buffer->size - oldsize, // size_t bufisze + name, &stat, buffer->size); +} + +void FileSystem::AddDirEntryPlus(Request req, + DirBufferHead* buffer, + DirEntry* dirEntry) { + AttrWatcherGuard watcher(attrWatcher_, &dirEntry->attr, + ReplyType::ATTR, false); + struct fuse_entry_param e; + EntryOut entryOut(dirEntry->attr); + SetEntryTimeout(&entryOut); + Entry2Param(&entryOut, &e); + + // add a directory entry to the buffer with the attributes + size_t oldsize = buffer->size; + const char* name = dirEntry->name.c_str(); + buffer->size += fuse_add_direntry_plus(req, NULL, 0, name, NULL, 0); + buffer->p = static_cast(realloc(buffer->p, buffer->size)); + fuse_add_direntry_plus(req, + buffer->p + oldsize, // char* buf + buffer->size - oldsize, // size_t bufisze + name, &e, buffer->size); +} + +// handler* +std::shared_ptr FileSystem::NewHandler() { + return handlerManager_->NewHandler(); +} + +std::shared_ptr FileSystem::FindHandler(uint64_t fh) { + return handlerManager_->FindHandler(fh); +} + +void FileSystem::ReleaseHandler(uint64_t fh) { + return handlerManager_->ReleaseHandler(fh); +} + +FileSystemMember FileSystem::BorrowMember() { + return FileSystemMember(deferSync_, openFiles_, attrWatcher_); +} + +// fuse request* +CURVEFS_ERROR FileSystem::Lookup(Request req, + Ino parent, + const std::string& name, + EntryOut* entryOut) { + if (name.size() > option_.maxNameLength) { + return CURVEFS_ERROR::NAMETOOLONG; + } + + bool yes = negative_->Get(parent, name); + if (yes) { + return 
CURVEFS_ERROR::NOTEXIST; + } + + auto rc = rpc_->Lookup(parent, name, entryOut); + if (rc == CURVEFS_ERROR::OK) { + negative_->Delete(parent, name); + } else if (rc == CURVEFS_ERROR::NOTEXIST) { + negative_->Put(parent, name); + } + return rc; +} + +CURVEFS_ERROR FileSystem::GetAttr(Request req, Ino ino, AttrOut* attrOut) { + InodeAttr attr; + auto rc = rpc_->GetAttr(ino, &attr); + if (rc == CURVEFS_ERROR::OK) { + *attrOut = AttrOut(attr); + } + return rc; +} + +CURVEFS_ERROR FileSystem::OpenDir(Request req, Ino ino, FileInfo* fi) { + InodeAttr attr; + CURVEFS_ERROR rc = rpc_->GetAttr(ino, &attr); + if (rc != CURVEFS_ERROR::OK) { + return rc; + } + + // revalidate directory cache + std::shared_ptr entries; + bool yes = dirCache_->Get(ino, &entries); + if (yes) { + if (entries->GetMtime() != AttrMtime(attr)) { + dirCache_->Drop(ino); + } + } + + auto handler = NewHandler(); + handler->mtime = AttrMtime(attr); + fi->fh = handler->fh; + return CURVEFS_ERROR::OK; +} + +CURVEFS_ERROR FileSystem::ReadDir(Request req, + Ino ino, + FileInfo* fi, + std::shared_ptr* entries) { + bool yes = dirCache_->Get(ino, entries); + if (yes) { + return CURVEFS_ERROR::OK; + } + + CURVEFS_ERROR rc = rpc_->ReadDir(ino, entries); + if (rc != CURVEFS_ERROR::OK) { + return rc; + } + + (*entries)->SetMtime(FindHandler(fi->fh)->mtime); + dirCache_->Put(ino, *entries); + return CURVEFS_ERROR::OK; +} + +CURVEFS_ERROR FileSystem::ReleaseDir(Request req, Ino ino, FileInfo* fi) { + ReleaseHandler(fi->fh); + return CURVEFS_ERROR::OK; +} + +CURVEFS_ERROR FileSystem::Open(Request req, Ino ino, FileInfo* fi) { + std::shared_ptr inode; + bool yes = openFiles_->IsOpened(ino, &inode); + if (yes) { + openFiles_->Open(ino, inode); + // fi->keep_cache = 1; + return CURVEFS_ERROR::OK; + } + + CURVEFS_ERROR rc = rpc_->Open(ino, &inode); + if (rc != CURVEFS_ERROR::OK) { + return rc; + } + + TimeSpec mtime; + yes = attrWatcher_->GetMtime(ino, &mtime); + if (!yes) { + // It is rare which only arise when attribute evited for attr-watcher. + LOG(WARNING) << "open(" << ino << "): stale file handler" + << ": attribute not found in wacther"; + return CURVEFS_ERROR::STALE; + } else if (mtime != InodeMtime(inode)) { + LOG(WARNING) << "open(" << ino << "): stale file handler" + << ", cache(" << mtime << ") vs remote(" + << InodeMtime(inode) << ")"; + return CURVEFS_ERROR::STALE; + } + + openFiles_->Open(ino, inode); + return CURVEFS_ERROR::OK; +} + +CURVEFS_ERROR FileSystem::Release(Request req, Ino ino) { + openFiles_->Close(ino); + return CURVEFS_ERROR::OK; +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/src/client/filesystem/filesystem.h b/curvefs/src/client/filesystem/filesystem.h new file mode 100644 index 0000000000..c2d48db708 --- /dev/null +++ b/curvefs/src/client/filesystem/filesystem.h @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
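A sketch (not part of the patch) of how a FUSE opendir/readdir pair is expected to drive the `FileSystem` facade above; `req` and `fi` come from libfuse, the elided `shared_ptr` element type is assumed to be `DirEntryList`, and error handling is condensed.

```
// Sketch only: opendir revalidates the dir cache, readdir is served from it.
#include <memory>

#include "curvefs/src/client/filesystem/filesystem.h"

namespace fs = curvefs::client::filesystem;

void OpenDirSketch(const std::shared_ptr<fs::FileSystem>& filesystem,
                   fs::Request req, fs::Ino ino, fs::FileInfo* fi) {
    fs::CURVEFS_ERROR rc = filesystem->OpenDir(req, ino, fi);  // drops stale cache, allocates fh
    if (rc != fs::CURVEFS_ERROR::OK) {
        filesystem->ReplyError(req, rc);
        return;
    }
    filesystem->ReplyOpen(req, fi);

    std::shared_ptr<fs::DirEntryList> entries;
    rc = filesystem->ReadDir(req, ino, fi, &entries);  // hits DirCache when possible
    // ... AddDirEntry()/AddDirEntryPlus() per entry, then ReplyBuffer() ...
    (void)rc;
}
```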
+ */ + +/* + * Project: Curve + * Created Date: 2023-03-08 + * Author: Jingli Chen (Wine93) + */ + +#ifndef CURVEFS_SRC_CLIENT_FILESYSTEM_FILESYSTEM_H_ +#define CURVEFS_SRC_CLIENT_FILESYSTEM_FILESYSTEM_H_ + +#include + +#include +#include + +#include "curvefs/src/client/filesystem/error.h" +#include "curvefs/src/client/common/config.h" +#include "curvefs/src/client/filesystem/package.h" +#include "curvefs/src/client/filesystem/meta.h" +#include "curvefs/src/client/filesystem/lookup_cache.h" +#include "curvefs/src/client/filesystem/dir_cache.h" +#include "curvefs/src/client/filesystem/openfile.h" +#include "curvefs/src/client/filesystem/attr_watcher.h" +#include "curvefs/src/client/filesystem/rpc_client.h" +#include "curvefs/src/client/filesystem/defer_sync.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +using ::curvefs::client::common::FileSystemOption; + +struct FileSystemMember { + FileSystemMember(std::shared_ptr deferSync, + std::shared_ptr openFiles, + std::shared_ptr attrWatcher) + : deferSync(deferSync), + openFiles(openFiles), + attrWatcher(attrWatcher) {} + + std::shared_ptr deferSync; + std::shared_ptr openFiles; + std::shared_ptr attrWatcher; +}; + +class FileSystem { + public: + FileSystem(struct FileSystemOption option, ExternalMember member); + + void Run(); + + void Destory(); + + // fuse request + CURVEFS_ERROR Lookup(Request req, + Ino parent, + const std::string& name, + EntryOut* entryOut); + + CURVEFS_ERROR GetAttr(Request req, Ino ino, AttrOut* attrOut); + + CURVEFS_ERROR OpenDir(Request req, Ino ino, FileInfo* fi); + + CURVEFS_ERROR ReadDir(Request req, + Ino ino, + FileInfo* fi, + std::shared_ptr* entries); + + CURVEFS_ERROR ReleaseDir(Request req, Ino ino, FileInfo* fi); + + CURVEFS_ERROR Open(Request req, Ino ino, FileInfo* fi); + + CURVEFS_ERROR Release(Request req, Ino ino); + + // fuse reply: we control all replies to vfs layer in same entrance. + void ReplyError(Request req, CURVEFS_ERROR code); + + void ReplyEntry(Request req, EntryOut* entryOut); + + void ReplyAttr(Request req, AttrOut* attrOut); + + void ReplyReadlink(Request req, const std::string& link); + + void ReplyOpen(Request req, FileInfo *fi); + + void ReplyOpen(Request req, FileOut* fileOut); + + void ReplyData(Request req, + struct fuse_bufvec *bufv, + enum fuse_buf_copy_flags flags); + + void ReplyWrite(Request req, FileOut* fileOut); + + void ReplyBuffer(Request req, const char *buf, size_t size); + + void ReplyStatfs(Request req, const struct statvfs *stbuf); + + void ReplyXattr(Request req, size_t size); + + void ReplyCreate(Request req, EntryOut* entryOut, FileInfo* fi); + + void AddDirEntry(Request req, + DirBufferHead* buffer, + DirEntry* dirEntry); + + void AddDirEntryPlus(Request req, + DirBufferHead* buffer, + DirEntry* dirEntry); + + // utility: file handler + std::shared_ptr NewHandler(); + + std::shared_ptr FindHandler(uint64_t fh); + + void ReleaseHandler(uint64_t fh); + + // utility: others + FileSystemMember BorrowMember(); + + private: + FRIEND_TEST(FileSystemTest, Attr2Stat); + FRIEND_TEST(FileSystemTest, Entry2Param); + FRIEND_TEST(FileSystemTest, SetEntryTimeout); + FRIEND_TEST(FileSystemTest, SetAttrTimeout); + + // utility: convert to system type. 
+ void Attr2Stat(InodeAttr* attr, struct stat* stat); + + void Entry2Param(EntryOut* entryOut, fuse_entry_param* e); + + // utility: set entry/attribute timeout + void SetEntryTimeout(EntryOut* entryOut); + + void SetAttrTimeout(AttrOut* attrOut); + + private: + FileSystemOption option_; + ExternalMember member; + std::shared_ptr deferSync_; + std::shared_ptr negative_; + std::shared_ptr dirCache_; + std::shared_ptr openFiles_; + std::shared_ptr attrWatcher_; + std::shared_ptr handlerManager_; + std::shared_ptr rpc_; +}; + +} // namespace filesystem +} // namespace client +} // namespace curvefs + +#endif // CURVEFS_SRC_CLIENT_FILESYSTEM_FILESYSTEM_H_ diff --git a/curvefs/src/client/filesystem/lookup_cache.cpp b/curvefs/src/client/filesystem/lookup_cache.cpp new file mode 100644 index 0000000000..d5c473a114 --- /dev/null +++ b/curvefs/src/client/filesystem/lookup_cache.cpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-03-31 + * Author: Jingli Chen (Wine93) + */ + +#include + +#include + +#include "absl/strings/str_format.h" +#include "curvefs/src/client/filesystem/utils.h" +#include "curvefs/src/client/filesystem/lookup_cache.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +#define RETURN_FALSE_IF_DISABLED() \ + do { \ + if (!enable_) { \ + return false; \ + } \ + } while (0) + + +LookupCache::LookupCache(LookupCacheOption option) + : enable_(option.negativeTimeoutSec > 0), + rwlock_(), + option_(option) { + lru_ = std::make_shared(option.lruSize); + if (enable_) { + LOG(INFO) << "Using lookup negative lru cache" + << ", timeout = " << option.negativeTimeoutSec + << ", capacity = " << option.lruSize; + } +} + +std::string LookupCache::CacheKey(Ino parent, const std::string& name) { + return absl::StrFormat("%d:%s", parent, name); +} + +bool LookupCache::Get(Ino parent, const std::string& name) { + RETURN_FALSE_IF_DISABLED(); + ReadLockGuard lk(rwlock_); + CacheEntry entry; + auto key = CacheKey(parent, name); + bool yes = lru_->Get(key, &entry); + if (!yes) { + VLOG(1) << absl::StrFormat("Lookup cache not found: key(%d,%s)", + parent, name); + return false; + } else if (entry.uses < option_.minUses) { + return false; + } else if (entry.expireTime < Now()) { + return false; + } + return true; +} + +bool LookupCache::Put(Ino parent, const std::string& name) { + RETURN_FALSE_IF_DISABLED(); + WriteLockGuard lk(rwlock_); + CacheEntry entry; + auto key = CacheKey(parent, name); + bool yes = lru_->Get(key, &entry); + if (yes) { + entry.uses++; + } else { + entry.uses = 0; + } + + entry.expireTime = Now() + TimeSpec(option_.negativeTimeoutSec, 0); + lru_->Put(key, entry); + return true; +} + +bool LookupCache::Delete(Ino parent, const std::string& name) { + RETURN_FALSE_IF_DISABLED(); + WriteLockGuard lk(rwlock_); + auto key = CacheKey(parent, name); + lru_->Remove(key); + return true; +} + +} // namespace filesystem +} // namespace client +} // namespace 
curvefs diff --git a/curvefs/src/client/filesystem/lookup_cache.h b/curvefs/src/client/filesystem/lookup_cache.h new file mode 100644 index 0000000000..21925e4791 --- /dev/null +++ b/curvefs/src/client/filesystem/lookup_cache.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-03-31 + * Author: Jingli Chen (Wine93) + */ + +#ifndef CURVEFS_SRC_CLIENT_FILESYSTEM_LOOKUP_CACHE_H_ +#define CURVEFS_SRC_CLIENT_FILESYSTEM_LOOKUP_CACHE_H_ + +#include +#include + +#include "src/common/lru_cache.h" +#include "curvefs/src/client/common/config.h" +#include "curvefs/src/client/filesystem/meta.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +using ::curve::common::LRUCache; +using ::curve::common::RWLock; +using ::curve::common::ReadLockGuard; +using ::curve::common::WriteLockGuard; +using ::curvefs::client::common::LookupCacheOption; + +// memory cache for lookup result, now we only support cache negative result, +// and other positive entry will be cached in kernel. +class LookupCache { + public: + struct CacheEntry { + uint32_t uses; + TimeSpec expireTime; + }; + + using LRUType = LRUCache; + + public: + explicit LookupCache(LookupCacheOption option); + + bool Get(Ino parent, const std::string& name); + + bool Put(Ino parent, const std::string& name); + + bool Delete(Ino parent, const std::string& name); + + private: + std::string CacheKey(Ino parent, const std::string& name); + + private: + bool enable_; + RWLock rwlock_; + LookupCacheOption option_; + std::shared_ptr lru_; +}; + +} // namespace filesystem +} // namespace client +} // namespace curvefs + +#endif // CURVEFS_SRC_CLIENT_FILESYSTEM_LOOKUP_CACHE_H_ diff --git a/curvefs/src/client/filesystem/message_queue.h b/curvefs/src/client/filesystem/message_queue.h new file mode 100644 index 0000000000..a13802695b --- /dev/null +++ b/curvefs/src/client/filesystem/message_queue.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
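A sketch (not part of the patch) of the negative-lookup flow the class above implements: a cached negative answer is only honored after repeated misses (controlled by `minUses`) and before `negativeTimeoutSec` expires. The metaserver call is only indicated by a comment.

```
// Sketch only: consult the negative cache before asking the metaserver.
#include <string>

#include "curvefs/src/client/filesystem/lookup_cache.h"

namespace fs = curvefs::client::filesystem;

bool LookupSketch(fs::LookupCache* negative, fs::Ino parent,
                  const std::string& name) {
    if (negative->Get(parent, name)) {
        return false;                    // still known-missing: answer ENOENT locally
    }
    bool found = false;                  // ... ask the metaserver here ...
    if (found) {
        negative->Delete(parent, name);  // name exists again, drop the negative entry
    } else {
        negative->Put(parent, name);     // insert or refresh the negative entry
    }
    return found;
}
```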
+ */ + +/* + * Project: Curve + * Created Date: 2023-03-09 + * Author: Jingli Chen (Wine93) + */ + +#ifndef CURVEFS_SRC_CLIENT_FILESYSTEM_MESSAGE_QUEUE_H_ +#define CURVEFS_SRC_CLIENT_FILESYSTEM_MESSAGE_QUEUE_H_ + +#include + +#include +#include +#include + +#include "src/common/concurrent/task_queue.h" +#include "curvefs/src/common/threading.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +using ::curve::common::TaskQueue; +using ::curvefs::common::SetThreadName; + +template +class MessageQueue { + public: + using MessageHandler = std::function; + + public: + MessageQueue(const std::string& name, size_t bufferSize) + : name_(name), + running_(false), + thread_(), + handler_(), + queue_(bufferSize) {} + + void Start() { + if (running_.exchange(true)) { + return; + } + + thread_ = std::thread(&MessageQueue::Consumer, this); + LOG(INFO) << "MessageQueue [ " << name_ << " ] " + << "consumer thread start success"; + } + + void Stop() { + if (!running_.exchange(false)) { + return; + } + + auto wakeup = []() {}; + queue_.Push(wakeup); + + LOG(INFO) << "MessageQueue [ " << name_ << " ] " + << "consumer thread stoping..."; + + thread_.join(); + + LOG(INFO) << "MessageQueue [ " << name_ << " ] " + << "consumer thread stopped"; + } + + void Publish(MessageT message) { + if (handler_ != nullptr) { + queue_.Push([this, message](){ + this->handler_(message); + }); + } + } + + void Subscribe(MessageHandler handler) { + handler_ = handler; + } + + private: + void Consumer() { + SetThreadName(name_.c_str()); + while (running_.load(std::memory_order_relaxed)) { + queue_.Pop()(); + } + + while (queue_.Size() > 0) { + queue_.Pop()(); + } + } + + private: + std::string name_; + std::atomic running_; + std::thread thread_; + MessageHandler handler_; + TaskQueue queue_; +}; + +} // namespace filesystem +} // namespace client +} // namespace curvefs + +#endif // CURVEFS_SRC_CLIENT_FILESYSTEM_MESSAGE_QUEUE_H_ diff --git a/curvefs/src/client/filesystem/meta.cpp b/curvefs/src/client/filesystem/meta.cpp new file mode 100644 index 0000000000..721c7c859b --- /dev/null +++ b/curvefs/src/client/filesystem/meta.cpp @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
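A sketch (not part of the patch) of a `MessageQueue` with a string payload, mirroring how `DirCache` wires its background "clear entries" consumer; the elided template parameters are assumed to be the message type and a `std::function` over it.

```
// Sketch only: publish work to a named background consumer thread.
#include <iostream>
#include <memory>
#include <string>

#include "curvefs/src/client/filesystem/message_queue.h"

namespace fs = curvefs::client::filesystem;

void MessageQueueSketch() {
    auto mq = std::make_shared<fs::MessageQueue<std::string>>("demo", 128);
    mq->Subscribe([](const std::string& message) {
        std::cout << "consumed: " << message << std::endl;  // runs on the consumer thread
    });
    mq->Start();
    mq->Publish("hello");
    mq->Stop();  // pushes a wakeup task, drains the queue, joins the thread
}
```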
+ */ + +/* + * Project: Curve + * Created Date: 2023-03-06 + * Author: Jingli Chen (Wine93) + */ + +#include + +#include "absl/strings/str_format.h" +#include "curvefs/src/client/filesystem/meta.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +HandlerManager::HandlerManager() + : mutex_(), + dirBuffer_(std::make_shared()), + handlers_() {} + +HandlerManager::~HandlerManager() { + dirBuffer_->DirBufferFreeAll(); +} + +std::shared_ptr HandlerManager::NewHandler() { + UniqueLock lk(mutex_); + auto handler = std::make_shared(); + handler->fh = dirBuffer_->DirBufferNew(); + handler->buffer = dirBuffer_->DirBufferGet(handler->fh); + handler->padding = false; + handlers_.emplace(handler->fh, handler); + return handler; +} + +std::shared_ptr HandlerManager::FindHandler(uint64_t fh) { + UniqueLock lk(mutex_); + auto iter = handlers_.find(fh); + if (iter == handlers_.end()) { + return nullptr; + } + return iter->second; +} + +void HandlerManager::ReleaseHandler(uint64_t fh) { + UniqueLock lk(mutex_); + dirBuffer_->DirBufferRelease(fh); + handlers_.erase(fh); +} + +std::string StrMode(uint16_t mode) { + static std::map type2char = { + { S_IFSOCK, 's' }, + { S_IFLNK, 'l' }, + { S_IFREG, '-' }, + { S_IFBLK, 'b' }, + { S_IFDIR, 'd' }, + { S_IFCHR, 'c' }, + { S_IFIFO, 'f' }, + { 0, '?' }, + }; + + std::string s("?rwxrwxrwx"); + s[0] = type2char[mode & (S_IFMT & 0xffff)]; + if (mode & S_ISUID) { + s[3] = 's'; + } + if (mode & S_ISGID) { + s[6] = 's'; + } + if (mode & S_ISVTX) { + s[9] = 't'; + } + + for (auto i = 0; i < 9; i++) { + if ((mode & (1 << i)) == 0) { + if ((s[9-i] == 's') || (s[9-i] == 't')) { + s[9-i] &= 0xDF; + } else { + s[9-i] = '-'; + } + } + } + return s; +} + +namespace { + +std::string Attr2Str(const InodeAttr& attr) { + if (!attr.IsInitialized()) { + return ""; + } + + std::string smode; + return absl::StrFormat(" (%d,[%s:0%06o,%d,%d,%d,%d,%d,%d,%d])", + attr.inodeid(), StrMode(attr.mode()).c_str(), attr.mode(), attr.nlink(), + attr.uid(), attr.gid(), + attr.atime(), attr.mtime(), attr.ctime(), + attr.length()); +} + +} // namespace + +std::string StrEntry(EntryOut entryOut) { + return Attr2Str(entryOut.attr); +} + +std::string StrAttr(AttrOut attrOut) { + return Attr2Str(attrOut.attr); +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/src/client/filesystem/meta.h b/curvefs/src/client/filesystem/meta.h new file mode 100644 index 0000000000..1165a1fb4f --- /dev/null +++ b/curvefs/src/client/filesystem/meta.h @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
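For reference, a sketch (not part of the patch) of what `StrMode()` above produces for a few common mode values.

```
// Sketch only: StrMode() renders an ls-style type/permission string.
#include <sys/stat.h>

#include <iostream>

#include "curvefs/src/client/filesystem/meta.h"

int main() {
    using ::curvefs::client::filesystem::StrMode;
    std::cout << StrMode(S_IFDIR | 0755) << std::endl;  // "drwxr-xr-x"
    std::cout << StrMode(S_IFREG | 0644) << std::endl;  // "-rw-r--r--"
    std::cout << StrMode(S_IFLNK | 0777) << std::endl;  // "lrwxrwxrwx"
    return 0;
}
```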
+ */ + +/* + * Project: Curve + * Created Date: 2023-03-06 + * Author: Jingli Chen (Wine93) + */ + +#ifndef CURVEFS_SRC_CLIENT_FILESYSTEM_META_H_ +#define CURVEFS_SRC_CLIENT_FILESYSTEM_META_H_ + +#include +#include +#include +#include + +#include "src/common/concurrent/concurrent.h" +#include "curvefs/proto/metaserver.pb.h" +#include "curvefs/src/client/fuse_common.h" +#include "curvefs/src/client/dir_buffer.h" +#include "curvefs/src/client/inode_wrapper.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +using ::curve::common::Mutex; +using ::curve::common::RWLock; +using ::curve::common::UniqueLock; +using ::curve::common::ReadLockGuard; +using ::curve::common::WriteLockGuard; +using ::curvefs::metaserver::XAttr; +using ::curvefs::metaserver::Dentry; +using ::curvefs::metaserver::InodeAttr; +using ::curvefs::metaserver::FsFileType; +using ::curvefs::client::DirBufferHead; +using ::curvefs::client::InodeWrapper; + +using Ino = fuse_ino_t; +using Request = fuse_req_t; +using FileInfo = struct fuse_file_info; + +struct EntryOut { + EntryOut() = default; + + explicit EntryOut(InodeAttr attr) : attr(attr) {} + + InodeAttr attr; + double entryTimeout; + double attrTimeout; +}; + +struct AttrOut { + AttrOut() = default; + + explicit AttrOut(InodeAttr attr) : attr(attr) {} + + InodeAttr attr; + double attrTimeout; +}; + +struct DirEntry { + DirEntry() = default; + + DirEntry(Ino ino, const std::string& name, InodeAttr attr) + : ino(ino), name(name), attr(attr) {} + + Ino ino; + std::string name; + InodeAttr attr; +}; + +struct FileOut { + FileOut() = default; + + FileOut(FileInfo* fi, InodeAttr attr) + : fi(fi), attr(attr), nwritten(0) {} + + FileOut(InodeAttr attr, size_t nwritten) + : fi(nullptr), attr(attr), nwritten(nwritten) {} + + FileInfo* fi; + InodeAttr attr; + size_t nwritten; +}; + +struct TimeSpec { + TimeSpec() : seconds(0), nanoSeconds(0) {} + + TimeSpec(uint64_t seconds, uint32_t nanoSeconds) + : seconds(seconds), nanoSeconds(nanoSeconds) {} + + TimeSpec(const TimeSpec& time) + : seconds(time.seconds), nanoSeconds(time.nanoSeconds) {} + + TimeSpec& operator=(const TimeSpec& time) = default; + + TimeSpec operator+(const TimeSpec& time) const { + return TimeSpec(seconds + time.seconds, nanoSeconds + time.nanoSeconds); + } + + uint64_t seconds; + uint32_t nanoSeconds; +}; + +struct FileHandler { + uint64_t fh; + DirBufferHead* buffer; + TimeSpec mtime; + bool padding; // padding buffer +}; + +class HandlerManager { + public: + HandlerManager(); + + ~HandlerManager(); + + std::shared_ptr NewHandler(); + + std::shared_ptr FindHandler(uint64_t id); + + void ReleaseHandler(uint64_t id); + + private: + Mutex mutex_; + std::shared_ptr dirBuffer_; + std::map> handlers_; +}; + +inline bool operator==(const TimeSpec& lhs, const TimeSpec& rhs) { + return (lhs.seconds == rhs.seconds) && + (lhs.nanoSeconds == rhs.nanoSeconds); +} + +inline bool operator!=(const TimeSpec& lhs, const TimeSpec& rhs) { + return !(lhs == rhs); +} + +inline bool operator<(const TimeSpec& lhs, const TimeSpec& rhs) { + return (lhs.seconds < rhs.seconds) || + (lhs.seconds == rhs.seconds && lhs.nanoSeconds < rhs.nanoSeconds); +} + +inline bool operator>(const TimeSpec& lhs, const TimeSpec& rhs) { + return (lhs.seconds > rhs.seconds) || + (lhs.seconds == rhs.seconds && lhs.nanoSeconds > rhs.nanoSeconds); +} + +inline std::ostream &operator<<(std::ostream &os, const TimeSpec& time) { + return os << time.seconds << "." 
<< time.nanoSeconds; +} + +std::string StrMode(uint16_t mode); + +std::string StrEntry(EntryOut entryOut); + +std::string StrAttr(AttrOut attrOut); + +} // namespace filesystem +} // namespace client +} // namespace curvefs + +#endif // CURVEFS_SRC_CLIENT_FILESYSTEM_META_H_ diff --git a/curvefs/src/client/filesystem/openfile.cpp b/curvefs/src/client/filesystem/openfile.cpp new file mode 100644 index 0000000000..49ba785834 --- /dev/null +++ b/curvefs/src/client/filesystem/openfile.cpp @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-03-09 + * Author: Jingli Chen (Wine93) + */ + +#include "curvefs/src/client/filesystem/openfile.h" +#include "curvefs/src/client/filesystem/utils.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +OpenFiles::OpenFiles(OpenFilesOption option, + std::shared_ptr deferSync) + : rwlock_(), + option_(option), + deferSync_(deferSync), + files_(std::make_shared(option.lruSize)) { + LOG(INFO) << "Using openfile lru cache, capacity " << option.lruSize; +} + +/* + * Delete(...) does: + * 1) publish to message queue which will flush file to server with async + * 2) delete file from lru cache + */ +void OpenFiles::Delete(Ino ino, + const std::shared_ptr& file, + bool flush) { + if (flush) { + deferSync_->Push(file->inode); + } + files_->Remove(ino); + + VLOG(1) << "Delete open file cache: ino = " << ino + << ", refs = " << file->refs + << ", mtime = " << InodeMtime(file->inode); +} + +void OpenFiles::Evit(size_t size) { + Ino ino; + std::shared_ptr file; + while (files_->Size() + size >= option_.lruSize) { + bool yes = files_->GetLast(&ino, &file); + if (!yes) { + break; + } + Delete(ino, file, true); + } +} + +void OpenFiles::Open(Ino ino, std::shared_ptr inode) { + WriteLockGuard lk(rwlock_); + + Evit(1); + + std::shared_ptr file; + bool yes = files_->Get(ino, &file); + if (yes) { + file->refs++; + return; + } + + file = std::make_shared(inode); + file->refs++; + files_->Put(ino, file); + + VLOG(1) << "Insert open file cache: ino = " << ino + << ", refs = " << file->refs + << ", mtime = " << InodeMtime(file->inode); +} + +bool OpenFiles::IsOpened(Ino ino, std::shared_ptr* inode) { + ReadLockGuard lk(rwlock_); + std::shared_ptr file; + bool yes = files_->Get(ino, &file); + if (!yes) { + return false; + } + *inode = file->inode; + return true; +} + +void OpenFiles::Close(Ino ino) { + WriteLockGuard lk(rwlock_); + std::shared_ptr file; + bool yes = files_->Get(ino, &file); + if (!yes) { + return; + } + + if (file->refs > 0) { + file->refs--; + } + + if (file->refs == 0) { + Delete(ino, file, false); // file already flushed before close + } +} + +/* + * CloseAll() does: + * flush all file to server and delete from LRU cache + */ +void OpenFiles::CloseAll() { + WriteLockGuard lk(rwlock_); + Evit(option_.lruSize); +} + +bool OpenFiles::GetFileAttr(Ino ino, InodeAttr* attr) { + ReadLockGuard lk(rwlock_); + std::shared_ptr file; + bool yes = 
files_->Get(ino, &file); + if (!yes) { + return false; + } + + file->inode->GetInodeAttr(attr); + return true; +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/src/client/filesystem/openfile.h b/curvefs/src/client/filesystem/openfile.h new file mode 100644 index 0000000000..53ce0ae26e --- /dev/null +++ b/curvefs/src/client/filesystem/openfile.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-03-09 + * Author: Jingli Chen (Wine93) + */ + +#ifndef CURVEFS_SRC_CLIENT_FILESYSTEM_OPENFILE_H_ +#define CURVEFS_SRC_CLIENT_FILESYSTEM_OPENFILE_H_ + +#include + +#include "src/common/lru_cache.h" +#include "curvefs/src/client/inode_wrapper.h" +#include "curvefs/src/client/filesystem/meta.h" +#include "curvefs/src/client/filesystem/defer_sync.h" +#include "curvefs/src/client/filesystem/dir_cache.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +using ::curve::common::LRUCache; +using ::curve::common::ReadLockGuard; +using ::curve::common::WriteLockGuard; +using ::curvefs::client::InodeWrapper; +using ::curvefs::client::common::OpenFilesOption; + +struct OpenFile { + explicit OpenFile(std::shared_ptr inode) + : inode(inode), refs(0) {} + + std::shared_ptr inode; + uint64_t refs; +}; + +class OpenFiles { + public: + using LRUType = LRUCache>; + + public: + explicit OpenFiles(OpenFilesOption option, + std::shared_ptr deferSync); + + void Open(Ino ino, std::shared_ptr inode); + + bool IsOpened(Ino ino, std::shared_ptr* inode); + + void Close(Ino ino); + + void CloseAll(); + + bool GetFileAttr(Ino ino, InodeAttr* inode); + + private: + void Delete(Ino ino, const std::shared_ptr& file, bool flush); + + void Evit(size_t size); + + private: + RWLock rwlock_; + OpenFilesOption option_; + std::shared_ptr deferSync_; + std::shared_ptr files_; +}; + +} // namespace filesystem +} // namespace client +} // namespace curvefs + +#endif // CURVEFS_SRC_CLIENT_FILESYSTEM_OPENFILE_H_ diff --git a/curvefs/src/client/filesystem/package.h b/curvefs/src/client/filesystem/package.h new file mode 100644 index 0000000000..47f9fccc36 --- /dev/null +++ b/curvefs/src/client/filesystem/package.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
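
/*
 * A standalone sketch of the OpenFiles bookkeeping implemented above: one
 * refcounted entry per inode, an LRU list bounded by the configured capacity,
 * and evicted entries handed to a flush callback (standing in for
 * DeferSync::Push). Generic std:: containers replace the project's
 * LRUCache/DeferSync classes.
 */
#include <cstddef>
#include <cstdint>
#include <functional>
#include <list>
#include <unordered_map>
#include <utility>

using Ino = uint64_t;

struct OpenFile {
    uint64_t refs = 0;
};

class OpenFilesSketch {
 public:
    OpenFilesSketch(size_t capacity, std::function<void(Ino)> flush)
        : capacity_(capacity), flush_(flush) {}

    void Open(Ino ino) {
        auto iter = index_.find(ino);
        if (iter != index_.end()) {          // already cached: bump the refcount
            iter->second->second.refs++;
            return;
        }
        Evict(1);                            // make room before inserting, like Evit(1)
        lru_.push_front({ino, OpenFile{1}});
        index_[ino] = lru_.begin();
    }

    void Close(Ino ino) {
        auto iter = index_.find(ino);
        if (iter == index_.end()) {
            return;
        }
        auto& file = iter->second->second;
        if (file.refs > 0 && --file.refs == 0) {
            lru_.erase(iter->second);        // last closer drops the entry
            index_.erase(iter);
        }
    }

 private:
    void Evict(size_t need) {
        while (lru_.size() + need > capacity_ && !lru_.empty()) {
            Ino victim = lru_.back().first;
            flush_(victim);                  // flush before dropping, like Delete(..., true)
            index_.erase(victim);
            lru_.pop_back();
        }
    }

    size_t capacity_;
    std::function<void(Ino)> flush_;
    std::list<std::pair<Ino, OpenFile>> lru_;
    std::unordered_map<Ino, std::list<std::pair<Ino, OpenFile>>::iterator> index_;
};
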
+ */ + +/* + * Project: Curve + * Created Date: 2023-03-06 + * Author: Jingli Chen (Wine93) + */ + +#ifndef CURVEFS_SRC_CLIENT_FILESYSTEM_PACKAGE_H_ +#define CURVEFS_SRC_CLIENT_FILESYSTEM_PACKAGE_H_ + +#include + +#include "curvefs/src/client/dentry_cache_manager.h" +#include "curvefs/src/client/inode_cache_manager.h" +#include "curvefs/src/client/xattr_manager.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +struct ExternalMember { // external member depended by FileSystem + ExternalMember() = delete; + ExternalMember(std::shared_ptr dentryManager, + std::shared_ptr inodeManager) + : dentryManager(dentryManager), + inodeManager(inodeManager) {} + + std::shared_ptr dentryManager; + std::shared_ptr inodeManager; +}; + +} // namespace filesystem +} // namespace client +} // namespace curvefs + +#endif // CURVEFS_SRC_CLIENT_FILESYSTEM_PACKAGE_H_ diff --git a/curvefs/src/client/filesystem/rpc_client.cpp b/curvefs/src/client/filesystem/rpc_client.cpp new file mode 100644 index 0000000000..2f9cea01fa --- /dev/null +++ b/curvefs/src/client/filesystem/rpc_client.cpp @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-03-07 + * Author: Jingli Chen (Wine93) + */ + +#include +#include +#include +#include +#include +#include + +#include "curvefs/src/client/filesystem/rpc_client.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +RPCClient::RPCClient(RPCOption option, + ExternalMember member) + : option_(option), + inodeManager_(member.inodeManager), + dentryManager_(member.dentryManager) {} + +CURVEFS_ERROR RPCClient::GetAttr(Ino ino, InodeAttr* attr) { + CURVEFS_ERROR rc = inodeManager_->GetInodeAttr(ino, attr); + if (rc != CURVEFS_ERROR::OK) { + LOG(ERROR) << "rpc(getattr::GetInodeAttr) failed, retCode = " << rc + << ", ino = " << ino; + } + return rc; +} + +CURVEFS_ERROR RPCClient::Lookup(Ino parent, + const std::string& name, + EntryOut* entryOut) { + Dentry dentry; + CURVEFS_ERROR rc = dentryManager_->GetDentry(parent, name, &dentry); + if (rc != CURVEFS_ERROR::OK) { + if (rc != CURVEFS_ERROR::NOTEXIST) { + LOG(ERROR) << "rpc(lookup::GetDentry) failed, retCode = " << rc + << ", parent = " << parent << ", name = " << name; + } + return rc; + } + + Ino ino = dentry.inodeid(); + rc = inodeManager_->GetInodeAttr(ino, &entryOut->attr); + if (rc != CURVEFS_ERROR::OK) { + LOG(ERROR) << "rpc(lookup::GetInodeAttr) failed, retCode = " << rc + << ", ino = " << ino; + } + return rc; +} + +CURVEFS_ERROR RPCClient::ReadDir(Ino ino, + std::shared_ptr* entries) { + uint32_t limit = option_.listDentryLimit; + + std::list dentries; + CURVEFS_ERROR rc = dentryManager_->ListDentry(ino, &dentries, limit); + if (rc != CURVEFS_ERROR::OK) { + LOG(ERROR) << "rpc(readdir::ListDentry) failed, retCode = " << rc + << ", ino = " << ino; + return rc; + } + + std::set inos; + std::map attrs; + std::for_each(dentries.begin(), dentries.end(), [&](Dentry& dentry){ + 
inos.emplace(dentry.inodeid()); + }); + rc = inodeManager_->BatchGetInodeAttrAsync(ino, &inos, &attrs); + if (rc != CURVEFS_ERROR::OK) { + LOG(ERROR) << "rpc(readdir::BatchGetInodeAttrAsync) failed" + << ", retCode = " << rc << ", ino = " << ino; + return rc; + } + + DirEntry dirEntry; + for (const auto& dentry : dentries) { + Ino ino = dentry.inodeid(); + auto iter = attrs.find(ino); + if (iter == attrs.end()) { + LOG(WARNING) << "rpc(readdir::BatchGetInodeAttrAsync) " + << "missing attribute, ino = " << ino; + continue; + } + + dirEntry.ino = ino; + dirEntry.name = std::move(dentry.name()); + dirEntry.attr = std::move(iter->second); + (*entries)->Add(dirEntry); + } + return CURVEFS_ERROR::OK; +} + +CURVEFS_ERROR RPCClient::Open(Ino ino, std::shared_ptr* inode) { + CURVEFS_ERROR rc = inodeManager_->GetInode(ino, *inode); + if (rc != CURVEFS_ERROR::OK) { + LOG(ERROR) << "rpc(open/GetInode) failed" << ", retCode = " << rc + << ", ino = " << ino; + } + return rc; +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/src/client/filesystem/rpc_client.h b/curvefs/src/client/filesystem/rpc_client.h new file mode 100644 index 0000000000..9358f4a7ed --- /dev/null +++ b/curvefs/src/client/filesystem/rpc_client.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-03-07 + * Author: Jingli Chen (Wine93) + */ + +#ifndef CURVEFS_SRC_CLIENT_FILESYSTEM_RPC_CLIENT_H_ +#define CURVEFS_SRC_CLIENT_FILESYSTEM_RPC_CLIENT_H_ + +#include +#include + +#include "curvefs/src/client/common/config.h" +#include "curvefs/src/client/filesystem/meta.h" +#include "curvefs/src/client/filesystem/package.h" +#include "curvefs/src/client/filesystem/dir_cache.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +using ::curvefs::client::common::RPCOption; + +class RPCClient { + public: + RPCClient(RPCOption option, + ExternalMember member); + + CURVEFS_ERROR GetAttr(Ino ino, InodeAttr* attr); + + CURVEFS_ERROR Lookup(Ino parent, + const std::string& name, + EntryOut* entryOut); + + CURVEFS_ERROR ReadDir(Ino ino, std::shared_ptr* entries); + + CURVEFS_ERROR Open(Ino ino, std::shared_ptr* inode); + + private: + RPCOption option_; + std::shared_ptr inodeManager_; + std::shared_ptr dentryManager_; +}; + +} // namespace filesystem +} // namespace client +} // namespace curvefs + +#endif // CURVEFS_SRC_CLIENT_FILESYSTEM_RPC_CLIENT_H_ diff --git a/curvefs/src/client/filesystem/utils.cpp b/curvefs/src/client/filesystem/utils.cpp new file mode 100644 index 0000000000..0a6b6c38e0 --- /dev/null +++ b/curvefs/src/client/filesystem/utils.cpp @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
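
/*
 * A standalone sketch of the ReadDir pattern above: list the dentries once,
 * gather their inode ids, fetch all attributes with a single batched call,
 * then join the attributes back onto entries by id. The types and
 * BatchGetAttrs below are stand-ins for the metaserver client, not its real
 * API.
 */
#include <cstdint>
#include <map>
#include <set>
#include <string>
#include <vector>

using Ino = uint64_t;

struct Dentry   { Ino ino; std::string name; };
struct InodeAttr { uint64_t length = 0; };
struct DirEntry { Ino ino; std::string name; InodeAttr attr; };

// Stand-in for BatchGetInodeAttrAsync(): one call returns attrs for many inodes.
std::map<Ino, InodeAttr> BatchGetAttrs(const std::set<Ino>& inos) {
    std::map<Ino, InodeAttr> out;
    for (Ino ino : inos) {
        out[ino] = InodeAttr{4096};  // fake attribute; the real code issues one batched RPC
    }
    return out;
}

std::vector<DirEntry> ReadDirSketch(const std::vector<Dentry>& dentries) {
    std::set<Ino> inos;
    for (const auto& d : dentries) {
        inos.insert(d.ino);          // gather ids once
    }

    std::map<Ino, InodeAttr> attrs = BatchGetAttrs(inos);

    std::vector<DirEntry> entries;
    entries.reserve(dentries.size());
    for (const auto& d : dentries) {
        auto iter = attrs.find(d.ino);
        if (iter == attrs.end()) {
            continue;                // missing attribute: skip, as the real code logs a warning
        }
        entries.push_back({d.ino, d.name, iter->second});
    }
    return entries;
}
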
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-03-06 + * Author: Jingli Chen (Wine93) + */ + +#include + +#include "curvefs/src/client/filesystem/utils.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +bool IsDir(const InodeAttr& attr) { + return attr.type() == FsFileType::TYPE_DIRECTORY; +} + +bool IsS3File(const InodeAttr& attr) { + return attr.type() == FsFileType::TYPE_S3; +} + +bool IsVolmeFile(const InodeAttr& attr) { + return attr.type() == FsFileType::TYPE_FILE; +} + +bool IsSymLink(const InodeAttr& attr) { + return attr.type() == FsFileType::TYPE_SYM_LINK; +} + +TimeSpec AttrMtime(const InodeAttr& attr) { + return TimeSpec(attr.mtime(), attr.mtime_ns()); +} + +TimeSpec AttrCtime(const InodeAttr& attr) { + return TimeSpec(attr.ctime(), attr.ctime_ns()); +} + +TimeSpec InodeMtime(const std::shared_ptr inode) { + InodeAttr attr; + inode->GetInodeAttr(&attr); + return AttrMtime(attr); +} + +TimeSpec Now() { + struct timespec now; + clock_gettime(CLOCK_REALTIME, &now); + return TimeSpec(now.tv_sec, now.tv_nsec); +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/src/client/filesystem/utils.h b/curvefs/src/client/filesystem/utils.h new file mode 100644 index 0000000000..8f37104e1d --- /dev/null +++ b/curvefs/src/client/filesystem/utils.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
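
/*
 * A standalone sketch of Now()/AttrMtime above: wrap clock_gettime(CLOCK_REALTIME)
 * into a (seconds, nanoseconds) pair and compare it against a stored mtime with
 * the same ordering meta.h defines. A POSIX clock is assumed; TimeSpec here is a
 * local re-declaration for illustration only.
 */
#include <time.h>
#include <cstdint>
#include <iostream>

struct TimeSpec {
    uint64_t seconds = 0;
    uint32_t nanoSeconds = 0;
};

// Same ordering rule as meta.h: seconds first, then nanoseconds.
inline bool operator<(const TimeSpec& lhs, const TimeSpec& rhs) {
    return (lhs.seconds < rhs.seconds) ||
           (lhs.seconds == rhs.seconds && lhs.nanoSeconds < rhs.nanoSeconds);
}

TimeSpec Now() {
    struct timespec now;
    clock_gettime(CLOCK_REALTIME, &now);
    return TimeSpec{static_cast<uint64_t>(now.tv_sec),
                    static_cast<uint32_t>(now.tv_nsec)};
}

int main() {
    TimeSpec mtime{0, 0};                       // pretend attribute mtime (epoch)
    std::cout << (mtime < Now()) << std::endl;  // prints 1: epoch is older than now
    return 0;
}
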
+ */ + +/* + * Project: Curve + * Created Date: 2023-03-06 + * Author: Jingli Chen (Wine93) + */ + +#ifndef CURVEFS_SRC_CLIENT_FILESYSTEM_UTILS_H_ +#define CURVEFS_SRC_CLIENT_FILESYSTEM_UTILS_H_ + +#include + +#include "curvefs/src/client/filesystem/meta.h" +#include "curvefs/src/client/filesystem/package.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +// directory +bool IsDir(const InodeAttr& attr); + +// file which data is stored in s3 +bool IsS3File(const InodeAttr& attr); + +// file which data is stored in volume +bool IsVolmeFile(const InodeAttr& attr); + +// symbol link +bool IsSymLink(const InodeAttr& attr); + +struct TimeSpec AttrMtime(const InodeAttr& attr); + +struct TimeSpec AttrCtime(const InodeAttr& attr); + +struct TimeSpec InodeMtime(const std::shared_ptr inode); + +struct TimeSpec Now(); + +} // namespace filesystem +} // namespace client +} // namespace curvefs + +#endif // CURVEFS_SRC_CLIENT_FILESYSTEM_UTILS_H_ diff --git a/curvefs/src/client/fuse_client.cpp b/curvefs/src/client/fuse_client.cpp index f6146ad62e..b114e3aa14 100644 --- a/curvefs/src/client/fuse_client.cpp +++ b/curvefs/src/client/fuse_client.cpp @@ -35,7 +35,7 @@ #include "curvefs/proto/mds.pb.h" #include "curvefs/src/client/common/common.h" -#include "curvefs/src/client/error_code.h" +#include "curvefs/src/client/filesystem/error.h" #include "curvefs/src/client/fuse_common.h" #include "curvefs/src/client/client_operator.h" #include "curvefs/src/client/inode_wrapper.h" @@ -55,7 +55,10 @@ using ::curvefs::mds::topology::PartitionTxId; using ::curvefs::mds::FSStatusCode_Name; using ::curvefs::client::common::MAX_XATTR_NAME_LENGTH; using ::curvefs::client::common::MAX_XATTR_VALUE_LENGTH; -using ::curvefs::client::common::FileHandle; +using ::curvefs::client::filesystem::ExternalMember; +using ::curvefs::client::filesystem::DirEntry; +using ::curvefs::client::filesystem::DirEntryList; +using ::curvefs::client::filesystem::FileOut; #define RETURN_IF_UNSUCCESS(action) \ do { \ @@ -70,6 +73,21 @@ namespace curvefs { namespace client { namespace common { DECLARE_bool(enableCto); +DECLARE_uint64(fuseClientAvgWriteIops); +DECLARE_uint64(fuseClientBurstWriteIops); +DECLARE_uint64(fuseClientBurstWriteIopsSecs); + +DECLARE_uint64(fuseClientAvgWriteBytes); +DECLARE_uint64(fuseClientBurstWriteBytes); +DECLARE_uint64(fuseClientBurstWriteBytesSecs); + +DECLARE_uint64(fuseClientAvgReadIops); +DECLARE_uint64(fuseClientBurstReadIops); +DECLARE_uint64(fuseClientBurstReadIopsSecs); + +DECLARE_uint64(fuseClientAvgReadBytes); +DECLARE_uint64(fuseClientBurstReadBytes); +DECLARE_uint64(fuseClientBurstReadBytesSecs); } // namespace common } // namespace client } // namespace curvefs @@ -83,6 +101,27 @@ using rpcclient::Cli2ClientImpl; using rpcclient::MetaCache; using common::FLAGS_enableCto; +using common::FLAGS_fuseClientAvgWriteIops; +using common::FLAGS_fuseClientBurstWriteIops; +using common::FLAGS_fuseClientBurstWriteIopsSecs; + +using common::FLAGS_fuseClientAvgWriteBytes; +using common::FLAGS_fuseClientBurstWriteBytes; +using common::FLAGS_fuseClientBurstWriteBytesSecs; + +using common::FLAGS_fuseClientAvgReadIops; +using common::FLAGS_fuseClientBurstReadIops; +using common::FLAGS_fuseClientBurstReadIopsSecs; + +using common::FLAGS_fuseClientAvgReadBytes; +using common::FLAGS_fuseClientBurstReadBytes; +using common::FLAGS_fuseClientBurstReadBytesSecs; + +static void on_throttle_timer(void *arg) { + FuseClient *fuseClient = reinterpret_cast(arg); + fuseClient->InitQosParam(); +} + CURVEFS_ERROR 
FuseClient::Init(const FuseClientOption &option) { option_ = option; @@ -98,7 +137,8 @@ CURVEFS_ERROR FuseClient::Init(const FuseClientOption &option) { auto channelManager = std::make_shared>(); leaseExecutor_ = absl::make_unique(option.leaseOpt, - metaCache, mdsClient_); + metaCache, mdsClient_, + &enableSumInDir_); xattrManager_ = std::make_shared(inodeManager_, dentryManager_, option_.listDentryLimit, option_.listDentryThreads); @@ -117,6 +157,11 @@ CURVEFS_ERROR FuseClient::Init(const FuseClientOption &option) { curve::client::ClientDummyServerInfo::GetInstance().SetPort(listenPort); curve::client::ClientDummyServerInfo::GetInstance().SetIP(localIp); + { + ExternalMember member(dentryManager_, inodeManager_); + fs_ = std::make_shared(option_.fileSystemOption, member); + } + MetaStatusCode ret2 = metaClient_->Init(option.excutorOpt, option.excutorInternalOpt, metaCache, channelManager); @@ -124,18 +169,14 @@ CURVEFS_ERROR FuseClient::Init(const FuseClientOption &option) { return CURVEFS_ERROR::INTERNAL; } - CURVEFS_ERROR ret3 = - inodeManager_->Init(option.iCacheLruSize, option.enableICacheMetrics, - option.flushPeriodSec, option.refreshDataOption, - option.lruTimeOutSec); - if (ret3 != CURVEFS_ERROR::OK) { - return ret3; - } - ret3 = - dentryManager_->Init(option.dCacheLruSize, option.enableDCacheMetrics, - option.lruTimeOutSec); - if (ret3 != CURVEFS_ERROR::OK) { - return ret3; + { // init inode manager + auto member = fs_->BorrowMember(); + CURVEFS_ERROR rc = inodeManager_->Init(option.refreshDataOption, + member.openFiles, + member.deferSync); + if (rc != CURVEFS_ERROR::OK) { + return rc; + } } if (warmupManager_ != nullptr) { @@ -143,7 +184,9 @@ CURVEFS_ERROR FuseClient::Init(const FuseClientOption &option) { warmupManager_->SetFsInfo(fsInfo_); } - return ret3; + InitQosParam(); + + return CURVEFS_ERROR::OK; } void FuseClient::UnInit() { @@ -153,11 +196,14 @@ void FuseClient::UnInit() { delete mdsBase_; mdsBase_ = nullptr; + + while (bthread_timer_del(throttleTimer_) == 1) { + bthread_usleep(1000); + } } CURVEFS_ERROR FuseClient::Run() { if (isStop_.exchange(false)) { - inodeManager_->Run(); return CURVEFS_ERROR::OK; } return CURVEFS_ERROR::INTERNAL; @@ -165,60 +211,14 @@ CURVEFS_ERROR FuseClient::Run() { void FuseClient::Fini() { if (!isStop_.exchange(true)) { - inodeManager_->Stop(); xattrManager_->Stop(); } } CURVEFS_ERROR FuseClient::FuseOpInit(void *userdata, struct fuse_conn_info *conn) { - struct MountOption* mOpts = (struct MountOption*)userdata; - // set path - mountpoint_.set_path((mOpts->mountPoint == nullptr) ? "" - : mOpts->mountPoint); - std::string fsName = (mOpts->fsName == nullptr) ? 
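
/*
 * A sketch of the dependency wiring Init() performs above: the FileSystem is
 * constructed from an ExternalMember bundle (the dentry/inode managers it
 * depends on), and the members it owns (open-file cache, deferred sync) are
 * borrowed back to initialize the inode manager. Every type below is a thin
 * stand-in used only to show the handoff, not the project's classes.
 */
#include <memory>

struct OpenFiles {};                    // stand-in: open-file cache owned by FileSystem
struct DeferSync {};                    // stand-in: deferred attribute flusher
struct DentryManager {};                // stand-in: external dependency
struct InodeManager {                   // stand-in: external dependency
    void Init(std::shared_ptr<OpenFiles> openFiles,
              std::shared_ptr<DeferSync> deferSync) {
        (void)openFiles;                // the real Init wires caches and flushing
        (void)deferSync;
    }
};

struct ExternalMember {                 // dependencies injected into FileSystem
    std::shared_ptr<DentryManager> dentryManager;
    std::shared_ptr<InodeManager> inodeManager;
};

struct BorrowedMember {                 // members FileSystem lends back out
    std::shared_ptr<OpenFiles> openFiles = std::make_shared<OpenFiles>();
    std::shared_ptr<DeferSync> deferSync = std::make_shared<DeferSync>();
};

class FileSystem {
 public:
    explicit FileSystem(ExternalMember member) : member_(member) {}
    BorrowedMember BorrowMember() { return owned_; }

 private:
    ExternalMember member_;
    BorrowedMember owned_;
};

void InitSketch() {
    auto dentryManager = std::make_shared<DentryManager>();
    auto inodeManager = std::make_shared<InodeManager>();

    ExternalMember member{dentryManager, inodeManager};
    auto fs = std::make_shared<FileSystem>(member);

    auto borrowed = fs->BorrowMember();                           // same handoff as Init() above
    inodeManager->Init(borrowed.openFiles, borrowed.deferSync);
}
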
"" : mOpts->fsName; - - mountpoint_.set_cto(FLAGS_enableCto); - - int retVal = SetHostPortInMountPoint(&mountpoint_); - if (retVal < 0) { - LOG(ERROR) << "Set Host and Port in MountPoint failed, ret = " - << retVal; - return CURVEFS_ERROR::INTERNAL; - } - - auto ret = mdsClient_->MountFs(fsName, mountpoint_, fsInfo_.get()); - if (ret != FSStatusCode::OK && ret != FSStatusCode::MOUNT_POINT_EXIST) { - LOG(ERROR) << "MountFs failed, FSStatusCode = " << ret - << ", FSStatusCode_Name = " - << FSStatusCode_Name(ret) - << ", fsName = " << fsName - << ", mountPoint = " << mountpoint_.ShortDebugString(); - return CURVEFS_ERROR::MOUNT_FAILED; - } - inodeManager_->SetFsId(fsInfo_->fsid()); - dentryManager_->SetFsId(fsInfo_->fsid()); - enableSumInDir_ = fsInfo_->enablesumindir() && !FLAGS_enableCto; - if (fsInfo_->has_recycletimehour()) { - enableSumInDir_ = enableSumInDir_ && (fsInfo_->recycletimehour() == 0); - } - - LOG(INFO) << "Mount " << fsName << " on " << mountpoint_.ShortDebugString() - << " success!" << " enableSumInDir = " << enableSumInDir_; - - fsMetric_ = std::make_shared(fsName); - - // init fsname and mountpoint - leaseExecutor_->SetFsName(fsName); - leaseExecutor_->SetMountPoint(mountpoint_); - if (!leaseExecutor_->Start()) { - return CURVEFS_ERROR::INTERNAL; - } - - init_ = true; - if (warmupManager_ != nullptr) { - warmupManager_->SetMounted(true); - } + (void)userdata; + (void)conn; return CURVEFS_ERROR::OK; } @@ -228,7 +228,7 @@ void FuseClient::FuseOpDestroy(void *userdata) { } FlushAll(); - dirBuf_->DirBufferFreeAll(); + fs_->Destory(); // stop lease before umount fs, otherwise, lease request after umount fs // will add a mountpoint entry. @@ -260,89 +260,38 @@ void FuseClient::FuseOpDestroy(void *userdata) { << " success!"; } -void InodeAttr2ParamAttr(const InodeAttr &inodeAttr, struct stat *attr) { - attr->st_ino = inodeAttr.inodeid(); - attr->st_mode = inodeAttr.mode(); - attr->st_nlink = inodeAttr.nlink(); - attr->st_uid = inodeAttr.uid(); - attr->st_gid = inodeAttr.gid(); - attr->st_size = inodeAttr.length(); - attr->st_rdev = inodeAttr.rdev(); - attr->st_atim.tv_sec = inodeAttr.atime(); - attr->st_atim.tv_nsec = inodeAttr.atime_ns(); - attr->st_mtim.tv_sec = inodeAttr.mtime(); - attr->st_mtim.tv_nsec = inodeAttr.mtime_ns(); - attr->st_ctim.tv_sec = inodeAttr.ctime(); - attr->st_ctim.tv_nsec = inodeAttr.ctime_ns(); - attr->st_blksize = kOptimalIOBlockSize; - - switch (inodeAttr.type()) { - case metaserver::TYPE_S3: - attr->st_blocks = (inodeAttr.length() + 511) / 512; - break; - default: - attr->st_blocks = 0; - break; - } -} - -void GetDentryParamFromInodeAttr( - const FuseClientOption &option, - const InodeAttr &inodeAttr, - fuse_entry_param *param) { - memset(param, 0, sizeof(fuse_entry_param)); - param->ino = inodeAttr.inodeid(); - param->generation = 0; - InodeAttr2ParamAttr(inodeAttr, ¶m->attr); - param->attr_timeout = option.attrTimeOut; - param->entry_timeout = option.entryTimeOut; -} - -CURVEFS_ERROR FuseClient::FuseOpLookup(fuse_req_t req, fuse_ino_t parent, - const char *name, fuse_entry_param *e) { - VLOG(1) << "FuseOpLookup parent: " << parent - << ", name: " << name; - if (strlen(name) > option_.maxNameLength) { - return CURVEFS_ERROR::NAMETOOLONG; - } - - Dentry dentry; - CURVEFS_ERROR ret = dentryManager_->GetDentry(parent, name, &dentry); - if (ret != CURVEFS_ERROR::OK) { - if (ret != CURVEFS_ERROR::NOTEXIST) { - LOG(WARNING) << "dentryManager_ get dentry fail, ret = " << ret - << ", parent inodeid = " << parent - << ", name = " << name; - } - return ret; - 
} - - fuse_ino_t ino = dentry.inodeid(); - InodeAttr attr; - ret = inodeManager_->GetInodeAttr(ino, &attr); - if (ret != CURVEFS_ERROR::OK) { - LOG(ERROR) << "inodeManager get inodeAttr fail, ret = " << ret - << ", inodeid = " << ino; - return ret; +CURVEFS_ERROR FuseClient::FuseOpLookup(fuse_req_t req, + fuse_ino_t parent, + const char* name, + EntryOut* entryOut) { + CURVEFS_ERROR rc = fs_->Lookup(req, parent, name, entryOut); + if (rc != CURVEFS_ERROR::OK && rc != CURVEFS_ERROR::NOTEXIST) { + LOG(ERROR) << "Lookup() failed, retCode = " << rc + << ", parent = " << parent << ", name = " << name; } - - GetDentryParamFromInodeAttr(option_, attr, e); - return ret; + return rc; } -CURVEFS_ERROR FuseClient::FuseOpOpen(fuse_req_t req, fuse_ino_t ino, - struct fuse_file_info *fi) { - VLOG(1) << "FuseOpOpen, ino: " << ino; +CURVEFS_ERROR FuseClient::HandleOpenFlags(fuse_req_t req, + fuse_ino_t ino, + struct fuse_file_info* fi, + FileOut* fileOut) { std::shared_ptr inodeWrapper; + // alredy opened CURVEFS_ERROR ret = inodeManager_->GetInode(ino, inodeWrapper); if (ret != CURVEFS_ERROR::OK) { LOG(ERROR) << "inodeManager get inode fail, ret = " << ret << ", inodeid = " << ino; return ret; } - ::curve::common::UniqueLock lgGuard = inodeWrapper->GetUniqueLock(); + + fileOut->fi = fi; + inodeWrapper->GetInodeAttr(&fileOut->attr); + if (fi->flags & O_TRUNC) { if (fi->flags & O_WRONLY || fi->flags & O_RDWR) { + ::curve::common::UniqueLock lgGuard = + inodeWrapper->GetUniqueLock(); uint64_t length = inodeWrapper->GetLengthLocked(); CURVEFS_ERROR tRet = Truncate(inodeWrapper.get(), 0); if (tRet != CURVEFS_ERROR::OK) { @@ -361,7 +310,7 @@ CURVEFS_ERROR FuseClient::FuseOpOpen(fuse_req_t req, fuse_ino_t ino, inodeWrapper->MarkDirty(); } - if (enableSumInDir_ && length != 0) { + if (enableSumInDir_.load() && length != 0) { // update parent summary info const Inode *inode = inodeWrapper->GetInodeLocked(); XAttr xattr; @@ -377,18 +326,29 @@ CURVEFS_ERROR FuseClient::FuseOpOpen(fuse_req_t req, fuse_ino_t ino, } } } + inodeWrapper->GetInodeAttrLocked(&fileOut->attr); } else { return CURVEFS_ERROR::NOPERMISSION; } } - if (FLAGS_enableCto) { - inodeManager_->AddOpenedInode(ino); + return CURVEFS_ERROR::OK; +} + +CURVEFS_ERROR FuseClient::FuseOpOpen(fuse_req_t req, + fuse_ino_t ino, + struct fuse_file_info* fi, + FileOut* fileOut) { + CURVEFS_ERROR rc = fs_->Open(req, ino, fi); + if (rc != CURVEFS_ERROR::OK) { + LOG(ERROR) << "open(" << ino << ") failed, retCode = " << rc; + return rc; } - return ret; + return HandleOpenFlags(req, ino, fi, fileOut); } CURVEFS_ERROR FuseClient::UpdateParentMCTimeAndNlink( fuse_ino_t parent, FsFileType type, NlinkChange nlink) { + std::shared_ptr parentInodeWrapper; auto ret = inodeManager_->GetInode(parent, parentInodeWrapper); if (ret != CURVEFS_ERROR::OK) { @@ -404,16 +364,30 @@ CURVEFS_ERROR FuseClient::UpdateParentMCTimeAndNlink( if (FsFileType::TYPE_DIRECTORY == type) { parentInodeWrapper->UpdateNlinkLocked(nlink); } + + if (option_.fileSystemOption.deferSyncOption.deferDirMtime) { + inodeManager_->ShipToFlush(parentInodeWrapper); + } else { + ret = parentInodeWrapper->SyncAttr(); + if (ret != CURVEFS_ERROR::OK) { + return CURVEFS_ERROR::OK; + } + } } - inodeManager_->ShipToFlush(parentInodeWrapper); + return CURVEFS_ERROR::OK; } -CURVEFS_ERROR FuseClient::MakeNode(fuse_req_t req, fuse_ino_t parent, - const char *name, mode_t mode, - FsFileType type, dev_t rdev, bool internal, - fuse_entry_param *e) { - if (strlen(name) > option_.maxNameLength) { +CURVEFS_ERROR 
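
/*
 * A standalone sketch of the attribute mapping the removed InodeAttr2ParamAttr
 * performed (the equivalent logic now lives behind fs_->GetAttr/Lookup):
 * protobuf attributes copied onto struct stat, with the byte length rounded up
 * to 512-byte blocks for S3-backed files. A plain struct stands in for
 * metaserver::InodeAttr; a Linux struct stat (st_mtim) is assumed.
 */
#include <sys/stat.h>
#include <cstdint>
#include <cstring>

struct PlainAttr {               // stand-in for metaserver::InodeAttr
    uint64_t inodeid;
    uint32_t mode;
    uint32_t nlink;
    uint64_t length;
    uint64_t mtime;
    uint32_t mtime_ns;
    bool     isS3;
};

void Attr2Stat(const PlainAttr& in, struct stat* out) {
    std::memset(out, 0, sizeof(*out));
    out->st_ino = in.inodeid;
    out->st_mode = in.mode;
    out->st_nlink = in.nlink;
    out->st_size = in.length;
    out->st_mtim.tv_sec = in.mtime;
    out->st_mtim.tv_nsec = in.mtime_ns;
    // S3-backed files report 512-byte blocks rounded up from the byte length;
    // other types report zero, matching the removed code above.
    out->st_blocks = in.isS3 ? (in.length + 511) / 512 : 0;
}
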
FuseClient::MakeNode( + fuse_req_t req, + fuse_ino_t parent, + const char* name, + mode_t mode, + FsFileType type, + dev_t rdev, + bool internal, + std::shared_ptr& inodeWrapper) { + if (strlen(name) > option_.fileSystemOption.maxNameLength) { return CURVEFS_ERROR::NAMETOOLONG; } @@ -444,7 +418,6 @@ CURVEFS_ERROR FuseClient::MakeNode(fuse_req_t req, fuse_ino_t parent, param.rdev = rdev; param.parent = parent; - std::shared_ptr inodeWrapper; CURVEFS_ERROR ret = inodeManager_->CreateInode(param, inodeWrapper); if (ret != CURVEFS_ERROR::OK) { LOG(ERROR) << "inodeManager CreateInode fail, ret = " << ret @@ -496,7 +469,7 @@ CURVEFS_ERROR FuseClient::MakeNode(fuse_req_t req, fuse_ino_t parent, << ", parent = " << parent << ", name = " << name << ", mode = " << mode; - if (enableSumInDir_) { + if (enableSumInDir_.load()) { // update parent summary info XAttr xattr; xattr.mutable_xattrinfos()->insert({XATTRENTRIES, "1"}); @@ -515,20 +488,26 @@ CURVEFS_ERROR FuseClient::MakeNode(fuse_req_t req, fuse_ino_t parent, } } - InodeAttr attr; - inodeWrapper->GetInodeAttr(&attr); - GetDentryParamFromInodeAttr(option_, attr, e); return ret; } -CURVEFS_ERROR FuseClient::FuseOpMkDir(fuse_req_t req, fuse_ino_t parent, - const char *name, mode_t mode, - fuse_entry_param *e) { +CURVEFS_ERROR FuseClient::FuseOpMkDir(fuse_req_t req, + fuse_ino_t parent, + const char *name, + mode_t mode, + EntryOut* entryOut) { VLOG(1) << "FuseOpMkDir, parent: " << parent << ", name: " << name << ", mode: " << mode; bool internal = false; - return MakeNode(req, parent, name, S_IFDIR | mode, - FsFileType::TYPE_DIRECTORY, 0, internal, e); + std::shared_ptr inode; + CURVEFS_ERROR rc = MakeNode(req, parent, name, S_IFDIR | mode, + FsFileType::TYPE_DIRECTORY, 0, internal, inode); + if (rc != CURVEFS_ERROR::OK) { + return rc; + } + + inode->GetInodeAttr(&entryOut->attr); + return CURVEFS_ERROR::OK; } CURVEFS_ERROR FuseClient::FuseOpRmDir(fuse_req_t req, fuse_ino_t parent, @@ -569,7 +548,7 @@ CURVEFS_ERROR FuseClient::DeleteNode(uint64_t ino, fuse_ino_t parent, << ", parent = " << parent << ", name = " << name; } - if (enableSumInDir_) { + if (enableSumInDir_.load()) { // update parent summary info XAttr xattr; xattr.mutable_xattrinfos()->insert({XATTRENTRIES, "1"}); @@ -587,8 +566,6 @@ CURVEFS_ERROR FuseClient::DeleteNode(uint64_t ino, fuse_ino_t parent, << ", xattr = " << xattr.DebugString(); } } - - inodeManager_->ClearInodeCache(ino); return ret; } @@ -601,10 +578,13 @@ std::string GetRecycleTimeDirName() { return now; } -CURVEFS_ERROR FuseClient::CreateManageNode(fuse_req_t req, uint64_t parent, - const char *name, mode_t mode, ManageInodeType manageType, - fuse_entry_param *e) { - if (strlen(name) > option_.maxNameLength) { +CURVEFS_ERROR FuseClient::CreateManageNode(fuse_req_t req, + uint64_t parent, + const char* name, + mode_t mode, + ManageInodeType manageType, + EntryOut* entryOut) { + if (strlen(name) > option_.fileSystemOption.maxNameLength) { return CURVEFS_ERROR::NAMETOOLONG; } @@ -669,7 +649,7 @@ CURVEFS_ERROR FuseClient::CreateManageNode(fuse_req_t req, uint64_t parent, << ", parent = " << parent << ", name = " << name << ", mode = " << mode; - if (enableSumInDir_) { + if (enableSumInDir_.load()) { // update parent summary info XAttr xattr; xattr.mutable_xattrinfos()->insert({XATTRENTRIES, "1"}); @@ -688,9 +668,7 @@ CURVEFS_ERROR FuseClient::CreateManageNode(fuse_req_t req, uint64_t parent, } } - InodeAttr attr; - inodeWrapper->GetInodeAttrLocked(&attr); - GetDentryParamFromInodeAttr(option_, attr, e); + 
inodeWrapper->GetInodeAttrLocked(&entryOut->attr); return ret; } @@ -704,9 +682,10 @@ CURVEFS_ERROR FuseClient::GetOrCreateRecycleDir(fuse_req_t req, Dentry *out) { } else if (ret == CURVEFS_ERROR::NOTEXIST) { LOG(INFO) << "recycle dir is not exist, create " << RECYCLENAME << ", parentid = " << ROOTINODEID; - fuse_entry_param param; + EntryOut entryOut; ret = CreateManageNode(req, ROOTINODEID, RECYCLENAME, - S_IFDIR | 0755, ManageInodeType::TYPE_RECYCLE, ¶m); + S_IFDIR | 0755, ManageInodeType::TYPE_RECYCLE, + &entryOut); if (ret != CURVEFS_ERROR::OK) { LOG(ERROR) << "CreateManageNode failed, ret = " << ret << ", inode = " << ROOTINODEID @@ -721,6 +700,7 @@ CURVEFS_ERROR FuseClient::GetOrCreateRecycleDir(fuse_req_t req, Dentry *out) { CURVEFS_ERROR FuseClient::MoveToRecycle(fuse_req_t req, fuse_ino_t ino, fuse_ino_t parent, const char* name, FsFileType type) { + (void)type; // 1. check recycle exist, if not exist, create recycle dir Dentry recycleDir; CURVEFS_ERROR ret = GetOrCreateRecycleDir(req, &recycleDir); @@ -742,17 +722,17 @@ CURVEFS_ERROR FuseClient::MoveToRecycle(fuse_req_t req, fuse_ino_t ino, << ", name = " << recycleTimeDirName; return ret; } else if (ret == CURVEFS_ERROR::NOTEXIST) { - fuse_entry_param param; + std::shared_ptr inode; bool internal = true; ret = MakeNode(req, RECYCLEINODEID, recycleTimeDirName.c_str(), - S_IFDIR | 0755, FsFileType::TYPE_DIRECTORY, 0, internal, ¶m); + S_IFDIR | 0755, FsFileType::TYPE_DIRECTORY, 0, internal, inode); if (ret != CURVEFS_ERROR::OK) { LOG(ERROR) << "MakeNode failed, ret = " << ret << ", inode = " << RECYCLEINODEID << ", name = " << recycleTimeDirName; return ret; } - recycleTimeDirIno = param.ino; + recycleTimeDirIno = inode->GetInodeId(); } else { recycleTimeDirIno = dentry.inodeid(); } @@ -762,7 +742,7 @@ CURVEFS_ERROR FuseClient::MoveToRecycle(fuse_req_t req, fuse_ino_t ino, // 4. 
move inode to recycle time dir ret = FuseOpRename(req, parent, name, - recycleTimeDirIno, newName.c_str()); + recycleTimeDirIno, newName.c_str(), 0); if (ret != CURVEFS_ERROR::OK) { LOG(ERROR) << "MoveToRecycle failed, ret = " << ret << ", inodeid = " << ino @@ -799,7 +779,7 @@ bool FuseClient::ShouldMoveToRecycle(fuse_ino_t parent) { CURVEFS_ERROR FuseClient::RemoveNode(fuse_req_t req, fuse_ino_t parent, const char *name, FsFileType type) { - if (strlen(name) > option_.maxNameLength) { + if (strlen(name) > option_.fileSystemOption.maxNameLength) { return CURVEFS_ERROR::NAMETOOLONG; } @@ -855,143 +835,87 @@ CURVEFS_ERROR FuseClient::RemoveNode(fuse_req_t req, fuse_ino_t parent, return CURVEFS_ERROR::OK; } -CURVEFS_ERROR FuseClient::FuseOpOpenDir(fuse_req_t req, fuse_ino_t ino, - struct fuse_file_info *fi) { - VLOG(1) << "FuseOpOpenDir ino = " << ino; - std::shared_ptr inodeWrapper; - CURVEFS_ERROR ret = inodeManager_->GetInode(ino, inodeWrapper); - if (ret != CURVEFS_ERROR::OK) { - LOG(ERROR) << "inodeManager get inode fail, ret = " << ret - << ", inodeid = " << ino; - return ret; - } - - ::curve::common::UniqueLock lgGuard = inodeWrapper->GetUniqueLock(); - - uint64_t dindex = dirBuf_->DirBufferNew(); - fi->fh = dindex; - VLOG(1) << "FuseOpOpenDir, ino: " << ino << ", dindex: " << dindex; - return ret; -} - -CURVEFS_ERROR FuseClient::FuseOpReleaseDir(fuse_req_t req, fuse_ino_t ino, - struct fuse_file_info *fi) { - uint64_t dindex = fi->fh; - VLOG(1) << "FuseOpReleaseDir, ino: " << ino << ", dindex: " << dindex; - dirBuf_->DirBufferRelease(dindex); - - // release inodeAttr cache - inodeManager_->ReleaseCache(ino); - return CURVEFS_ERROR::OK; -} - -static void dirbuf_add(fuse_req_t req, struct DirBufferHead *b, - const Dentry &dentry, - const FuseClientOption &option, - bool cacheDir, - InodeAttr *attr = nullptr) { - struct stat stbuf; - struct fuse_entry_param param; - size_t oldsize = b->size; - if (!cacheDir) { - b->size += fuse_add_direntry(req, NULL, 0, dentry.name().c_str(), - NULL, 0); - b->p = static_cast(realloc(b->p, b->size)); - memset(&stbuf, 0, sizeof(stbuf)); - stbuf.st_ino = dentry.inodeid(); - fuse_add_direntry(req, b->p + oldsize, b->size - oldsize, - dentry.name().c_str(), &stbuf, b->size); - } else { - b->size += fuse_add_direntry_plus(req, NULL, 0, dentry.name().c_str(), - NULL, 0); - b->p = static_cast(realloc(b->p, b->size)); - GetDentryParamFromInodeAttr(option, *attr, ¶m); - fuse_add_direntry_plus(req, b->p + oldsize, b->size - oldsize, - dentry.name().c_str(), ¶m, b->size); +CURVEFS_ERROR FuseClient::FuseOpOpenDir(fuse_req_t req, + fuse_ino_t ino, + struct fuse_file_info* fi) { + CURVEFS_ERROR rc = fs_->OpenDir(req, ino, fi); + if (rc != CURVEFS_ERROR::OK) { + LOG(ERROR) << "opendir() failed, retCode = " << rc + << ", ino = " << ino; } + return rc; } -CURVEFS_ERROR FuseClient::FuseOpReadDirPlus(fuse_req_t req, fuse_ino_t ino, - size_t size, off_t off, - struct fuse_file_info *fi, - char **buffer, size_t *rSize, - bool cacheDir) { - VLOG(1) << "FuseOpReadDirPlus ino: " << ino << ", size: " << size - << ", off = " << off << ", cacheDir = " << cacheDir; - std::shared_ptr inodeWrapper; - CURVEFS_ERROR ret = inodeManager_->GetInode(ino, inodeWrapper); - if (ret != CURVEFS_ERROR::OK) { - LOG(ERROR) << "inodeManager get inode fail, ret = " << ret - << ", inodeid = " << ino; - return ret; - } - - uint64_t dindex = fi->fh; - DirBufferHead *bufHead = dirBuf_->DirBufferGet(dindex); - if (!bufHead->wasRead) { - std::list dentryList; - std::set inodeIds; - std::map inodeAttrMap; 
- auto limit = option_.listDentryLimit; - ret = dentryManager_->ListDentry(ino, &dentryList, limit); - if (ret != CURVEFS_ERROR::OK) { - LOG(ERROR) << "dentryManager_ ListDentry fail, ret = " << ret - << ", parent = " << ino; - return ret; +CURVEFS_ERROR FuseClient::FuseOpReadDir(fuse_req_t req, + fuse_ino_t ino, + size_t size, + off_t off, + struct fuse_file_info *fi, + char** bufferOut, + size_t* rSize, + bool plus) { + auto handler = fs_->FindHandler(fi->fh); + DirBufferHead* buffer = handler->buffer; + if (!handler->padding) { + auto entries = std::make_shared(); + CURVEFS_ERROR rc = fs_->ReadDir(req, ino, fi, &entries); + if (rc != CURVEFS_ERROR::OK) { + LOG(ERROR) << "readdir() failed, retCode = " << rc + << ", ino = " << ino << ", fh = " << fi->fh; + return rc; } - if (!cacheDir) { - for (const auto &dentry : dentryList) { - dirbuf_add(req, bufHead, dentry, option_, cacheDir); - } - } else { - for (const auto &dentry : dentryList) { - inodeIds.emplace(dentry.inodeid()); - } - VLOG(3) << "batch get inode size = " << inodeIds.size(); - ret = inodeManager_->BatchGetInodeAttrAsync(ino, &inodeIds, - &inodeAttrMap); - if (ret != CURVEFS_ERROR::OK) { - LOG(ERROR) << "BatchGetInodeAttr failed when FuseOpReadDir" - << ", parentId = " << ino; - return ret; - } - - for (const auto &dentry : dentryList) { - auto iter = inodeAttrMap.find(dentry.inodeid()); - if (iter != inodeAttrMap.end()) { - dirbuf_add(req, bufHead, dentry, option_, - cacheDir, &iter->second); - } else { - LOG(WARNING) << "BatchGetInodeAttr missing some inodes," - << " inodeId = " << dentry.inodeid(); - } + entries->Iterate([&](DirEntry* dirEntry){ + if (plus) { + fs_->AddDirEntryPlus(req, buffer, dirEntry); + } else { + fs_->AddDirEntry(req, buffer, dirEntry); } - } - bufHead->wasRead = true; + }); + handler->padding = true; } - if (off < bufHead->size) { - *buffer = bufHead->p + off; - *rSize = std::min(bufHead->size - off, size); + if (off < buffer->size) { + *bufferOut = buffer->p + off; + *rSize = std::min(buffer->size - off, size); } else { - *buffer = nullptr; + *bufferOut = nullptr; *rSize = 0; } - return ret; + return CURVEFS_ERROR::OK; +} + +CURVEFS_ERROR FuseClient::FuseOpReleaseDir(fuse_req_t req, + fuse_ino_t ino, + struct fuse_file_info* fi) { + CURVEFS_ERROR rc = fs_->ReleaseDir(req, ino, fi); + if (rc != CURVEFS_ERROR::OK) { + LOG(ERROR) << "releasedir() failed, retCode = " << rc + << ", ino = " << ino; + } + return rc; } CURVEFS_ERROR FuseClient::FuseOpRename(fuse_req_t req, fuse_ino_t parent, const char *name, fuse_ino_t newparent, - const char *newname) { + const char *newname, + unsigned int flags) { VLOG(1) << "FuseOpRename from (" << parent << ", " << name << ") to (" << newparent << ", " << newname << ")"; - if (strlen(name) > option_.maxNameLength || - strlen(newname) > option_.maxNameLength) { + + // TODO(Wine93): the flag RENAME_EXCHANGE and RENAME_NOREPLACE + // is only used in linux interface renameat(), not required by posix, + // we can ignore it now + if (flags != 0) { + return CURVEFS_ERROR::INVALIDPARAM; + } + + uint64_t maxNameLength = option_.fileSystemOption.maxNameLength; + if (strlen(name) > maxNameLength || strlen(newname) > maxNameLength) { LOG(WARNING) << "FuseOpRename name too long, name = " << name << ", name len = " << strlen(name) << ", new name = " << newname << ", new name len = " << strlen(newname) - << ", maxNameLength = " << option_.maxNameLength; + << ", maxNameLength = " << maxNameLength; return CURVEFS_ERROR::NAMETOOLONG; } @@ -1022,7 +946,7 @@ CURVEFS_ERROR 
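
/*
 * A standalone sketch of the readdir slicing above: the per-open buffer is
 * filled once (handler->padding), and every subsequent request returns
 * min(remaining, size) bytes starting at the given offset, or an empty result
 * once the offset is past the end.
 */
#include <algorithm>
#include <cstddef>

struct DirBuffer {
    char*  p = nullptr;
    size_t size = 0;
};

void SliceDirBuffer(const DirBuffer& buffer, size_t off, size_t size,
                    char** out, size_t* outSize) {
    if (off < buffer.size) {
        *out = buffer.p + off;
        *outSize = std::min(buffer.size - off, size);
    } else {
        *out = nullptr;   // offset past the end: readdir is finished
        *outSize = 0;
    }
}
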
FuseClient::FuseOpRename(fuse_req_t req, fuse_ino_t parent, renameOp.UpdateInodeCtime(); renameOp.UpdateCache(); - if (enableSumInDir_) { + if (enableSumInDir_.load()) { xattrManager_->UpdateParentXattrAfterRename( parent, newparent, newname, &renameOp); } @@ -1030,26 +954,24 @@ CURVEFS_ERROR FuseClient::FuseOpRename(fuse_req_t req, fuse_ino_t parent, return rc; } -CURVEFS_ERROR FuseClient::FuseOpGetAttr(fuse_req_t req, fuse_ino_t ino, +CURVEFS_ERROR FuseClient::FuseOpGetAttr(fuse_req_t req, + fuse_ino_t ino, struct fuse_file_info *fi, - struct stat *attr) { - VLOG(1) << "FuseOpGetAttr ino = " << ino; - InodeAttr inodeAttr; - CURVEFS_ERROR ret = - inodeManager_->GetInodeAttr(ino, &inodeAttr); - if (ret != CURVEFS_ERROR::OK) { - LOG(ERROR) << "inodeManager get inodeAttr fail, ret = " << ret - << ", inodeid = " << ino; - return ret; + AttrOut* attrOut) { + CURVEFS_ERROR rc = fs_->GetAttr(req, ino, attrOut); + if (rc != CURVEFS_ERROR::OK) { + LOG(ERROR) << "getattr() fail, retCode = " << rc + << ", ino = " << ino; } - InodeAttr2ParamAttr(inodeAttr, attr); - return ret; + return rc; } -CURVEFS_ERROR FuseClient::FuseOpSetAttr(fuse_req_t req, fuse_ino_t ino, - struct stat *attr, int to_set, - struct fuse_file_info *fi, - struct stat *attrOut) { +CURVEFS_ERROR FuseClient::FuseOpSetAttr(fuse_req_t req, + fuse_ino_t ino, + struct stat* attr, + int to_set, + struct fuse_file_info* fi, + struct AttrOut* attrOut) { VLOG(1) << "FuseOpSetAttr to_set: " << to_set << ", ino: " << ino << ", attr: " << *attr; std::shared_ptr inodeWrapper; @@ -1106,11 +1028,9 @@ CURVEFS_ERROR FuseClient::FuseOpSetAttr(fuse_req_t req, fuse_ino_t ino, if (ret != CURVEFS_ERROR::OK) { return ret; } - InodeAttr inodeAttr; - inodeWrapper->GetInodeAttrLocked(&inodeAttr); - InodeAttr2ParamAttr(inodeAttr, attrOut); + inodeWrapper->GetInodeAttrLocked(&attrOut->attr); - if (enableSumInDir_ && changeSize != 0) { + if (enableSumInDir_.load() && changeSize != 0) { // update parent summary info const Inode* inode = inodeWrapper->GetInodeLocked(); XAttr xattr; @@ -1133,19 +1053,18 @@ CURVEFS_ERROR FuseClient::FuseOpSetAttr(fuse_req_t req, fuse_ino_t ino, if (ret != CURVEFS_ERROR::OK) { return ret; } - InodeAttr inodeAttr; - inodeWrapper->GetInodeAttrLocked(&inodeAttr); - InodeAttr2ParamAttr(inodeAttr, attrOut); + inodeWrapper->GetInodeAttrLocked(&attrOut->attr); return ret; } CURVEFS_ERROR FuseClient::FuseOpGetXattr(fuse_req_t req, fuse_ino_t ino, const char* name, std::string* value, size_t size) { + (void)req; VLOG(9) << "FuseOpGetXattr, ino: " << ino << ", name: " << name << ", size = " << size; - if (option_.disableXattr) { - return CURVEFS_ERROR::NOTSUPPORT; + if (option_.fileSystemOption.disableXattr) { + return CURVEFS_ERROR::NOSYS; } InodeAttr inodeAttr; @@ -1156,7 +1075,8 @@ CURVEFS_ERROR FuseClient::FuseOpGetXattr(fuse_req_t req, fuse_ino_t ino, return ret; } - ret = xattrManager_->GetXattr(name, value, &inodeAttr, enableSumInDir_); + ret = xattrManager_->GetXattr(name, value, + &inodeAttr, enableSumInDir_.load()); if (CURVEFS_ERROR::OK != ret) { LOG(ERROR) << "xattrManager get xattr failed, name = " << name; return ret; @@ -1181,10 +1101,6 @@ CURVEFS_ERROR FuseClient::FuseOpGetXattr(fuse_req_t req, fuse_ino_t ino, CURVEFS_ERROR FuseClient::FuseOpSetXattr(fuse_req_t req, fuse_ino_t ino, const char* name, const char* value, size_t size, int flags) { - if (option_.disableXattr) { - return CURVEFS_ERROR::NOTSUPPORT; - } - std::string strname(name); std::string strvalue(value, size); VLOG(1) << "FuseOpSetXattr ino: " << ino << ", 
name: " << name @@ -1220,6 +1136,7 @@ CURVEFS_ERROR FuseClient::FuseOpSetXattr(fuse_req_t req, fuse_ino_t ino, CURVEFS_ERROR FuseClient::FuseOpListXattr(fuse_req_t req, fuse_ino_t ino, char *value, size_t size, size_t *realSize) { + (void)req; VLOG(1) << "FuseOpListXattr, ino: " << ino << ", size = " << size; InodeAttr inodeAttr; CURVEFS_ERROR ret = inodeManager_->GetInodeAttr(ino, &inodeAttr); @@ -1266,10 +1183,12 @@ CURVEFS_ERROR FuseClient::FuseOpListXattr(fuse_req_t req, fuse_ino_t ino, return CURVEFS_ERROR::OUT_OF_RANGE; } -CURVEFS_ERROR FuseClient::FuseOpSymlink(fuse_req_t req, const char *link, - fuse_ino_t parent, const char *name, - fuse_entry_param *e) { - if (strlen(name) > option_.maxNameLength) { +CURVEFS_ERROR FuseClient::FuseOpSymlink(fuse_req_t req, + const char* link, + fuse_ino_t parent, + const char* name, + EntryOut* entryOut) { + if (strlen(name) > option_.fileSystemOption.maxNameLength) { return CURVEFS_ERROR::NAMETOOLONG; } const struct fuse_ctx *ctx = fuse_req_ctx(req); @@ -1323,7 +1242,7 @@ CURVEFS_ERROR FuseClient::FuseOpSymlink(fuse_req_t req, const char *link, return ret; } - if (enableSumInDir_) { + if (enableSumInDir_.load()) { // update parent summary info XAttr xattr; xattr.mutable_xattrinfos()->insert({XATTRENTRIES, "1"}); @@ -1338,17 +1257,17 @@ CURVEFS_ERROR FuseClient::FuseOpSymlink(fuse_req_t req, const char *link, } } - InodeAttr attr; - inodeWrapper->GetInodeAttr(&attr); - GetDentryParamFromInodeAttr(option_, attr, e); + inodeWrapper->GetInodeAttr(&entryOut->attr); return ret; } -CURVEFS_ERROR FuseClient::FuseOpLink(fuse_req_t req, fuse_ino_t ino, - fuse_ino_t newparent, const char *newname, +CURVEFS_ERROR FuseClient::FuseOpLink(fuse_req_t req, + fuse_ino_t ino, + fuse_ino_t newparent, + const char* newname, FsFileType type, - fuse_entry_param *e) { - if (strlen(newname) > option_.maxNameLength) { + EntryOut* entryOut) { + if (strlen(newname) > option_.fileSystemOption.maxNameLength) { return CURVEFS_ERROR::NAMETOOLONG; } std::shared_ptr inodeWrapper; @@ -1393,7 +1312,7 @@ CURVEFS_ERROR FuseClient::FuseOpLink(fuse_req_t req, fuse_ino_t ino, return ret; } - if (enableSumInDir_) { + if (enableSumInDir_.load()) { // update parent summary info XAttr xattr; xattr.mutable_xattrinfos()->insert({XATTRENTRIES, "1"}); @@ -1409,14 +1328,13 @@ CURVEFS_ERROR FuseClient::FuseOpLink(fuse_req_t req, fuse_ino_t ino, } } - InodeAttr attr; - inodeWrapper->GetInodeAttr(&attr); - GetDentryParamFromInodeAttr(option_, attr, e); + inodeWrapper->GetInodeAttr(&entryOut->attr); return ret; } CURVEFS_ERROR FuseClient::FuseOpReadLink(fuse_req_t req, fuse_ino_t ino, std::string *linkStr) { + (void)req; VLOG(1) << "FuseOpReadLink, ino: " << ino << ", linkStr: " << linkStr; InodeAttr attr; CURVEFS_ERROR ret = inodeManager_->GetInodeAttr(ino, &attr); @@ -1429,22 +1347,97 @@ CURVEFS_ERROR FuseClient::FuseOpReadLink(fuse_req_t req, fuse_ino_t ino, return CURVEFS_ERROR::OK; } -CURVEFS_ERROR FuseClient::FuseOpRelease(fuse_req_t req, fuse_ino_t ino, +CURVEFS_ERROR FuseClient::FuseOpRelease(fuse_req_t req, + fuse_ino_t ino, struct fuse_file_info *fi) { - VLOG(1) << "FuseOpRelease, ino: " << ino; - if (FLAGS_enableCto) { - inodeManager_->RemoveOpenedInode(ino); + CURVEFS_ERROR rc = fs_->Release(req, ino); + if (rc != CURVEFS_ERROR::OK) { + LOG(ERROR) << "release() failed, ino = " << ino; + } + return rc; +} + +void FuseClient::FlushAll() { + FlushData(); +} + +CURVEFS_ERROR +FuseClient::SetMountStatus(const struct MountOption *mountOption) { + mountpoint_.set_path( + 
(mountOption->mountPoint == nullptr) ? "" : mountOption->mountPoint); + std::string fsName = + (mountOption->fsName == nullptr) ? "" : mountOption->fsName; + + mountpoint_.set_cto(FLAGS_enableCto); + + int retVal = SetHostPortInMountPoint(&mountpoint_); + if (retVal < 0) { + LOG(ERROR) << "Set Host and Port in MountPoint failed, ret = " + << retVal; + return CURVEFS_ERROR::INTERNAL; + } + + auto ret = mdsClient_->MountFs(fsName, mountpoint_, fsInfo_.get()); + if (ret != FSStatusCode::OK && ret != FSStatusCode::MOUNT_POINT_EXIST) { + LOG(ERROR) << "MountFs failed, FSStatusCode = " << ret + << ", FSStatusCode_Name = " << FSStatusCode_Name(ret) + << ", fsName = " << fsName + << ", mountPoint = " << mountpoint_.ShortDebugString(); + return CURVEFS_ERROR::MOUNT_FAILED; + } + inodeManager_->SetFsId(fsInfo_->fsid()); + dentryManager_->SetFsId(fsInfo_->fsid()); + enableSumInDir_.store(fsInfo_->enablesumindir()); + if (fsInfo_->has_recycletimehour()) { + enableSumInDir_.store(enableSumInDir_.load() && + (fsInfo_->recycletimehour() == 0)); + } + + LOG(INFO) << "Mount " << fsName << " on " << mountpoint_.ShortDebugString() + << " success!" + << " enableSumInDir = " << enableSumInDir_.load(); + + fsMetric_ = std::make_shared(fsName); + + // init fsname and mountpoint + leaseExecutor_->SetFsName(fsName); + leaseExecutor_->SetMountPoint(mountpoint_); + if (!leaseExecutor_->Start()) { + return CURVEFS_ERROR::INTERNAL; + } + + init_ = true; + if (warmupManager_ != nullptr) { + warmupManager_->SetMounted(true); } return CURVEFS_ERROR::OK; } -void FuseClient::FlushInode() { inodeManager_->FlushInodeOnce(); } +void FuseClient::InitQosParam() { + ReadWriteThrottleParams params; + params.iopsWrite = ThrottleParams(FLAGS_fuseClientAvgWriteIops, + FLAGS_fuseClientBurstWriteIops, + FLAGS_fuseClientBurstWriteIopsSecs); -void FuseClient::FlushInodeAll() { inodeManager_->FlushAll(); } + params.bpsWrite = ThrottleParams(FLAGS_fuseClientAvgWriteBytes, + FLAGS_fuseClientBurstWriteBytes, + FLAGS_fuseClientBurstWriteBytesSecs); -void FuseClient::FlushAll() { - FlushData(); - FlushInodeAll(); + params.iopsRead = ThrottleParams(FLAGS_fuseClientAvgReadIops, + FLAGS_fuseClientBurstReadIops, + FLAGS_fuseClientBurstReadIopsSecs); + + params.bpsRead = ThrottleParams(FLAGS_fuseClientAvgReadBytes, + FLAGS_fuseClientBurstReadBytes, + FLAGS_fuseClientBurstReadBytesSecs); + + throttle_.UpdateThrottleParams(params); + + int ret = bthread_timer_add(&throttleTimer_, butil::seconds_from_now(1), + on_throttle_timer, this); + if (ret != 0) { + LOG(ERROR) << "Create fuse client throttle timer failed!"; + } } } // namespace client diff --git a/curvefs/src/client/fuse_client.h b/curvefs/src/client/fuse_client.h index 6b16df6e01..3988b77866 100644 --- a/curvefs/src/client/fuse_client.h +++ b/curvefs/src/client/fuse_client.h @@ -25,6 +25,7 @@ #include #include +#include #include #include @@ -32,6 +33,7 @@ #include #include #include +#include #include "curvefs/proto/common.pb.h" #include "curvefs/proto/mds.pb.h" @@ -53,6 +55,9 @@ #include "curvefs/src/client/lease/lease_excutor.h" #include "curvefs/src/client/xattr_manager.h" #include "curvefs/src/client/warmup/warmup_manager.h" +#include "src/common/throttle.h" +#include "curvefs/src/client/filesystem/meta.h" +#include "curvefs/src/client/filesystem/filesystem.h" #define DirectIOAlignment 512 @@ -67,6 +72,7 @@ using ::curvefs::client::metric::FSMetric; namespace curvefs { namespace client { + namespace warmup { class WarmupManager; } @@ -77,6 +83,10 @@ using rpcclient::MdsClient; using 
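
/*
 * A sketch of the QoS refresh pattern above: the timer callback re-reads the
 * (possibly changed) limits and re-arms itself one second later, so throttle
 * parameters track runtime flag changes. A plain thread loop stands in for the
 * bthread timer and gflags used by the real client.
 */
#include <atomic>
#include <chrono>
#include <cstdint>
#include <thread>

struct ThrottleParams {
    uint64_t avgIops = 0;
    uint64_t burstIops = 0;
};

std::atomic<uint64_t> g_avgWriteIops{0};    // stand-in for FLAGS_fuseClientAvgWriteIops
std::atomic<uint64_t> g_burstWriteIops{0};  // stand-in for FLAGS_fuseClientBurstWriteIops
std::atomic<bool> g_running{true};

void UpdateThrottle(const ThrottleParams&) { /* apply to the throttle */ }

void QosRefreshLoop() {
    while (g_running.load()) {
        ThrottleParams params;
        params.avgIops = g_avgWriteIops.load();
        params.burstIops = g_burstWriteIops.load();
        UpdateThrottle(params);                                // like throttle_.UpdateThrottleParams
        std::this_thread::sleep_for(std::chrono::seconds(1));  // like the 1s timer re-arm
    }
}
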
rpcclient::MdsClientImpl; using rpcclient::MetaServerClient; using rpcclient::MetaServerClientImpl; +using ::curvefs::client::filesystem::FileSystem; +using ::curvefs::client::filesystem::EntryOut; +using ::curvefs::client::filesystem::AttrOut; +using ::curvefs::client::filesystem::FileOut; using curvefs::common::is_aligned; @@ -91,13 +101,12 @@ class FuseClient { metaClient_(std::make_shared()), inodeManager_(std::make_shared(metaClient_)), dentryManager_(std::make_shared(metaClient_)), - dirBuf_(std::make_shared()), fsInfo_(nullptr), - mdsBase_(nullptr), - isStop_(true), init_(false), enableSumInDir_(false), - warmupManager_(nullptr) {} + warmupManager_(nullptr), + mdsBase_(nullptr), + isStop_(true) {} virtual ~FuseClient() {} @@ -110,13 +119,12 @@ class FuseClient { metaClient_(metaClient), inodeManager_(inodeManager), dentryManager_(dentryManager), - dirBuf_(std::make_shared()), fsInfo_(nullptr), - mdsBase_(nullptr), - isStop_(true), init_(false), enableSumInDir_(false), - warmupManager_(warmupManager) {} + warmupManager_(warmupManager), + mdsBase_(nullptr), + isStop_(true) {} virtual CURVEFS_ERROR Init(const FuseClientOption &option); @@ -134,31 +142,42 @@ class FuseClient { virtual CURVEFS_ERROR FuseOpWrite(fuse_req_t req, fuse_ino_t ino, const char* buf, size_t size, off_t off, struct fuse_file_info* fi, - size_t* wSize) = 0; + FileOut* fileOut) = 0; virtual CURVEFS_ERROR FuseOpRead(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off, struct fuse_file_info* fi, char* buffer, size_t* rSize) = 0; - virtual CURVEFS_ERROR FuseOpLookup(fuse_req_t req, fuse_ino_t parent, - const char* name, fuse_entry_param* e); + virtual CURVEFS_ERROR FuseOpLookup(fuse_req_t req, + fuse_ino_t parent, + const char* name, + EntryOut* entryOut); - virtual CURVEFS_ERROR FuseOpOpen(fuse_req_t req, fuse_ino_t ino, - struct fuse_file_info* fi); + virtual CURVEFS_ERROR FuseOpOpen(fuse_req_t req, + fuse_ino_t ino, + struct fuse_file_info* fi, + FileOut* fileOut); - virtual CURVEFS_ERROR FuseOpCreate(fuse_req_t req, fuse_ino_t parent, - const char* name, mode_t mode, + virtual CURVEFS_ERROR FuseOpCreate(fuse_req_t req, + fuse_ino_t parent, + const char* name, + mode_t mode, struct fuse_file_info* fi, - fuse_entry_param* e) = 0; + EntryOut* entryOut) = 0; - virtual CURVEFS_ERROR FuseOpMkNod(fuse_req_t req, fuse_ino_t parent, - const char* name, mode_t mode, dev_t rdev, - fuse_entry_param* e) = 0; + virtual CURVEFS_ERROR FuseOpMkNod(fuse_req_t req, + fuse_ino_t parent, + const char* name, + mode_t mode, + dev_t rdev, + EntryOut* entryOut) = 0; - virtual CURVEFS_ERROR FuseOpMkDir(fuse_req_t req, fuse_ino_t parent, - const char* name, mode_t mode, - fuse_entry_param* e); + virtual CURVEFS_ERROR FuseOpMkDir(fuse_req_t req, + fuse_ino_t parent, + const char* name, + mode_t mode, + EntryOut* entryOut); virtual CURVEFS_ERROR FuseOpUnlink(fuse_req_t req, fuse_ino_t parent, const char* name) = 0; @@ -166,30 +185,38 @@ class FuseClient { virtual CURVEFS_ERROR FuseOpRmDir(fuse_req_t req, fuse_ino_t parent, const char* name); - virtual CURVEFS_ERROR FuseOpOpenDir(fuse_req_t req, fuse_ino_t ino, + virtual CURVEFS_ERROR FuseOpOpenDir(fuse_req_t req, + fuse_ino_t ino, struct fuse_file_info* fi); - virtual CURVEFS_ERROR FuseOpReleaseDir(fuse_req_t req, fuse_ino_t ino, + virtual CURVEFS_ERROR FuseOpReleaseDir(fuse_req_t req, + fuse_ino_t ino, struct fuse_file_info* fi); - virtual CURVEFS_ERROR FuseOpReadDirPlus(fuse_req_t req, fuse_ino_t ino, - size_t size, off_t off, - struct fuse_file_info* fi, - char** buffer, size_t* rSize, 
- bool cacheDir); + virtual CURVEFS_ERROR FuseOpReadDir(fuse_req_t req, + fuse_ino_t ino, + size_t size, + off_t off, + struct fuse_file_info* fi, + char** bufferOut, + size_t* rSize, + bool plus); - virtual CURVEFS_ERROR FuseOpRename(fuse_req_t req, fuse_ino_t parent, - const char* name, fuse_ino_t newparent, - const char* newname); + virtual CURVEFS_ERROR FuseOpRename(fuse_req_t req, + fuse_ino_t parent, + const char* name, + fuse_ino_t newparent, + const char* newname, + unsigned int flags); virtual CURVEFS_ERROR FuseOpGetAttr(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info* fi, - struct stat* attr); + struct AttrOut* out); virtual CURVEFS_ERROR FuseOpSetAttr(fuse_req_t req, fuse_ino_t ino, struct stat* attr, int to_set, struct fuse_file_info* fi, - struct stat* attrOut); + struct AttrOut* out); virtual CURVEFS_ERROR FuseOpGetXattr(fuse_req_t req, fuse_ino_t ino, const char* name, std::string* value, @@ -202,13 +229,17 @@ class FuseClient { virtual CURVEFS_ERROR FuseOpListXattr(fuse_req_t req, fuse_ino_t ino, char *value, size_t size, size_t *realSize); - virtual CURVEFS_ERROR FuseOpSymlink(fuse_req_t req, const char* link, - fuse_ino_t parent, const char* name, - fuse_entry_param* e); + virtual CURVEFS_ERROR FuseOpSymlink(fuse_req_t req, + const char* link, + fuse_ino_t parent, + const char* name, + EntryOut* entryOut); - virtual CURVEFS_ERROR FuseOpLink(fuse_req_t req, fuse_ino_t ino, - fuse_ino_t newparent, const char* newname, - fuse_entry_param* e) = 0; + virtual CURVEFS_ERROR FuseOpLink(fuse_req_t req, + fuse_ino_t ino, + fuse_ino_t newparent, + const char* newname, + EntryOut* entryOut) = 0; virtual CURVEFS_ERROR FuseOpReadLink(fuse_req_t req, fuse_ino_t ino, std::string* linkStr); @@ -221,11 +252,16 @@ class FuseClient { struct fuse_file_info* fi) = 0; virtual CURVEFS_ERROR FuseOpFlush(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { + (void)req; + (void)ino; + (void)fi; return CURVEFS_ERROR::OK; } virtual CURVEFS_ERROR FuseOpStatFs(fuse_req_t req, fuse_ino_t ino, struct statvfs* stbuf) { + (void)req; + (void)ino; // TODO(chengyi01,wuhanqing): implement in s3 and volume client stbuf->f_frsize = stbuf->f_bsize = fsInfo_->blocksize(); stbuf->f_blocks = 10UL << 30; @@ -237,10 +273,12 @@ class FuseClient { stbuf->f_fsid = fsInfo_->fsid(); stbuf->f_flag = 0; - stbuf->f_namemax = option_.maxNameLength; + stbuf->f_namemax = option_.fileSystemOption.maxNameLength; return CURVEFS_ERROR::OK; } + virtual CURVEFS_ERROR Truncate(InodeWrapper* inode, uint64_t length) = 0; + void SetFsInfo(const std::shared_ptr& fsInfo) { fsInfo_ = fsInfo; init_ = true; @@ -256,9 +294,9 @@ class FuseClient { return fsInfo_; } - virtual void FlushInode(); - - virtual void FlushInodeAll(); + std::shared_ptr GetFileSystem() { + return fs_; + } virtual void FlushAll(); @@ -267,16 +305,17 @@ class FuseClient { enableSumInDir_ = enable; } - bool PutWarmFilelistTask(fuse_ino_t key) { + bool PutWarmFilelistTask(fuse_ino_t key, common::WarmupStorageType type) { if (fsInfo_->fstype() == FSType::TYPE_S3) { - return warmupManager_->AddWarmupFilelist(key); + return warmupManager_->AddWarmupFilelist(key, type); } // only support s3 return true; } - bool PutWarmFileTask(fuse_ino_t key, const std::string& path) { + bool PutWarmFileTask(fuse_ino_t key, const std::string &path, + common::WarmupStorageType type) { if (fsInfo_->fstype() == FSType::TYPE_S3) { - return warmupManager_->AddWarmupFile(key, path); + return warmupManager_->AddWarmupFile(key, path, type); } // only support s3 return true; } @@ -288,18 
+327,31 @@ class FuseClient { return false; } + CURVEFS_ERROR SetMountStatus(const struct MountOption *mountOption); + + void Add(bool isRead, size_t size) { throttle_.Add(isRead, size); } + + void InitQosParam(); + protected: - CURVEFS_ERROR MakeNode(fuse_req_t req, fuse_ino_t parent, const char* name, - mode_t mode, FsFileType type, dev_t rdev, - bool internal, fuse_entry_param* e); + CURVEFS_ERROR MakeNode(fuse_req_t req, + fuse_ino_t parent, + const char* name, + mode_t mode, + FsFileType type, + dev_t rdev, + bool internal, + std::shared_ptr& InodeWrapper); // NOLINT CURVEFS_ERROR RemoveNode(fuse_req_t req, fuse_ino_t parent, const char* name, FsFileType type); - CURVEFS_ERROR CreateManageNode(fuse_req_t req, uint64_t parent, - const char *name, mode_t mode, + CURVEFS_ERROR CreateManageNode(fuse_req_t req, + uint64_t parent, + const char* name, + mode_t mode, ManageInodeType manageType, - fuse_entry_param *e); + EntryOut* entryOut); CURVEFS_ERROR GetOrCreateRecycleDir(fuse_req_t req, Dentry *out); @@ -312,10 +364,17 @@ class FuseClient { bool ShouldMoveToRecycle(fuse_ino_t parent); - CURVEFS_ERROR FuseOpLink(fuse_req_t req, fuse_ino_t ino, - fuse_ino_t newparent, const char* newname, + CURVEFS_ERROR FuseOpLink(fuse_req_t req, + fuse_ino_t ino, + fuse_ino_t newparent, + const char* newname, FsFileType type, - fuse_entry_param* e); + EntryOut* entryOut); + + CURVEFS_ERROR HandleOpenFlags(fuse_req_t req, + fuse_ino_t ino, + struct fuse_file_info* fi, + FileOut* fileOut); int SetHostPortInMountPoint(Mountpoint* out) { char hostname[kMaxHostNameLength]; @@ -330,9 +389,8 @@ class FuseClient { return 0; } - private: - virtual CURVEFS_ERROR Truncate(InodeWrapper* inode, uint64_t length) = 0; + private: virtual void FlushData() = 0; CURVEFS_ERROR UpdateParentMCTimeAndNlink( @@ -343,8 +401,8 @@ class FuseClient { std::string newName(name); newName = std::to_string(parent) + "_" + std::to_string(ino) + "_" + newName; - if (newName.length() > option_.maxNameLength) { - newName = newName.substr(0, option_.maxNameLength); + if (newName.length() > option_.fileSystemOption.maxNameLength) { + newName = newName.substr(0, option_.fileSystemOption.maxNameLength); } return newName; @@ -368,9 +426,6 @@ class FuseClient { std::shared_ptr leaseExecutor_; - // dir buffer - std::shared_ptr dirBuf_; - // filesystem info std::shared_ptr fsInfo_; @@ -380,7 +435,7 @@ class FuseClient { bool init_; // enable record summary info in dir inode xattr - bool enableSumInDir_; + std::atomic enableSumInDir_; std::shared_ptr fsMetric_; @@ -389,12 +444,18 @@ class FuseClient { // warmup manager std::shared_ptr warmupManager_; + std::shared_ptr fs_; + private: MDSBaseClient* mdsBase_; Atomic isStop_; curve::common::Mutex renameMutex_; + + Throttle throttle_; + + bthread_timer_t throttleTimer_; }; } // namespace client diff --git a/curvefs/src/client/fuse_common.h b/curvefs/src/client/fuse_common.h index 8bbb17a510..280d96aa16 100644 --- a/curvefs/src/client/fuse_common.h +++ b/curvefs/src/client/fuse_common.h @@ -34,9 +34,9 @@ extern "C" { #endif struct MountOption { - char* mountPoint; - char* fsName; - char* fsType; + const char* mountPoint; + const char* fsName; + const char* fsType; char* conf; char* mdsAddr; }; diff --git a/curvefs/src/client/fuse_s3_client.cpp b/curvefs/src/client/fuse_s3_client.cpp index 9d291aa69e..e94fe31e2c 100644 --- a/curvefs/src/client/fuse_s3_client.cpp +++ b/curvefs/src/client/fuse_s3_client.cpp @@ -67,10 +67,20 @@ CURVEFS_ERROR FuseS3Client::Init(const FuseClientOption &option) { auto s3Client 
= std::make_shared(); s3Client->Init(opt.s3Opt.s3AdaptrOpt); + + const uint64_t writeCacheMaxByte = + opt.s3Opt.s3ClientAdaptorOpt.writeCacheMaxByte; + if (writeCacheMaxByte < MIN_WRITE_CACHE_SIZE) { + LOG(ERROR) << "writeCacheMaxByte is too small" + << ", at least " << MIN_WRITE_CACHE_SIZE << " (8MB)" + ", writeCacheMaxByte = " << writeCacheMaxByte; + return CURVEFS_ERROR::CACHETOOSMALL; + } + auto fsCacheManager = std::make_shared( dynamic_cast(s3Adaptor_.get()), - opt.s3Opt.s3ClientAdaptorOpt.readCacheMaxByte, - opt.s3Opt.s3ClientAdaptorOpt.writeCacheMaxByte, kvClientManager_); + opt.s3Opt.s3ClientAdaptorOpt.readCacheMaxByte, writeCacheMaxByte, + opt.s3Opt.s3ClientAdaptorOpt.readCacheThreads, kvClientManager_); if (opt.s3Opt.s3ClientAdaptorOpt.diskCacheOpt.diskCacheType != DiskCacheType::Disable) { auto s3DiskCacheClient = std::make_shared(); @@ -145,7 +155,8 @@ CURVEFS_ERROR FuseS3Client::FuseOpInit(void *userdata, CURVEFS_ERROR FuseS3Client::FuseOpWrite(fuse_req_t req, fuse_ino_t ino, const char *buf, size_t size, off_t off, struct fuse_file_info *fi, - size_t *wSize) { + FileOut* fileOut) { + size_t *wSize = &fileOut->nwritten; // check align if (fi->flags & O_DIRECT) { if (!(is_aligned(off, DirectIOAlignment) && @@ -207,6 +218,8 @@ CURVEFS_ERROR FuseS3Client::FuseOpWrite(fuse_req_t req, fuse_ino_t ino, } } } + + inodeWrapper->GetInodeAttrLocked(&fileOut->attr); return ret; } @@ -214,6 +227,7 @@ CURVEFS_ERROR FuseS3Client::FuseOpRead(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off, struct fuse_file_info *fi, char *buffer, size_t *rSize) { + (void)req; // check align if (fi->flags & O_DIRECT) { if (!(is_aligned(off, DirectIOAlignment) && @@ -232,7 +246,7 @@ CURVEFS_ERROR FuseS3Client::FuseOpRead(fuse_req_t req, fuse_ino_t ino, uint64_t fileSize = inodeWrapper->GetLength(); size_t len = 0; - if (fileSize <= off) { + if (static_cast(fileSize) <= off) { *rSize = 0; return CURVEFS_ERROR::OK; } else if (fileSize < off + size) { @@ -268,33 +282,56 @@ CURVEFS_ERROR FuseS3Client::FuseOpRead(fuse_req_t req, fuse_ino_t ino, CURVEFS_ERROR FuseS3Client::FuseOpCreate(fuse_req_t req, fuse_ino_t parent, const char *name, mode_t mode, struct fuse_file_info *fi, - fuse_entry_param *e) { + EntryOut* entryOut) { VLOG(1) << "FuseOpCreate, parent: " << parent << ", name: " << name << ", mode: " << mode; + + std::shared_ptr inode; CURVEFS_ERROR ret = - MakeNode(req, parent, name, mode, FsFileType::TYPE_S3, 0, false, e); + MakeNode(req, parent, name, mode, FsFileType::TYPE_S3, 0, false, inode); if (ret != CURVEFS_ERROR::OK) { return ret; } - return FuseOpOpen(req, e->ino, fi); + + auto openFiles = fs_->BorrowMember().openFiles; + openFiles->Open(inode->GetInodeId(), inode); + + inode->GetInodeAttr(&entryOut->attr); + return CURVEFS_ERROR::OK; } -CURVEFS_ERROR FuseS3Client::FuseOpMkNod(fuse_req_t req, fuse_ino_t parent, - const char *name, mode_t mode, - dev_t rdev, fuse_entry_param *e) { +CURVEFS_ERROR FuseS3Client::FuseOpMkNod(fuse_req_t req, + fuse_ino_t parent, + const char* name, + mode_t mode, + dev_t rdev, + EntryOut* entryOut) { VLOG(1) << "FuseOpMkNod, parent: " << parent << ", name: " << name << ", mode: " << mode << ", rdev: " << rdev; - return MakeNode(req, parent, name, mode, FsFileType::TYPE_S3, rdev, false, - e); + + std::shared_ptr inode; + CURVEFS_ERROR rc = MakeNode(req, parent, name, mode, + FsFileType::TYPE_S3, rdev, false, + inode); + if (rc != CURVEFS_ERROR::OK) { + return rc; + } + + InodeAttr attr; + inode->GetInodeAttr(&attr); + *entryOut = EntryOut(attr); + return 
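Note on the hunk above: FuseS3Client::Init now rejects a write cache smaller than MIN_WRITE_CACHE_SIZE (8 MiB) and fails with CACHETOOSMALL before constructing the FsCacheManager. A standalone sketch of that guard, with a stand-in error enum and constant:

// Sketch of the write-cache size guard added to FuseS3Client::Init().
// The 8 MiB floor mirrors MIN_WRITE_CACHE_SIZE (8 * kMiB); InitError is
// a stand-in for CURVEFS_ERROR.
#include <cstdint>
#include <iostream>

enum class InitError { kOk, kCacheTooSmall };

constexpr uint64_t kMiBSketch = 1024ULL * 1024ULL;
constexpr uint64_t kMinWriteCacheBytes = 8 * kMiBSketch;

InitError CheckWriteCacheSize(uint64_t writeCacheMaxByte) {
    if (writeCacheMaxByte < kMinWriteCacheBytes) {
        std::cerr << "writeCacheMaxByte is too small, at least "
                  << kMinWriteCacheBytes << " (8MB), writeCacheMaxByte = "
                  << writeCacheMaxByte << std::endl;
        return InitError::kCacheTooSmall;
    }
    return InitError::kOk;
}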
CURVEFS_ERROR::OK; } -CURVEFS_ERROR FuseS3Client::FuseOpLink(fuse_req_t req, fuse_ino_t ino, - fuse_ino_t newparent, const char *newname, - fuse_entry_param *e) { +CURVEFS_ERROR FuseS3Client::FuseOpLink(fuse_req_t req, + fuse_ino_t ino, + fuse_ino_t newparent, + const char* newname, + EntryOut* entryOut) { VLOG(1) << "FuseOpLink, ino: " << ino << ", newparent: " << newparent << ", newname: " << newname; return FuseClient::FuseOpLink( - req, ino, newparent, newname, FsFileType::TYPE_S3, e); + req, ino, newparent, newname, FsFileType::TYPE_S3, entryOut); } CURVEFS_ERROR FuseS3Client::FuseOpUnlink(fuse_req_t req, fuse_ino_t parent, @@ -306,6 +343,8 @@ CURVEFS_ERROR FuseS3Client::FuseOpUnlink(fuse_req_t req, fuse_ino_t parent, CURVEFS_ERROR FuseS3Client::FuseOpFsync(fuse_req_t req, fuse_ino_t ino, int datasync, struct fuse_file_info *fi) { + (void)req; + (void)fi; VLOG(1) << "FuseOpFsync, ino: " << ino << ", datasync: " << datasync; CURVEFS_ERROR ret = s3Adaptor_->Flush(ino); @@ -334,6 +373,8 @@ CURVEFS_ERROR FuseS3Client::Truncate(InodeWrapper *inode, uint64_t length) { CURVEFS_ERROR FuseS3Client::FuseOpFlush(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { + (void)req; + (void)fi; VLOG(1) << "FuseOpFlush, ino: " << ino; CURVEFS_ERROR ret = CURVEFS_ERROR::OK; diff --git a/curvefs/src/client/fuse_s3_client.h b/curvefs/src/client/fuse_s3_client.h index 5e6ab3ab97..68d7bf5d08 100644 --- a/curvefs/src/client/fuse_s3_client.h +++ b/curvefs/src/client/fuse_s3_client.h @@ -33,6 +33,7 @@ #include "curvefs/src/client/fuse_client.h" #include "curvefs/src/client/s3/client_s3_cache_manager.h" #include "curvefs/src/client/warmup/warmup_manager.h" +#include "curvefs/src/volume/common.h" #include "src/common/s3_adapter.h" namespace curvefs { @@ -40,6 +41,7 @@ namespace client { using curve::common::GetObjectAsyncContext; using curve::common::GetObjectAsyncCallBack; +using curvefs::volume::kMiB; namespace warmup { class WarmupManager; class WarmupManagerS3Impl; @@ -79,7 +81,7 @@ class FuseS3Client : public FuseClient { CURVEFS_ERROR FuseOpWrite(fuse_req_t req, fuse_ino_t ino, const char *buf, size_t size, off_t off, - struct fuse_file_info *fi, size_t *wSize) override; + struct fuse_file_info *fi, FileOut* fileOut) override; CURVEFS_ERROR FuseOpRead(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off, @@ -87,17 +89,25 @@ class FuseS3Client : public FuseClient { char *buffer, size_t *rSize) override; - CURVEFS_ERROR FuseOpCreate(fuse_req_t req, fuse_ino_t parent, - const char *name, mode_t mode, struct fuse_file_info *fi, - fuse_entry_param *e) override; - - CURVEFS_ERROR FuseOpMkNod(fuse_req_t req, fuse_ino_t parent, - const char *name, mode_t mode, dev_t rdev, - fuse_entry_param *e) override; - - CURVEFS_ERROR FuseOpLink(fuse_req_t req, fuse_ino_t ino, - fuse_ino_t newparent, const char *newname, - fuse_entry_param *e) override; + CURVEFS_ERROR FuseOpCreate(fuse_req_t req, + fuse_ino_t parent, + const char* name, + mode_t mode, + struct fuse_file_info *fi, + EntryOut* entryOut) override; + + CURVEFS_ERROR FuseOpMkNod(fuse_req_t req, + fuse_ino_t parent, + const char* name, + mode_t mode, + dev_t rdev, + EntryOut* entryOut) override; + + CURVEFS_ERROR FuseOpLink(fuse_req_t req, + fuse_ino_t ino, + fuse_ino_t newparent, + const char *newname, + EntryOut* entryOut) override; CURVEFS_ERROR FuseOpUnlink(fuse_req_t req, fuse_ino_t parent, const char *name) override; @@ -108,17 +118,19 @@ class FuseS3Client : public FuseClient { CURVEFS_ERROR FuseOpFlush(fuse_req_t req, fuse_ino_t ino, struct 
fuse_file_info *fi) override; + CURVEFS_ERROR Truncate(InodeWrapper *inode, uint64_t length) override; + private: bool InitKVCache(const KVClientManagerOpt &opt); - CURVEFS_ERROR Truncate(InodeWrapper *inode, uint64_t length) override; - void FlushData() override; private: // s3 adaptor std::shared_ptr s3Adaptor_; std::shared_ptr kvClientManager_; + + static constexpr auto MIN_WRITE_CACHE_SIZE = 8 * kMiB; }; diff --git a/curvefs/src/client/fuse_volume_client.cpp b/curvefs/src/client/fuse_volume_client.cpp index ba9295797a..fcdbb17f15 100644 --- a/curvefs/src/client/fuse_volume_client.cpp +++ b/curvefs/src/client/fuse_volume_client.cpp @@ -32,7 +32,7 @@ #include "absl/cleanup/cleanup.h" #include "absl/memory/memory.h" #include "curvefs/proto/mds.pb.h" -#include "curvefs/src/client/error_code.h" +#include "curvefs/src/client/filesystem/error.h" #include "curvefs/src/client/volume/default_volume_storage.h" #include "curvefs/src/client/volume/extent_cache.h" #include "curvefs/src/volume/common.h" @@ -133,10 +133,11 @@ CURVEFS_ERROR FuseVolumeClient::FuseOpWrite(fuse_req_t req, size_t size, off_t off, struct fuse_file_info *fi, - size_t *wSize) { + FileOut* fileOut) { VLOG(9) << "write start, ino: " << ino << ", offset: " << off << ", length: " << size; + size_t* wSize = &fileOut->nwritten; if (fi->flags & O_DIRECT) { if (!(is_aligned(off, DirectIOAlignment) && is_aligned(size, DirectIOAlignment))) { @@ -148,7 +149,7 @@ CURVEFS_ERROR FuseVolumeClient::FuseOpWrite(fuse_req_t req, butil::Timer timer; timer.start(); - CURVEFS_ERROR ret = storage_->Write(ino, off, size, buf); + CURVEFS_ERROR ret = storage_->Write(ino, off, size, buf, fileOut); if (ret != CURVEFS_ERROR::OK) { if (fsMetric_) { fsMetric_->userWrite.eps.count << 1; @@ -189,6 +190,7 @@ CURVEFS_ERROR FuseVolumeClient::FuseOpRead(fuse_req_t req, struct fuse_file_info *fi, char *buffer, size_t *rSize) { + (void)req; VLOG(3) << "read start, ino: " << ino << ", offset: " << off << ", length: " << size; @@ -233,34 +235,57 @@ CURVEFS_ERROR FuseVolumeClient::FuseOpRead(fuse_req_t req, CURVEFS_ERROR FuseVolumeClient::FuseOpCreate(fuse_req_t req, fuse_ino_t parent, const char *name, mode_t mode, struct fuse_file_info *fi, - fuse_entry_param *e) { + EntryOut* entryOut) { VLOG(3) << "FuseOpCreate, parent: " << parent << ", name: " << name << ", mode: " << mode; - CURVEFS_ERROR ret = - MakeNode(req, parent, name, mode, FsFileType::TYPE_FILE, 0, false, e); + + std::shared_ptr inode; + CURVEFS_ERROR ret = MakeNode( + req, parent, name, mode, FsFileType::TYPE_FILE, 0, false, inode); if (ret != CURVEFS_ERROR::OK) { return ret; } - return FuseOpOpen(req, e->ino, fi); + + auto openFiles = fs_->BorrowMember().openFiles; + openFiles->Open(inode->GetInodeId(), inode); + + inode->GetInodeAttr(&entryOut->attr); + return CURVEFS_ERROR::OK; } -CURVEFS_ERROR FuseVolumeClient::FuseOpMkNod(fuse_req_t req, fuse_ino_t parent, - const char *name, mode_t mode, - dev_t rdev, fuse_entry_param *e) { +CURVEFS_ERROR FuseVolumeClient::FuseOpMkNod(fuse_req_t req, + fuse_ino_t parent, + const char* name, + mode_t mode, + dev_t rdev, + EntryOut* entryOut) { VLOG(3) << "FuseOpMkNod, parent: " << parent << ", name: " << name << ", mode: " << mode << ", rdev: " << rdev; - return MakeNode(req, parent, name, mode, FsFileType::TYPE_FILE, rdev, - false, e); + + std::shared_ptr inode; + CURVEFS_ERROR rc = MakeNode(req, parent, name, mode, + FsFileType::TYPE_FILE, rdev, + false, inode); + if (rc != CURVEFS_ERROR::OK) { + return rc; + } + + InodeAttr attr; + inode->GetInodeAttr(&attr); + 
*entryOut = EntryOut(attr); + return CURVEFS_ERROR::OK; } -CURVEFS_ERROR FuseVolumeClient::FuseOpLink(fuse_req_t req, fuse_ino_t ino, - fuse_ino_t newparent, const char *newname, - fuse_entry_param *e) { +CURVEFS_ERROR FuseVolumeClient::FuseOpLink(fuse_req_t req, + fuse_ino_t ino, + fuse_ino_t newparent, + const char* newname, + EntryOut* entryOut) { VLOG(1) << "FuseOpLink, ino: " << ino << ", newparent: " << newparent << ", newname: " << newname; return FuseClient::FuseOpLink( - req, ino, newparent, newname, FsFileType::TYPE_FILE, e); + req, ino, newparent, newname, FsFileType::TYPE_FILE, entryOut); } CURVEFS_ERROR FuseVolumeClient::FuseOpUnlink(fuse_req_t req, fuse_ino_t parent, @@ -272,6 +297,8 @@ CURVEFS_ERROR FuseVolumeClient::FuseOpUnlink(fuse_req_t req, fuse_ino_t parent, CURVEFS_ERROR FuseVolumeClient::FuseOpFsync(fuse_req_t req, fuse_ino_t ino, int datasync, struct fuse_file_info *fi) { + (void)req; + (void)fi; VLOG(3) << "FuseOpFsync start, ino: " << ino << ", datasync: " << datasync; CURVEFS_ERROR ret = storage_->Flush(ino); @@ -298,12 +325,16 @@ CURVEFS_ERROR FuseVolumeClient::FuseOpFsync(fuse_req_t req, fuse_ino_t ino, } CURVEFS_ERROR FuseVolumeClient::Truncate(InodeWrapper *inode, uint64_t length) { + (void)inode; + (void)length; // Todo: call volume truncate return CURVEFS_ERROR::OK; } CURVEFS_ERROR FuseVolumeClient::FuseOpFlush(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { + (void)req; + (void)fi; VLOG(9) << "FuseOpFlush, ino: " << ino; CURVEFS_ERROR ret = storage_->Flush(ino); diff --git a/curvefs/src/client/fuse_volume_client.h b/curvefs/src/client/fuse_volume_client.h index f702c766b4..952c6c1981 100644 --- a/curvefs/src/client/fuse_volume_client.h +++ b/curvefs/src/client/fuse_volume_client.h @@ -65,24 +65,32 @@ class FuseVolumeClient : public FuseClient { CURVEFS_ERROR FuseOpWrite(fuse_req_t req, fuse_ino_t ino, const char *buf, size_t size, off_t off, - struct fuse_file_info *fi, size_t *wSize) override; + struct fuse_file_info *fi, FileOut* fileOut) override; CURVEFS_ERROR FuseOpRead(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off, struct fuse_file_info *fi, char *buffer, size_t *rSize) override; - CURVEFS_ERROR FuseOpCreate(fuse_req_t req, fuse_ino_t parent, - const char *name, mode_t mode, struct fuse_file_info *fi, - fuse_entry_param *e) override; - CURVEFS_ERROR FuseOpMkNod(fuse_req_t req, fuse_ino_t parent, - const char *name, mode_t mode, dev_t rdev, - fuse_entry_param *e) override; + CURVEFS_ERROR FuseOpCreate(fuse_req_t req, + fuse_ino_t parent, + const char* name, + mode_t mode, + struct fuse_file_info* fi, + EntryOut* entryOut) override; + + CURVEFS_ERROR FuseOpMkNod(fuse_req_t req, + fuse_ino_t parent, + const char* name, + mode_t mode, + dev_t rdev, + EntryOut* entryOut) override; CURVEFS_ERROR FuseOpLink(fuse_req_t req, fuse_ino_t ino, - fuse_ino_t newparent, const char *newname, - fuse_entry_param *e) override; + fuse_ino_t newparent, + const char *newname, + EntryOut* entryOut) override; CURVEFS_ERROR FuseOpUnlink(fuse_req_t req, fuse_ino_t parent, const char *name) override; @@ -93,13 +101,13 @@ class FuseVolumeClient : public FuseClient { CURVEFS_ERROR FuseOpFlush(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) override; + CURVEFS_ERROR Truncate(InodeWrapper *inode, uint64_t length) override; + void SetSpaceManagerForTesting(SpaceManager *manager); void SetVolumeStorageForTesting(VolumeStorage *storage); private: - CURVEFS_ERROR Truncate(InodeWrapper *inode, uint64_t length) override; - void FlushData() override; 
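Note on the hunks above: both the S3 and volume clients now return plain output structs — EntryOut for create/mknod/link and FileOut for write — and let the filesystem layer build the FUSE reply, instead of filling fuse_entry_param in place. A minimal sketch of that pattern, using struct stat as a stand-in for the protobuf InodeAttr (the *Sketch names are illustrative):

// Minimal sketch of the EntryOut/FileOut "operation output" pattern:
// the low-level client fills plain structs and a single reply layer
// converts them to FUSE replies.
#include <sys/stat.h>
#include <cstddef>
#include <cstdint>

struct EntryOutSketch {
    struct stat attr{};          // attributes of the created/linked inode
    uint64_t entryTimeout = 1;   // how long the kernel may cache the entry
    uint64_t attrTimeout = 1;    // how long the kernel may cache the attrs
};

struct FileOutSketch {
    size_t nwritten = 0;         // bytes actually written
    struct stat attr{};          // attributes refreshed after the write
};

// A write path fills FileOut instead of a bare size_t, so callers can
// update the kernel attribute cache without a second getattr round trip.
void FillFileOut(size_t written, const struct stat& latest,
                 FileOutSketch* out) {
    out->nwritten = written;
    out->attr = latest;
}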
private: diff --git a/curvefs/src/client/inode_cache_manager.cpp b/curvefs/src/client/inode_cache_manager.cpp index bc44e6b91b..42035b6604 100644 --- a/curvefs/src/client/inode_cache_manager.cpp +++ b/curvefs/src/client/inode_cache_manager.cpp @@ -29,7 +29,7 @@ #include #include #include "curvefs/proto/metaserver.pb.h" -#include "curvefs/src/client/error_code.h" +#include "curvefs/src/client/filesystem/error.h" #include "curvefs/src/client/inode_wrapper.h" using ::curvefs::metaserver::Inode; @@ -46,36 +46,11 @@ DECLARE_bool(enableCto); namespace curvefs { namespace client { +using ::curvefs::client::filesystem::ToFSError; + using NameLockGuard = ::curve::common::GenericNameLockGuard; using curvefs::client::common::FLAGS_enableCto; -class TrimICacheAsyncDone : public MetaServerClientDone { - public: - explicit TrimICacheAsyncDone( - const std::shared_ptr &inodeWrapper, - const std::shared_ptr &inodeCacheManager) - : inodeWrapper_(inodeWrapper), inodeCacheManager_(inodeCacheManager) {} - - void Run() override { - std::unique_ptr self_guard(this); - MetaStatusCode ret = GetStatusCode(); - if (ret != MetaStatusCode::OK && ret != MetaStatusCode::NOT_FOUND) { - LOG(ERROR) << "metaClient_ UpdateInode failed, " - << "MetaStatusCode: " << ret - << ", MetaStatusCode_Name: " << MetaStatusCode_Name(ret) - << ", inodeid: " << inodeWrapper_->GetInodeId(); - inodeWrapper_->MarkInodeError(); - } - VLOG(9) << "Trim inode " << inodeWrapper_->GetInodeId() - << " async success."; - inodeCacheManager_->RemoveICache(inodeWrapper_); - }; - - private: - std::shared_ptr inodeWrapper_; - std::shared_ptr inodeCacheManager_; -}; - #define GET_INODE_REMOTE(FSID, INODEID, OUT, STREAMING) \ MetaStatusCode ret = metaClient_->GetInode(FSID, INODEID, OUT, STREAMING); \ if (ret != MetaStatusCode::OK) { \ @@ -83,17 +58,7 @@ class TrimICacheAsyncDone : public MetaServerClientDone { << "metaClient_ GetInode failed, MetaStatusCode = " << ret \ << ", MetaStatusCode_Name = " << MetaStatusCode_Name(ret) \ << ", inodeid = " << INODEID; \ - return MetaStatusCodeToCurvefsErrCode(ret); \ - } - -#define PUT_INODE_CACHE(INODEID, INODEWRAPPER) \ - std::shared_ptr eliminatedOne; \ - bool eliminated = iCache_->Put(INODEID, INODEWRAPPER, &eliminatedOne); \ - if (eliminated) { \ - VLOG(3) << "GetInode eliminate one inode, ino: " \ - << eliminatedOne->GetInodeId() \ - << ", iCache does not evict inodes via put interface "; \ - assert(0); \ + return ToFSError(ret); \ } #define REFRESH_DATA_REMOTE(OUT, STREAMING) \ @@ -106,15 +71,8 @@ CURVEFS_ERROR InodeCacheManagerImpl::GetInode(uint64_t inodeId, std::shared_ptr &out) { NameLockGuard lock(nameLock_, std::to_string(inodeId)); - // get inode from cache - bool ok = iCache_->Get(inodeId, &out); - if (ok && NeedUseCache(inodeId, out, false)) { - curve::common::UniqueLock lgGuard = out->GetUniqueLock(); - if (out->GetType() == FsFileType::TYPE_FILE) { - return CURVEFS_ERROR::OK; - } - - REFRESH_DATA_REMOTE(out, out->NeedRefreshData()); + bool yes = openFiles_->IsOpened(inodeId, &out); + if (yes) { return CURVEFS_ERROR::OK; } @@ -129,24 +87,12 @@ InodeCacheManagerImpl::GetInode(uint64_t inodeId, // refresh data REFRESH_DATA_REMOTE(out, streaming); - // put to cache - PUT_INODE_CACHE(inodeId, out); - return CURVEFS_ERROR::OK; } CURVEFS_ERROR InodeCacheManagerImpl::GetInodeAttr(uint64_t inodeId, InodeAttr *out) { NameLockGuard lock(nameLock_, std::to_string(inodeId)); - // 1. 
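Note on the hunk above: InodeCacheManagerImpl::GetInode now asks an open-file table (openFiles_->IsOpened) before falling back to the metaserver, replacing the old LRU iCache_. A minimal sketch of such a table, with a placeholder Inode type standing in for InodeWrapper:

// Sketch of an open-file table like filesystem::OpenFiles: inodes that
// are currently open stay pinned in memory and are served without a
// metaserver round trip.
#include <cstdint>
#include <memory>
#include <mutex>
#include <unordered_map>

struct Inode { uint64_t ino = 0; };

class OpenFilesSketch {
 public:
    void Open(uint64_t ino, std::shared_ptr<Inode> inode) {
        std::lock_guard<std::mutex> lk(mu_);
        files_[ino] = std::move(inode);
    }

    bool IsOpened(uint64_t ino, std::shared_ptr<Inode>* out) {
        std::lock_guard<std::mutex> lk(mu_);
        auto it = files_.find(ino);
        if (it == files_.end()) {
            return false;
        }
        *out = it->second;
        return true;
    }

    void Close(uint64_t ino) {
        std::lock_guard<std::mutex> lk(mu_);
        files_.erase(ino);
    }

 private:
    std::mutex mu_;
    std::unordered_map<uint64_t, std::shared_ptr<Inode>> files_;
};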
find in icache - std::shared_ptr inodeWrapper; - bool ok = iCache_->Get(inodeId, &inodeWrapper); - if (ok && NeedUseCache(inodeId, inodeWrapper, true)) { - inodeWrapper->GetInodeAttr(out); - return CURVEFS_ERROR::OK; - } - - // 2. get form metaserver std::set inodeIds; std::list attrs; inodeIds.emplace(inodeId); @@ -156,7 +102,7 @@ CURVEFS_ERROR InodeCacheManagerImpl::GetInodeAttr(uint64_t inodeId, LOG(ERROR) << "metaClient BatchGetInodeAttr failed" << ", inodeId = " << inodeId << ", MetaStatusCode = " << ret << ", MetaStatusCode_Name = " << MetaStatusCode_Name(ret); - return MetaStatusCodeToCurvefsErrCode(ret); + return ToFSError(ret); } if (attrs.size() != 1) { @@ -173,21 +119,6 @@ CURVEFS_ERROR InodeCacheManagerImpl::GetInodeAttr(uint64_t inodeId, CURVEFS_ERROR InodeCacheManagerImpl::BatchGetInodeAttr( std::set *inodeIds, std::list *attrs) { - // get some inode attr in icache - for (auto iter = inodeIds->begin(); iter != inodeIds->end();) { - std::shared_ptr inodeWrapper; - NameLockGuard lock(nameLock_, std::to_string(*iter)); - bool ok = iCache_->Get(*iter, &inodeWrapper); - if (ok && NeedUseCache(*iter, inodeWrapper, true)) { - InodeAttr tmpAttr; - inodeWrapper->GetInodeAttr(&tmpAttr); - attrs->emplace_back(std::move(tmpAttr)); - iter = inodeIds->erase(iter); - continue; - } - ++iter; - } - if (inodeIds->empty()) { return CURVEFS_ERROR::OK; } @@ -199,7 +130,7 @@ CURVEFS_ERROR InodeCacheManagerImpl::BatchGetInodeAttr( << ret << ", MetaStatusCode_Name = " << MetaStatusCode_Name(ret); } - return MetaStatusCodeToCurvefsErrCode(ret); + return ToFSError(ret); } CURVEFS_ERROR InodeCacheManagerImpl::BatchGetInodeAttrAsync( @@ -207,26 +138,6 @@ CURVEFS_ERROR InodeCacheManagerImpl::BatchGetInodeAttrAsync( std::set *inodeIds, std::map *attrs) { NameLockGuard lg(asyncNameLock_, std::to_string(parentId)); - std::map cachedAttr; - bool cache = iAttrCache_->Get(parentId, &cachedAttr); - - // get some inode attr in icache - for (auto iter = inodeIds->begin(); iter != inodeIds->end();) { - std::shared_ptr inodeWrapper; - NameLockGuard lock(nameLock_, std::to_string(*iter)); - bool ok = iCache_->Get(*iter, &inodeWrapper); - if (ok && NeedUseCache(*iter, inodeWrapper, true)) { - InodeAttr tmpAttr; - inodeWrapper->GetInodeAttr(&tmpAttr); - attrs->emplace(*iter, std::move(tmpAttr)); - iter = inodeIds->erase(iter); - } else if (cache && cachedAttr.find(*iter) != cachedAttr.end()) { - attrs->emplace(*iter, cachedAttr[*iter]); - iter = inodeIds->erase(iter); - } else { - ++iter; - } - } if (inodeIds->empty()) { return CURVEFS_ERROR::OK; @@ -238,12 +149,12 @@ CURVEFS_ERROR InodeCacheManagerImpl::BatchGetInodeAttrAsync( return CURVEFS_ERROR::NOTEXIST; } + ::curve::common::Mutex mutex; std::shared_ptr cond = std::make_shared(inodeGroups.size()); for (const auto& it : inodeGroups) { VLOG(3) << "BatchGetInodeAttrAsync Send " << it.size(); - auto* done = new BatchGetInodeAttrAsyncDone(shared_from_this(), - cond, parentId); + auto* done = new BatchGetInodeAttrAsyncDone(attrs, &mutex, cond); MetaStatusCode ret = metaClient_->BatchGetInodeAttrAsync(fsId_, it, done); if (MetaStatusCode::OK != ret) { @@ -256,30 +167,12 @@ CURVEFS_ERROR InodeCacheManagerImpl::BatchGetInodeAttrAsync( // wait for all sudrequest finished cond->Wait(); - - bool ok = iAttrCache_->Get(parentId, attrs); - if (!ok) { - LOG(WARNING) << "get attrs form iAttrCache_ failed."; - } return CURVEFS_ERROR::OK; } CURVEFS_ERROR InodeCacheManagerImpl::BatchGetXAttr( std::set *inodeIds, std::list *xattr) { - // get some inode in icache - for (auto iter = 
inodeIds->begin(); iter != inodeIds->end();) { - std::shared_ptr inodeWrapper; - NameLockGuard lock(nameLock_, std::to_string(*iter)); - bool ok = iCache_->Get(*iter, &inodeWrapper); - if (ok && NeedUseCache(*iter, inodeWrapper, true)) { - xattr->emplace_back(inodeWrapper->GetXattr()); - iter = inodeIds->erase(iter); - } else { - ++iter; - } - } - if (inodeIds->empty()) { return CURVEFS_ERROR::OK; } @@ -290,7 +183,7 @@ CURVEFS_ERROR InodeCacheManagerImpl::BatchGetXAttr( << ret << ", MetaStatusCode_Name = " << MetaStatusCode_Name(ret); } - return MetaStatusCodeToCurvefsErrCode(ret); + return ToFSError(ret); } CURVEFS_ERROR InodeCacheManagerImpl::CreateInode( @@ -301,23 +194,11 @@ CURVEFS_ERROR InodeCacheManagerImpl::CreateInode( if (ret != MetaStatusCode::OK) { LOG(ERROR) << "metaClient_ CreateInode failed, MetaStatusCode = " << ret << ", MetaStatusCode_Name = " << MetaStatusCode_Name(ret); - return MetaStatusCodeToCurvefsErrCode(ret); + return ToFSError(ret); } - uint64_t inodeid = inode.inodeid(); out = std::make_shared(std::move(inode), metaClient_, s3ChunkInfoMetric_, option_.maxDataSize, option_.refreshDataIntervalSec); - - std::shared_ptr eliminatedOne; - bool eliminated = false; - { - NameLockGuard lock(nameLock_, std::to_string(inodeid)); - eliminated = iCache_->Put(inodeid, out, &eliminatedOne); - } - if (eliminated) { - /* iCache does not evict inodes via put interface */ - assert(0); - } return CURVEFS_ERROR::OK; } @@ -330,159 +211,29 @@ CURVEFS_ERROR InodeCacheManagerImpl::CreateManageInode( LOG(ERROR) << "metaClient_ CreateManageInode failed, MetaStatusCode = " << ret << ", MetaStatusCode_Name = " << MetaStatusCode_Name(ret); - return MetaStatusCodeToCurvefsErrCode(ret); + return ToFSError(ret); } - uint64_t inodeid = inode.inodeid(); out = std::make_shared(std::move(inode), metaClient_, s3ChunkInfoMetric_, option_.maxDataSize, option_.refreshDataIntervalSec); - - std::shared_ptr eliminatedOne; - bool eliminated = false; - { - NameLockGuard lock(nameLock_, std::to_string(inodeid)); - eliminated = iCache_->Put(inodeid, out, &eliminatedOne); - } - if (eliminated) { - /* iCache does not evict inodes via put interface */ - assert(0); - } return CURVEFS_ERROR::OK; } CURVEFS_ERROR InodeCacheManagerImpl::DeleteInode(uint64_t inodeId) { NameLockGuard lock(nameLock_, std::to_string(inodeId)); - iCache_->Remove(inodeId); MetaStatusCode ret = metaClient_->DeleteInode(fsId_, inodeId); if (ret != MetaStatusCode::OK && ret != MetaStatusCode::NOT_FOUND) { LOG(ERROR) << "metaClient_ DeleteInode failed, MetaStatusCode = " << ret << ", MetaStatusCode_Name = " << MetaStatusCode_Name(ret) << ", inodeId = " << inodeId; - return MetaStatusCodeToCurvefsErrCode(ret); + return ToFSError(ret); } - - curve::common::LockGuard lg2(dirtyMapMutex_); - dirtyMap_.erase(inodeId); return CURVEFS_ERROR::OK; } -void InodeCacheManagerImpl::AddInodeAttrs( - uint64_t parentId, const RepeatedPtrField& inodeAttrs) { - iAttrCache_->Set(parentId, inodeAttrs); -} - -void InodeCacheManagerImpl::ClearInodeCache(uint64_t inodeId) { - { - NameLockGuard lock(nameLock_, std::to_string(inodeId)); - iCache_->Remove(inodeId); - } - curve::common::LockGuard lg2(dirtyMapMutex_); - dirtyMap_.erase(inodeId); -} - void InodeCacheManagerImpl::ShipToFlush( - const std::shared_ptr &inodeWrapper) { - curve::common::LockGuard lg(dirtyMapMutex_); - dirtyMap_.emplace(inodeWrapper->GetInodeId(), inodeWrapper); -} - -void InodeCacheManagerImpl::FlushAll() { - while (!dirtyMap_.empty()) { - FlushInodeOnce(); - } -} - -void 
InodeCacheManagerImpl::FlushInodeOnce() { - std::map> temp_; - { - curve::common::LockGuard lg(dirtyMapMutex_); - temp_.swap(dirtyMap_); - } - for (auto it = temp_.begin(); it != temp_.end(); it++) { - curve::common::UniqueLock ulk = it->second->GetUniqueLock(); - it->second->Async(nullptr, true); - } -} - -void InodeCacheManagerImpl::ReleaseCache(uint64_t parentId) { - NameLockGuard lg(asyncNameLock_, std::to_string(parentId)); - iAttrCache_->Release(parentId); -} - -void InodeCacheManagerImpl::FlushInodeBackground() { - LOG(INFO) << "flush thread is start."; - while (!isStop_.load()) { - if (iCache_->Size() > maxCacheSize_) { - TrimIcache(iCache_->Size() - maxCacheSize_); - } - FlushInodeOnce(); - sleeper_.wait_for(std::chrono::seconds(flushPeriodSec_)); - } - LOG(INFO) << "flush thread is stop."; -} - -namespace { -// Wether an inode is dirty or not. -// if |needLock| is true, we acquire the lock firstly and do the test, -// otherwise, we assume the lock is already held. -bool IsDirtyInode(InodeWrapper *ino, bool needLock) { - auto check = [ino]() { - return ino->IsDirty() || !ino->S3ChunkInfoEmptyNolock() || - ino->GetMutableExtentCacheLocked()->HasDirtyExtents(); - }; - - if (needLock) { - auto lk = ino->GetUniqueLock(); - return check(); - } - - return check(); -} -} // namespace - -void InodeCacheManagerImpl::RemoveICache( - const std::shared_ptr &inode) { - if (!IsDirtyInode(inode.get(), true)) { - uint64_t inodeId = inode->GetInodeId(); - NameLockGuard lock(nameLock_, std::to_string(inodeId)); - ::curve::common::UniqueLock lgGuard = inode->GetUniqueLock(); - iCache_->Remove(inodeId); - } -} - -void InodeCacheManagerImpl::TrimIcache(uint64_t trimSize) { - std::shared_ptr inodeWrapper; - uint64_t inodeId; - VLOG(3) << "TrimIcache trimSize " << trimSize; - while (trimSize > 0) { - bool ok = iCache_->GetLast(&inodeId, &inodeWrapper); - if (ok) { - NameLockGuard lock(nameLock_, std::to_string(inodeId)); - ::curve::common::UniqueLock lgGuard = inodeWrapper->GetUniqueLock(); - if (IsDirtyInode(inodeWrapper.get(), false)) { - VLOG(9) << "TrimIcache sync dirty inode " << inodeId; - dirtyMapMutex_.lock(); - dirtyMap_.erase(inodeId); - dirtyMapMutex_.unlock(); - auto *done = - new TrimICacheAsyncDone(inodeWrapper, shared_from_this()); - inodeWrapper->Async(done); - } else { - VLOG(9) << "TrimIcache remove inode " << inodeId - << " from iCache"; - iCache_->Remove(inodeId); - } - trimSize--; - // remove the attr of the inode in iattrcache - auto parents = inodeWrapper->GetParentLocked(); - for (uint64_t parent : parents) { - iAttrCache_->Remove(parent, inodeId); - } - } else { - LOG(ERROR) << "icache size " << iCache_->Size(); - assert(0); - } - } + const std::shared_ptr& inode) { + deferSync_->Push(inode); } CURVEFS_ERROR @@ -519,57 +270,5 @@ InodeCacheManagerImpl::RefreshData(std::shared_ptr &inode, return rc; } -void InodeCacheManagerImpl::AddOpenedInode(uint64_t inodeId) { - VLOG(1) << "AddOpenedInode inodeId: " << inodeId; - curve::common::LockGuard lg(openInodesMutex_); - openedInodes_.emplace(inodeId); -} - -void InodeCacheManagerImpl::RemoveOpenedInode(uint64_t inodeId) { - VLOG(1) << "RemoveOpenedInode inodeId: " << inodeId; - curve::common::LockGuard lg(openInodesMutex_); - auto iter = openedInodes_.find(inodeId); - if (iter != openedInodes_.end()) { - openedInodes_.erase(iter); - } -} - -bool InodeCacheManagerImpl::OpenInodeCached(uint64_t inodeId) { - curve::common::LockGuard lg(openInodesMutex_); - auto iter = openedInodes_.find(inodeId); - return iter != openedInodes_.end(); -} - 
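Note on the hunk above: ShipToFlush is reduced to deferSync_->Push(inode); dirty inodes are handed to the new DeferSync component instead of being tracked in dirtyMap_ and flushed by the removed background thread. A minimal sketch of a deferred-sync queue, assuming flushing an inode boils down to a single Sync() call (the real DeferSync lives in curvefs/src/client/filesystem/defer_sync.h and is not shown here):

// Sketch of a DeferSync-style background flusher: Push() enqueues a
// dirty inode and a worker thread drains the queue periodically.
#include <chrono>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <thread>
#include <vector>

struct Inode {
    void Sync() { /* flush attributes and chunk info to the metaserver */ }
};

class DeferSyncSketch {
 public:
    DeferSyncSketch() : stop_(false), worker_([this] { Loop(); }) {}

    ~DeferSyncSketch() {
        {
            std::lock_guard<std::mutex> lk(mu_);
            stop_ = true;
        }
        cv_.notify_one();
        worker_.join();
    }

    void Push(std::shared_ptr<Inode> inode) {
        std::lock_guard<std::mutex> lk(mu_);
        pending_.push_back(std::move(inode));
        cv_.notify_one();
    }

 private:
    void Loop() {
        std::unique_lock<std::mutex> lk(mu_);
        for (;;) {
            cv_.wait_for(lk, std::chrono::seconds(3),
                         [this] { return stop_ || !pending_.empty(); });
            std::vector<std::shared_ptr<Inode>> batch;
            batch.swap(pending_);
            lk.unlock();
            for (auto& inode : batch) {
                inode->Sync();
            }
            lk.lock();
            if (stop_ && pending_.empty()) {
                return;   // drain everything before shutting down
            }
        }
    }

    std::mutex mu_;
    std::condition_variable cv_;
    bool stop_;
    std::vector<std::shared_ptr<Inode>> pending_;
    std::thread worker_;
};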
-bool InodeCacheManagerImpl::NeedUseCache(uint64_t inodeId, - const std::shared_ptr &inodeWrapper, - bool onlyAttr) { - auto lock = inodeWrapper->GetUniqueLock(); - if (onlyAttr) { - if (inodeWrapper->IsDirty()) { - return true; - } - } else { - if (IsDirtyInode(inodeWrapper.get(), false)) { - return true; - } - } - - if (((FLAGS_enableCto && OpenInodeCached(inodeId)) || !FLAGS_enableCto) - && !IsTimeOut(inodeWrapper)) { - return true; - } - return false; -} - -bool InodeCacheManagerImpl::IsTimeOut( - const std::shared_ptr &inodeWrapper) const { - uint32_t time = inodeWrapper->GetCachedTime(); - if (cacheTimeOutSec_ > 0 && - TimeUtility::GetTimeofDaySec() - time >= cacheTimeOutSec_) { - return true; - } - return false; -} - } // namespace client } // namespace curvefs diff --git a/curvefs/src/client/inode_cache_manager.h b/curvefs/src/client/inode_cache_manager.h index 0a3e44eac8..ce0ebb3d17 100644 --- a/curvefs/src/client/inode_cache_manager.h +++ b/curvefs/src/client/inode_cache_manager.h @@ -38,12 +38,14 @@ #include "curvefs/src/client/rpcclient/metaserver_client.h" #include "curvefs/proto/metaserver.pb.h" -#include "curvefs/src/client/error_code.h" +#include "curvefs/src/client/filesystem/error.h" #include "src/common/concurrent/concurrent.h" #include "src/common/interruptible_sleeper.h" #include "curvefs/src/client/inode_wrapper.h" #include "src/common/concurrent/name_lock.h" #include "curvefs/src/client/common/config.h" +#include "curvefs/src/client/filesystem/openfile.h" +#include "curvefs/src/client/filesystem/defer_sync.h" using ::curve::common::LRUCache; using ::curve::common::CacheMetrics; @@ -64,57 +66,8 @@ using rpcclient::BatchGetInodeAttrDone; using curve::common::CountDownEvent; using metric::S3ChunkInfoMetric; using common::RefreshDataOption; - -class InodeAttrCache { - public: - InodeAttrCache() {} - ~InodeAttrCache() {} - - bool Get(uint64_t parentId, std::map *imap) { - curve::common::LockGuard lg(iAttrCacheMutex_); - auto iter = iAttrCache_.find(parentId); - if (iter != iAttrCache_.end()) { - imap->insert(iter->second.begin(), iter->second.end()); - return true; - } - return false; - } - - void Set(uint64_t parentId, const RepeatedPtrField& inodeAttrs) { - curve::common::LockGuard lg(iAttrCacheMutex_); - VLOG(1) << "parentId = " << parentId - << ", iAttrCache set size = " << inodeAttrs.size(); - auto& inner = iAttrCache_[parentId]; - for (const auto &it : inodeAttrs) { - inner.emplace(it.inodeid(), it); - } - } - - void Release(uint64_t parentId) { - curve::common::LockGuard lg(iAttrCacheMutex_); - auto size = iAttrCache_.size(); - auto iter = iAttrCache_.find(parentId); - if (iter != iAttrCache_.end()) { - iAttrCache_.erase(iter); - } - VLOG(1) << "inodeId = " << parentId - << ", inode attr cache release, before = " - << size << ", after = " << iAttrCache_.size(); - } - - void Remove(uint64_t parentId, uint64_t inodeId) { - curve::common::LockGuard lg(iAttrCacheMutex_); - auto iter = iAttrCache_.find(parentId); - if (iter != iAttrCache_.end()) { - iter->second.erase(inodeId); - } - } - - private: - // inodeAttr cache; > - std::map> iAttrCache_; - curve::common::Mutex iAttrCacheMutex_; -}; +using ::curvefs::client::filesystem::OpenFiles; +using ::curvefs::client::filesystem::DeferSync; class InodeCacheManager { public: @@ -126,14 +79,9 @@ class InodeCacheManager { fsId_ = fsId; } - virtual CURVEFS_ERROR Init(uint64_t cacheSize, bool enableCacheMetrics, - uint32_t flushPeriodSec, - RefreshDataOption option, - uint32_t cacheTimeOutSec) = 0; - - virtual void Run() = 0; - 
- virtual void Stop() = 0; + virtual CURVEFS_ERROR Init(RefreshDataOption option, + std::shared_ptr openFiles, + std::shared_ptr deferSync) = 0; virtual CURVEFS_ERROR GetInode(uint64_t inodeId, @@ -161,24 +109,9 @@ class InodeCacheManager { virtual CURVEFS_ERROR DeleteInode(uint64_t inodeId) = 0; - virtual void AddInodeAttrs(uint64_t parentId, - const RepeatedPtrField& inodeAttrs) = 0; - - virtual void ClearInodeCache(uint64_t inodeId) = 0; - virtual void ShipToFlush( const std::shared_ptr &inodeWrapper) = 0; - virtual void FlushAll() = 0; - - virtual void FlushInodeOnce() = 0; - - virtual void ReleaseCache(uint64_t parentId) = 0; - - virtual void AddOpenedInode(uint64_t inodeId) = 0; - - virtual void RemoveOpenedInode(uint64_t inodeId) = 0; - protected: uint32_t fsId_; }; @@ -187,59 +120,22 @@ class InodeCacheManagerImpl : public InodeCacheManager, public std::enable_shared_from_this { public: InodeCacheManagerImpl() - : metaClient_(std::make_shared()), - iCache_(nullptr), - iAttrCache_(nullptr), - isStop_(true), - cacheTimeOutSec_(0) {} + : metaClient_(std::make_shared()) {} explicit InodeCacheManagerImpl( const std::shared_ptr &metaClient) - : metaClient_(metaClient), - iCache_(nullptr), - iAttrCache_(nullptr), - cacheTimeOutSec_(0) {} - - CURVEFS_ERROR Init(uint64_t cacheSize, bool enableCacheMetrics, - uint32_t flushPeriodSec, - RefreshDataOption option, - uint32_t cacheTimeOutSec) override { - if (enableCacheMetrics) { - iCache_ = std::make_shared< - LRUCache>>(0, - std::make_shared("icache")); - } else { - iCache_ = std::make_shared< - LRUCache>>(0); - } - maxCacheSize_ = cacheSize; + : metaClient_(metaClient) {} + + CURVEFS_ERROR Init(RefreshDataOption option, + std::shared_ptr openFiles, + std::shared_ptr deferSync) override { option_ = option; - flushPeriodSec_ = flushPeriodSec; - cacheTimeOutSec_ = cacheTimeOutSec; - iAttrCache_ = std::make_shared(); s3ChunkInfoMetric_ = std::make_shared(); + openFiles_ = openFiles; + deferSync_ = deferSync; return CURVEFS_ERROR::OK; } - void Run() { - isStop_.exchange(false); - flushThread_ = - Thread(&InodeCacheManagerImpl::FlushInodeBackground, this); - LOG(INFO) << "Start inodeManager flush thread ok."; - } - - void Stop() { - isStop_.exchange(true); - LOG(INFO) << "stop inodeManager flush thread ..."; - sleeper_.interrupt(); - flushThread_.join(); - } - - bool IsDirtyMapExist(uint64_t inodeId) { - curve::common::LockGuard lg(dirtyMapMutex_); - return dirtyMap_.count(inodeId) > 0; - } - CURVEFS_ERROR GetInode(uint64_t inodeId, std::shared_ptr &out) override; @@ -263,78 +159,37 @@ class InodeCacheManagerImpl : public InodeCacheManager, CURVEFS_ERROR DeleteInode(uint64_t inodeId) override; - void AddInodeAttrs(uint64_t parentId, - const RepeatedPtrField& inodeAttrs) override; - - void ClearInodeCache(uint64_t inodeId) override; - void ShipToFlush( const std::shared_ptr &inodeWrapper) override; - void FlushAll() override; - - void FlushInodeOnce() override; - - void ReleaseCache(uint64_t parentId) override; - - void RemoveICache(const std::shared_ptr &inode); - - void AddOpenedInode(uint64_t inodeId) override; - - void RemoveOpenedInode(uint64_t inodeId) override; - - bool NeedUseCache(uint64_t inodeId, - const std::shared_ptr &inodeWrapper, - bool onlyAttr); - - bool IsTimeOut(const std::shared_ptr &inodeWrapper) const; - private: - virtual void FlushInodeBackground(); - void TrimIcache(uint64_t trimSize); CURVEFS_ERROR RefreshData(std::shared_ptr &inode, // NOLINT bool streaming = true); - bool OpenInodeCached(uint64_t inodeId); private: 
std::shared_ptr metaClient_; - std::shared_ptr>> iCache_; std::shared_ptr s3ChunkInfoMetric_; - std::shared_ptr iAttrCache_; - - // dirty map, key is inodeid - std::map> dirtyMap_; - curve::common::Mutex dirtyMapMutex_; + std::shared_ptr openFiles_; - // record opened inode - std::multiset openedInodes_; - curve::common::Mutex openInodesMutex_; + std::shared_ptr deferSync_; curve::common::GenericNameLock nameLock_; curve::common::GenericNameLock asyncNameLock_; - uint64_t maxCacheSize_; RefreshDataOption option_; - uint32_t flushPeriodSec_; - Thread flushThread_; - InterruptibleSleeper sleeper_; - Atomic isStop_; - // cache timeout seconds, 0 means never timeout - uint32_t cacheTimeOutSec_; }; class BatchGetInodeAttrAsyncDone : public BatchGetInodeAttrDone { public: - BatchGetInodeAttrAsyncDone( - const std::shared_ptr &inodeCacheManager, - std::shared_ptr cond, - uint64_t parentId): - inodeCacheManager_(inodeCacheManager), - cond_(cond), - parentId_(parentId) {} + BatchGetInodeAttrAsyncDone(std::map* attrs, + ::curve::common::Mutex* mutex, + std::shared_ptr cond): + attrs_(attrs), + mutex_(mutex), + cond_(cond) {} + ~BatchGetInodeAttrAsyncDone() {} void Run() override { @@ -342,22 +197,25 @@ class BatchGetInodeAttrAsyncDone : public BatchGetInodeAttrDone { MetaStatusCode ret = GetStatusCode(); if (ret != MetaStatusCode::OK) { LOG(ERROR) << "BatchGetInodeAttrAsync failed, " - << "parentId = " << parentId_ << ", MetaStatusCode: " << ret << ", MetaStatusCode_Name: " << MetaStatusCode_Name(ret); } else { auto inodeAttrs = GetInodeAttrs(); VLOG(3) << "BatchGetInodeAttrAsyncDone update inodeAttrCache" << " size = " << inodeAttrs.size(); - inodeCacheManager_->AddInodeAttrs(parentId_, inodeAttrs); + + curve::common::LockGuard lk(*mutex_); + for (const auto& attr : inodeAttrs) { + attrs_->emplace(attr.inodeid(), attr); + } } cond_->Signal(); }; private: - std::shared_ptr inodeCacheManager_; + ::curve::common::Mutex* mutex_; + std::map* attrs_; std::shared_ptr cond_; - uint64_t parentId_; }; } // namespace client diff --git a/curvefs/src/client/inode_wrapper.cpp b/curvefs/src/client/inode_wrapper.cpp index 735cf51544..3a49e8ee70 100644 --- a/curvefs/src/client/inode_wrapper.cpp +++ b/curvefs/src/client/inode_wrapper.cpp @@ -33,7 +33,6 @@ #include "curvefs/proto/metaserver.pb.h" #include "curvefs/src/client/async_request_closure.h" -#include "curvefs/src/client/error_code.h" #include "curvefs/src/client/rpcclient/metaserver_client.h" #include "curvefs/src/client/rpcclient/task_excutor.h" #include "curvefs/src/client/xattr_manager.h" @@ -47,6 +46,7 @@ namespace client { using rpcclient::MetaServerClient; using rpcclient::MetaServerClientImpl; using rpcclient::DataIndices; +using ::curvefs::client::filesystem::ToFSError; bvar::Adder g_alive_inode_count{"alive_inode_count"}; @@ -193,7 +193,7 @@ CURVEFS_ERROR InodeWrapper::SyncAttr(bool internal) { << "MetaStatusCode: " << ret << ", MetaStatusCode_Name: " << MetaStatusCode_Name(ret) << ", inodeid: " << inode_.inodeid(); - return MetaStatusCodeToCurvefsErrCode(ret); + return ToFSError(ret); } dirty_ = false; @@ -213,7 +213,7 @@ CURVEFS_ERROR InodeWrapper::SyncS3ChunkInfo(bool internal) { << "MetaStatusCode: " << ret << ", MetaStatusCode_Name: " << MetaStatusCode_Name(ret) << ", inodeid: " << inode_.inodeid(); - return MetaStatusCodeToCurvefsErrCode(ret); + return ToFSError(ret); } ClearS3ChunkInfoAdd(); } @@ -294,7 +294,7 @@ CURVEFS_ERROR InodeWrapper::RefreshS3ChunkInfo() { << "MetaStatusCode: " << ret << ", MetaStatusCode_Name: " << 
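Note on the hunk above: BatchGetInodeAttrAsyncDone no longer writes into a per-parent attribute cache; each completion merges its attributes into the caller's map under a shared mutex and signals a countdown event the caller waits on. A minimal sketch of that fan-out/fan-in pattern, with std::thread standing in for the async RPCs and simplified CountDown/Attr types:

// Sketch of the fan-out/fan-in used by BatchGetInodeAttrAsync: N async
// requests each merge results into one shared map under a mutex, then
// signal a countdown; the caller waits until all N have reported.
#include <condition_variable>
#include <cstdint>
#include <map>
#include <mutex>
#include <thread>
#include <vector>

struct Attr { uint64_t size; };

class CountDown {
 public:
    explicit CountDown(int n) : count_(n) {}
    void Signal() {
        std::lock_guard<std::mutex> lk(mu_);
        if (--count_ == 0) cv_.notify_all();
    }
    void Wait() {
        std::unique_lock<std::mutex> lk(mu_);
        cv_.wait(lk, [this] { return count_ <= 0; });
    }
 private:
    std::mutex mu_;
    std::condition_variable cv_;
    int count_;
};

void BatchGetAttrs(const std::vector<std::vector<uint64_t>>& groups,
                   std::map<uint64_t, Attr>* attrs) {
    std::mutex mergeMutex;
    CountDown done(static_cast<int>(groups.size()));
    std::vector<std::thread> workers;
    for (const auto& group : groups) {
        workers.emplace_back([&, group] {
            std::map<uint64_t, Attr> partial;   // pretend this is the RPC reply
            for (uint64_t ino : group) partial[ino] = Attr{ino * 4096};
            {
                std::lock_guard<std::mutex> lk(mergeMutex);
                attrs->insert(partial.begin(), partial.end());
            }
            done.Signal();
        });
    }
    done.Wait();                 // wait for all sub-requests to finish
    for (auto& t : workers) t.join();
}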
MetaStatusCode_Name(ret) << ", inodeid: " << inode_.inodeid(); - return MetaStatusCodeToCurvefsErrCode(ret); + return ToFSError(ret); } auto before = s3ChunkInfoSize_; inode_.mutable_s3chunkinfomap()->swap(s3ChunkInfoMap); @@ -328,7 +328,7 @@ CURVEFS_ERROR InodeWrapper::Link(uint64_t parent) { <<", MetaStatusCode = " << ret << ", MetaStatusCode_Name = " << MetaStatusCode_Name(ret) << ", inodeid = " << inode_.inodeid(); - return MetaStatusCodeToCurvefsErrCode(ret); + return ToFSError(ret); } dirty_ = false; @@ -396,7 +396,7 @@ CURVEFS_ERROR InodeWrapper::UnLink(uint64_t parent) { << ", MetaStatusCode = " << ret << ", MetaStatusCode_Name = " << MetaStatusCode_Name(ret) << ", inodeid = " << inode_.inodeid(); - return MetaStatusCodeToCurvefsErrCode(ret); + return ToFSError(ret); } dirty_ = false; dirtyAttr_.Clear(); @@ -427,7 +427,7 @@ CURVEFS_ERROR InodeWrapper::UpdateParent( << ", MetaStatusCode = " << ret << ", MetaStatusCode_Name = " << MetaStatusCode_Name(ret) << ", inodeid = " << inode_.inodeid(); - return MetaStatusCodeToCurvefsErrCode(ret); + return ToFSError(ret); } dirty_ = false; dirtyAttr_.Clear(); @@ -457,7 +457,7 @@ CURVEFS_ERROR InodeWrapper::Sync(bool internal) { } void InodeWrapper::Async(MetaServerClientDone *done, bool internal) { - VLOG(3) << "async inode: " << inode_.ShortDebugString(); + VLOG(9) << "async inode: " << inode_.ShortDebugString(); switch (inode_.type()) { case FsFileType::TYPE_S3: @@ -514,7 +514,7 @@ CURVEFS_ERROR InodeWrapper::SyncS3(bool internal) { << "MetaStatusCode: " << ret << ", MetaStatusCode_Name: " << MetaStatusCode_Name(ret) << ", inodeid: " << inode_.inodeid(); - return MetaStatusCodeToCurvefsErrCode(ret); + return ToFSError(ret); } ClearS3ChunkInfoAdd(); dirty_ = false; @@ -559,6 +559,7 @@ class UpdateInodeAsyncS3Done : public MetaServerClientDone { } // namespace void InodeWrapper::AsyncS3(MetaServerClientDone *done, bool internal) { + (void)internal; if (dirty_ || !s3ChunkInfoAdd_.empty()) { LockSyncingInode(); LockSyncingS3ChunkInfo(); @@ -596,7 +597,7 @@ CURVEFS_ERROR InodeWrapper::RefreshVolumeExtent() { LOG(ERROR) << "GetVolumeExtent failed, inodeid: " << inode_.inodeid(); } - return MetaStatusCodeToCurvefsErrCode(st); + return ToFSError(st); } CURVEFS_ERROR InodeWrapper::RefreshNlink() { diff --git a/curvefs/src/client/inode_wrapper.h b/curvefs/src/client/inode_wrapper.h index 0ffcbb2889..543745751c 100644 --- a/curvefs/src/client/inode_wrapper.h +++ b/curvefs/src/client/inode_wrapper.h @@ -34,12 +34,12 @@ #include "curvefs/src/common/define.h" #include "curvefs/proto/metaserver.pb.h" -#include "curvefs/src/client/error_code.h" #include "curvefs/src/client/rpcclient/metaserver_client.h" #include "src/common/concurrent/concurrent.h" #include "curvefs/src/client/volume/extent_cache.h" #include "curvefs/src/client/metric/client_metric.h" #include "src/common/timeutility.h" +#include "curvefs/src/client/filesystem/error.h" using ::curvefs::metaserver::Inode; using ::curvefs::metaserver::S3ChunkInfoList; @@ -53,15 +53,13 @@ constexpr int kChangeTime = 1 << 1; constexpr int kModifyTime = 1 << 2; using ::curvefs::metaserver::VolumeExtentList; +using ::curvefs::client::filesystem::CURVEFS_ERROR; enum class InodeStatus { kNormal = 0, kError = -1, }; -// TODO(xuchaojie) : get from conf maybe? 
-const uint32_t kOptimalIOBlockSize = 0x10000u; - using rpcclient::MetaServerClient; using rpcclient::MetaServerClientImpl; using rpcclient::MetaServerClientDone; @@ -80,7 +78,7 @@ class InodeWrapper : public std::enable_shared_from_this { InodeWrapper(Inode inode, std::shared_ptr metaClient, std::shared_ptr s3ChunkInfoMetric = nullptr, - uint64_t maxDataSize = ULONG_MAX, + int64_t maxDataSize = LONG_MAX, uint32_t refreshDataInterval = UINT_MAX) : inode_(std::move(inode)), status_(InodeStatus::kNormal), @@ -400,8 +398,8 @@ class InodeWrapper : public std::enable_shared_from_this { InodeAttr dirtyAttr_; InodeStatus status_; - uint64_t baseMaxDataSize_; - uint64_t maxDataSize_; + int64_t baseMaxDataSize_; + int64_t maxDataSize_; uint32_t refreshDataInterval_; uint64_t lastRefreshTime_; diff --git a/curvefs/src/client/lease/lease_excutor.cpp b/curvefs/src/client/lease/lease_excutor.cpp index be1c962a14..08dc400c88 100644 --- a/curvefs/src/client/lease/lease_excutor.cpp +++ b/curvefs/src/client/lease/lease_excutor.cpp @@ -41,7 +41,7 @@ LeaseExecutor::~LeaseExecutor() { bool LeaseExecutor::Start() { if (opt_.leaseTimeUs <= 0 || opt_.refreshTimesPerLease <= 0) { LOG(ERROR) << "LeaseExecutor start fail. Invalid param in leaseopt, " - "leasTimesUs = " + "leaseTimeUs = " << opt_.leaseTimeUs << ", refreshTimePerLease = " << opt_.refreshTimesPerLease; return false; @@ -78,7 +78,8 @@ bool LeaseExecutor::RefreshLease() { // refresh from mds std::vector latestTxIdList; FSStatusCode ret = mdsCli_->RefreshSession(txIds, &latestTxIdList, - fsName_, mountpoint_); + fsName_, mountpoint_, + enableSumInDir_); if (ret != FSStatusCode::OK) { LOG(ERROR) << "LeaseExecutor refresh session fail, ret = " << ret << ", errorName = " << FSStatusCode_Name(ret); diff --git a/curvefs/src/client/lease/lease_excutor.h b/curvefs/src/client/lease/lease_excutor.h index 1d8a2c26ed..c54550529a 100644 --- a/curvefs/src/client/lease/lease_excutor.h +++ b/curvefs/src/client/lease/lease_excutor.h @@ -25,6 +25,7 @@ #include #include +#include #include "curvefs/src/client/rpcclient/metacache.h" #include "curvefs/src/client/rpcclient/mds_client.h" @@ -44,8 +45,10 @@ namespace client { class LeaseExecutor : public LeaseExecutorBase { public: LeaseExecutor(const LeaseOpt &opt, std::shared_ptr metaCache, - std::shared_ptr mdsCli) - : opt_(opt), metaCache_(metaCache), mdsCli_(mdsCli) {} + std::shared_ptr mdsCli, + std::atomic* enableSumInDir) + : opt_(opt), metaCache_(metaCache), mdsCli_(mdsCli), + enableSumInDir_(enableSumInDir) {} ~LeaseExecutor(); @@ -73,6 +76,7 @@ class LeaseExecutor : public LeaseExecutorBase { std::unique_ptr task_; std::string fsName_; Mountpoint mountpoint_; + std::atomic* enableSumInDir_; }; } // namespace client diff --git a/curvefs/src/client/main.cpp b/curvefs/src/client/main.cpp index 92739b18e0..e5d636b9ed 100644 --- a/curvefs/src/client/main.cpp +++ b/curvefs/src/client/main.cpp @@ -21,6 +21,8 @@ * Author: xuchaojie */ +#include + #include #include @@ -49,13 +51,11 @@ static const struct fuse_lowlevel_ops curve_ll_oper = { release : FuseOpRelease, fsync : FuseOpFsync, opendir : FuseOpOpenDir, - // TODO(wuhongsong): readdirplus is problematic, - // resulting in inconsistent metadata - // #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) - // readdir : 0, - // #else + #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) + readdir : 0, + #else readdir : FuseOpReadDir, - // #endif + #endif releasedir : FuseOpReleaseDir, fsyncdir : 0, statfs : FuseOpStatFs, @@ -79,11 +79,11 @@ static const struct fuse_lowlevel_ops curve_ll_oper 
= { flock : 0, fallocate : 0, #endif - // TODO(wuhongsong): The current implementation is problematic, - // resulting in inconsistent metadata - // #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) + #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) + readdirplus : FuseOpReadDirPlus, + #else readdirplus : 0, - // #endif + #endif #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 4) copy_file_range : 0, #endif @@ -107,7 +107,7 @@ void extra_options_help() { std::string match_any_pattern( const std::unordered_map& patterns, const char* src) { - int src_len = strlen(src); + size_t src_len = strlen(src); for (const auto& pair : patterns) { const auto& pattern = pair.first; if (pattern.length() < src_len && @@ -220,18 +220,18 @@ int main(int argc, char *argv[]) { fuse_daemonize(opts.foreground); - if (InitGlog(mOpts.conf, argv[0]) < 0) { - printf("Init glog failed, confpath = %s\n", mOpts.conf); + if (InitLog(mOpts.conf, argv[0]) < 0) { + printf("Init log failed, confpath = %s\n", mOpts.conf); } - ret = InitFuseClient(mOpts.conf, mOpts.fsName, mOpts.fsType, mOpts.mdsAddr); + ret = InitFuseClient(&mOpts); if (ret < 0) { - printf("init fuse client fail, conf =%s\n", mOpts.conf); + LOG(ERROR) << "init fuse client fail, conf = " << mOpts.conf; goto err_out4; } - printf("fuse start loop, singlethread = %d, max_idle_threads = %d\n", - opts.singlethread, opts.max_idle_threads); + LOG(INFO) << "fuse start loop, singlethread = " << opts.singlethread + << ", max_idle_threads = " << opts.max_idle_threads; /* Block until ctrl+c or fusermount -u */ if (opts.singlethread) { diff --git a/curvefs/src/client/metric/BUILD b/curvefs/src/client/metric/BUILD index be14a70b4c..9fc0cdd625 100644 --- a/curvefs/src/client/metric/BUILD +++ b/curvefs/src/client/metric/BUILD @@ -22,5 +22,9 @@ cc_library( hdrs = glob(["*.h"]), copts = CURVE_DEFAULT_COPTS, visibility = ["//visibility:public"], - deps = ["//external:gflags"], + deps = [ + "//external:gflags", + "//external:bvar", + "//src/client:curve_client", + ], ) diff --git a/curvefs/src/client/metric/client_metric.cpp b/curvefs/src/client/metric/client_metric.cpp new file mode 100644 index 0000000000..0cd5e8d299 --- /dev/null +++ b/curvefs/src/client/metric/client_metric.cpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Project: curve + * Created Date: Fri Apr 21 2023 + * Author: Xinlong-Chen + */ + +#include "curvefs/src/client/metric/client_metric.h" + +namespace curvefs { +namespace client { +namespace metric { + +const std::string MDSClientMetric::prefix = "curvefs_mds_client"; // NOLINT +const std::string MetaServerClientMetric::prefix = "curvefs_metaserver_client"; // NOLINT +const std::string ClientOpMetric::prefix = "curvefs_client"; // NOLINT +const std::string S3MultiManagerMetric::prefix = "curvefs_client_manager"; // NOLINT +const std::string FSMetric::prefix = "curvefs_client"; // NOLINT +const std::string S3Metric::prefix = "curvefs_s3"; // NOLINT +const std::string DiskCacheMetric::prefix = "curvefs_disk_cache"; // NOLINT +const std::string KVClientMetric::prefix = "curvefs_kvclient"; // NOLINT +const std::string S3ChunkInfoMetric::prefix = "inode_s3_chunk_info"; // NOLINT +const std::string WarmupManagerS3Metric::prefix = "curvefs_warmup"; // NOLINT + +} // namespace metric +} // namespace client +} // namespace curvefs + diff --git a/curvefs/src/client/metric/client_metric.h b/curvefs/src/client/metric/client_metric.h index d9c2cf5398..af38ceb200 100644 --- a/curvefs/src/client/metric/client_metric.h +++ b/curvefs/src/client/metric/client_metric.h @@ -36,8 +36,7 @@ namespace client { namespace metric { struct MDSClientMetric { - std::string prefix; - std::string mdsAddrs; + static const std::string prefix; InterfaceMetric mountFs; InterfaceMetric umountFs; @@ -53,11 +52,8 @@ struct MDSClientMetric { InterfaceMetric commitTx; InterfaceMetric allocOrGetMemcacheCluster; - explicit MDSClientMetric(const std::string& prefix_ = "") - : prefix(!prefix_.empty() ? prefix_ - : "curvefs_mds_client_" + - curve::common::ToHexString(this)), - mountFs(prefix, "mountFs"), + MDSClientMetric() + : mountFs(prefix, "mountFs"), umountFs(prefix, "umountFs"), getFsInfo(prefix, "getFsInfo"), getMetaServerInfo(prefix, "getMetaServerInfo"), @@ -73,7 +69,7 @@ struct MDSClientMetric { }; struct MetaServerClientMetric { - std::string prefix; + static const std::string prefix; // dentry InterfaceMetric getDentry; @@ -97,11 +93,8 @@ struct MetaServerClientMetric { InterfaceMetric updateVolumeExtent; InterfaceMetric getVolumeExtent; - explicit MetaServerClientMetric(const std::string &prefix_ = "") - : prefix(!prefix_.empty() ? prefix_ - : "curvefs_metaserver_client_" + - curve::common::ToHexString(this)), - getDentry(prefix, "getDentry"), + MetaServerClientMetric() + : getDentry(prefix, "getDentry"), listDentry(prefix, "listDentry"), createDentry(prefix, "createDentry"), deleteDentry(prefix, "deleteDentry"), @@ -142,7 +135,7 @@ struct OpMetric { }; struct ClientOpMetric { - std::string prefix; + static const std::string prefix; OpMetric opLookup; OpMetric opOpen; @@ -169,10 +162,8 @@ struct ClientOpMetric { OpMetric opWrite; - explicit ClientOpMetric(const std::string &prefix_ = "") - : prefix(!prefix_.empty() ? 
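Note on the hunks above: the metric structs replace their per-instance prefix strings (which previously embedded the object address) with one static const std::string prefix per struct, defined once in the new client_metric.cpp. A minimal sketch of that declaration/definition split, with InterfaceMetric reduced to a simple name holder for illustration:

// Sketch of the static-prefix pattern: every instance of a metric
// struct shares one bvar prefix instead of deriving a unique one from
// its own address. InterfaceMetricSketch is illustrative only.

// --- header ---
#include <string>

struct InterfaceMetricSketch {
    std::string name;
    InterfaceMetricSketch(const std::string& prefix, const std::string& op)
        : name(prefix + "_" + op) {}
};

struct MDSClientMetricSketch {
    static const std::string prefix;   // declared in the header
    InterfaceMetricSketch mountFs;
    InterfaceMetricSketch umountFs;
    MDSClientMetricSketch()
        : mountFs(prefix, "mountFs"), umountFs(prefix, "umountFs") {}
};

// --- source file ---
const std::string MDSClientMetricSketch::prefix = "curvefs_mds_client";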
prefix_ - : "curvefs_client"), - opLookup(prefix, "opLookup"), + ClientOpMetric() + : opLookup(prefix, "opLookup"), opOpen(prefix, "opOpen"), opCreate(prefix, "opCreate"), opMkNod(prefix, "opMkNod"), @@ -198,7 +189,8 @@ struct ClientOpMetric { }; struct S3MultiManagerMetric { - const std::string prefix; + static const std::string prefix; + bvar::Adder fileManagerNum; bvar::Adder chunkManagerNum; bvar::Adder writeDataCacheNum; @@ -206,9 +198,7 @@ struct S3MultiManagerMetric { bvar::Adder readDataCacheNum; bvar::Adder readDataCacheByte; - explicit S3MultiManagerMetric( - const std::string &prefix_ = "curvefs_client_manager") - : prefix(prefix_) { + S3MultiManagerMetric() { fileManagerNum.expose_as(prefix, "file_manager_num"); chunkManagerNum.expose_as(prefix, "chunk_manager_num"); writeDataCacheNum.expose_as(prefix, "write_data_cache_num"); @@ -219,7 +209,7 @@ struct S3MultiManagerMetric { }; struct FSMetric { - const std::string prefix = "curvefs_client"; + static const std::string prefix; std::string fsName; @@ -238,7 +228,7 @@ struct FSMetric { }; struct S3Metric { - const std::string prefix = "curvefs_s3"; + static const std::string prefix; std::string fsName; InterfaceMetric adaptorWrite; @@ -264,7 +254,7 @@ struct S3Metric { }; struct DiskCacheMetric { - const std::string prefix = "curvefs_disk_cache"; + static const std::string prefix; std::string fsName; InterfaceMetric writeS3; @@ -278,22 +268,33 @@ struct DiskCacheMetric { }; struct KVClientMetric { - const std::string prefix = "curvefs_kvclient"; - InterfaceMetric kvClientSet; + static const std::string prefix; InterfaceMetric kvClientGet; + InterfaceMetric kvClientSet; KVClientMetric() : kvClientGet(prefix, "get"), kvClientSet(prefix, "set") {} }; struct S3ChunkInfoMetric { - const std::string prefix = "inode_s3_chunk_info"; + static const std::string prefix; bvar::Adder s3ChunkInfoSize; S3ChunkInfoMetric() : s3ChunkInfoSize(prefix, "size") {} }; +struct WarmupManagerS3Metric { + static const std::string prefix; + + InterfaceMetric warmupS3Cached; + bvar::Adder warmupS3CacheSize; + + WarmupManagerS3Metric() + : warmupS3Cached(prefix, "s3_cached"), + warmupS3CacheSize(prefix, "s3_cache_size") {} +}; + } // namespace metric } // namespace client } // namespace curvefs diff --git a/curvefs/src/client/rpcclient/mds_client.cpp b/curvefs/src/client/rpcclient/mds_client.cpp index 928fca1d62..561351d286 100644 --- a/curvefs/src/client/rpcclient/mds_client.cpp +++ b/curvefs/src/client/rpcclient/mds_client.cpp @@ -59,6 +59,8 @@ MdsClientImpl::Init(const ::curve::client::MetaServerOption &mdsOpt, FSStatusCode MdsClientImpl::MountFs(const std::string& fsName, const Mountpoint& mountPt, FsInfo* fsInfo) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.mountFs.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.mountFs.latency); MountFsResponse response; @@ -88,6 +90,8 @@ FSStatusCode MdsClientImpl::MountFs(const std::string& fsName, FSStatusCode MdsClientImpl::UmountFs(const std::string& fsName, const Mountpoint& mountPt) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.umountFs.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.umountFs.latency); UmountFsResponse response; @@ -113,6 +117,8 @@ FSStatusCode MdsClientImpl::UmountFs(const std::string& fsName, FSStatusCode MdsClientImpl::GetFsInfo(const std::string &fsName, FsInfo *fsInfo) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.getFsInfo.qps.count << 1; LatencyUpdater 
updater(&mdsClientMetric_.getFsInfo.latency); GetFsInfoResponse response; @@ -143,6 +149,8 @@ FSStatusCode MdsClientImpl::GetFsInfo(const std::string &fsName, FSStatusCode MdsClientImpl::GetFsInfo(uint32_t fsId, FsInfo *fsInfo) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.getFsInfo.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.getFsInfo.latency); GetFsInfoResponse response; @@ -203,6 +211,9 @@ bool MdsClientImpl::GetMetaServerInfo( ::curve::common::StringToUll(strs[1], &port); auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; + (void)addrindex; mdsClientMetric_.getMetaServerInfo.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.getMetaServerInfo.latency); GetMetaServerInfoResponse response; @@ -239,6 +250,8 @@ bool MdsClientImpl::GetMetaServerListInCopysets( const LogicPoolID &logicalpooid, const std::vector ©setidvec, std::vector> *cpinfoVec) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.getMetaServerListInCopysets.qps.count << 1; LatencyUpdater updater( &mdsClientMetric_.getMetaServerListInCopysets.latency); @@ -290,6 +303,8 @@ bool MdsClientImpl::GetMetaServerListInCopysets( bool MdsClientImpl::CreatePartition( uint32_t fsID, uint32_t count, std::vector *partitionInfos) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.createPartition.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.createPartition.latency); CreatePartitionResponse response; @@ -335,6 +350,8 @@ bool MdsClientImpl::GetCopysetOfPartitions( const std::vector &partitionIDList, std::map *copysetMap) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.getCopysetOfPartitions.qps.count << 1; LatencyUpdater updater( &mdsClientMetric_.getCopysetOfPartitions.latency); @@ -379,6 +396,8 @@ bool MdsClientImpl::GetCopysetOfPartitions( bool MdsClientImpl::ListPartition(uint32_t fsID, std::vector *partitionInfos) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.listPartition.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.listPartition.latency); ListPartitionResponse response; @@ -416,6 +435,8 @@ bool MdsClientImpl::ListPartition(uint32_t fsID, bool MdsClientImpl::AllocOrGetMemcacheCluster(uint32_t fsId, MemcacheClusterInfo* cluster) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.allocOrGetMemcacheCluster.qps.count << 1; LatencyUpdater updater( &mdsClientMetric_.allocOrGetMemcacheCluster.latency); @@ -447,6 +468,8 @@ bool MdsClientImpl::AllocOrGetMemcacheCluster(uint32_t fsId, FSStatusCode MdsClientImpl::AllocS3ChunkId(uint32_t fsId, uint32_t idNum, uint64_t *chunkId) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.allocS3ChunkId.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.allocS3ChunkId.latency); AllocateS3ChunkResponse response; @@ -478,8 +501,11 @@ FSStatusCode MdsClientImpl::RefreshSession(const std::vector &txIds, std::vector *latestTxIdList, const std::string& fsName, - const Mountpoint& mountpoint) { + const Mountpoint& mountpoint, + std::atomic* enableSumInDir) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.refreshSession.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.refreshSession.latency); RefreshSessionRequest request; @@ -507,6 +533,11 @@ MdsClientImpl::RefreshSession(const std::vector &txIds, LOG(INFO) << "RefreshSession need update partition txid list: " << response.DebugString(); } + 
if (enableSumInDir->load() && !response.enablesumindir()) { + enableSumInDir->store(response.enablesumindir()); + LOG(INFO) << "update enableSumInDir to " + << response.enablesumindir(); + } return ret; }; @@ -517,6 +548,8 @@ MdsClientImpl::RefreshSession(const std::vector &txIds, FSStatusCode MdsClientImpl::GetLatestTxId(const GetLatestTxIdRequest& request, GetLatestTxIdResponse* response) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; VLOG(3) << "GetLatestTxId [request]: " << request.DebugString(); mdsClientMetric_.getLatestTxId.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.getLatestTxId.latency); @@ -552,6 +585,8 @@ FSStatusCode MdsClientImpl::GetLatestTxId(const GetLatestTxIdRequest& request, FSStatusCode MdsClientImpl::CommitTx(const CommitTxRequest& request) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; VLOG(3) << "CommitTx [request]: " << request.DebugString(); mdsClientMetric_.commitTx.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.commitTx.latency); @@ -670,6 +705,8 @@ SpaceErrCode MdsClientImpl::AllocateVolumeBlockGroup( const std::string &owner, std::vector *groups) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; AllocateBlockGroupResponse response; mdsbasecli_->AllocateVolumeBlockGroup(fsId, count, owner, &response, cntl, channel); @@ -705,6 +742,8 @@ SpaceErrCode MdsClientImpl::AcquireVolumeBlockGroup( const std::string &owner, curvefs::mds::space::BlockGroup *groups) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; AcquireBlockGroupResponse response; mdsbasecli_->AcquireVolumeBlockGroup(fsId, blockGroupOffset, owner, &response, cntl, channel); @@ -730,6 +769,8 @@ SpaceErrCode MdsClientImpl::ReleaseVolumeBlockGroup( const std::string &owner, const std::vector &blockGroups) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; ReleaseBlockGroupResponse response; mdsbasecli_->ReleaseVolumeBlockGroup(fsId, owner, blockGroups, &response, cntl, channel); diff --git a/curvefs/src/client/rpcclient/mds_client.h b/curvefs/src/client/rpcclient/mds_client.h index 903acc78a0..3b7577c9e3 100644 --- a/curvefs/src/client/rpcclient/mds_client.h +++ b/curvefs/src/client/rpcclient/mds_client.h @@ -26,6 +26,7 @@ #include #include #include +#include #include "curvefs/proto/mds.pb.h" #include "curvefs/proto/topology.pb.h" @@ -115,7 +116,8 @@ class MdsClient { RefreshSession(const std::vector &txIds, std::vector *latestTxIdList, const std::string& fsName, - const Mountpoint& mountpoint) = 0; + const Mountpoint& mountpoint, + std::atomic* enableSumInDir) = 0; virtual FSStatusCode GetLatestTxId(uint32_t fsId, std::vector* txIds) = 0; @@ -159,8 +161,7 @@ class MdsClient { class MdsClientImpl : public MdsClient { public: - explicit MdsClientImpl(const std::string &metricPrefix = "") - : mdsClientMetric_(metricPrefix) {} + MdsClientImpl() = default; FSStatusCode Init(const ::curve::client::MetaServerOption &mdsOpt, MDSBaseClient *baseclient) override; @@ -203,7 +204,8 @@ class MdsClientImpl : public MdsClient { FSStatusCode RefreshSession(const std::vector &txIds, std::vector *latestTxIdList, const std::string& fsName, - const Mountpoint& mountpoint) override; + const Mountpoint& mountpoint, + std::atomic* enableSumInDir) override; FSStatusCode GetLatestTxId(uint32_t fsId, std::vector* txIds) override; diff --git a/curvefs/src/client/rpcclient/metacache.cpp b/curvefs/src/client/rpcclient/metacache.cpp index 4da4c32eab..b7bc02f06e 100644 --- a/curvefs/src/client/rpcclient/metacache.cpp +++ 
b/curvefs/src/client/rpcclient/metacache.cpp @@ -50,13 +50,11 @@ void MetaCache::GetTxId(uint32_t partitionId, uint64_t *txId) { } } -bool MetaCache::GetTxId(uint32_t fsId, - uint64_t inodeId, - uint32_t *partitionId, +bool MetaCache::GetTxId(uint32_t fsId, uint64_t inodeId, uint32_t *partitionId, uint64_t *txId) { for (const auto &partition : partitionInfos_) { - if (fsId == partition.fsid() && - inodeId >= partition.start() && inodeId <= partition.end()) { + if (fsId == partition.fsid() && inodeId >= partition.start() && + inodeId <= partition.end()) { *partitionId = partition.partitionid(); *txId = partition.txid(); GetTxId(*partitionId, txId); @@ -85,7 +83,7 @@ bool MetaCache::RefreshTxId() { return false; } - for (const auto& item : txIds) { + for (const auto &item : txIds) { SetTxId(item.partitionid(), item.txid()); } return true; @@ -215,7 +213,7 @@ bool MetaCache::GetTargetLeader(CopysetTarget *target, uint64_t *applyindex, // if cacahe do not have invalid leader, refresh leader VLOG(3) << "refresh leader for " << target->groupID.ToString(); bool ret = true; - uint32_t retry = 0; + int retry = 0; while (retry++ < metacacheopt_.metacacheGetLeaderRetry) { // refresh from metaserver ret = UpdateLeaderInternal(target->groupID, &copysetInfo); @@ -272,7 +270,7 @@ bool MetaCache::CreatePartitions(int currentNum, // already create { ReadLockGuard rl(rwlock4Partitions_); - if (partitionInfos_.size() > currentNum) { + if (static_cast(partitionInfos_.size()) > currentNum) { newPartitions->reserve(partitionInfos_.size() - currentNum); newPartitions->insert(newPartitions->end(), partitionInfos_.begin() + currentNum, @@ -290,8 +288,8 @@ bool MetaCache::CreatePartitions(int currentNum, // add partition and copyset info WriteLockGuard wl4PartitionMap(rwlock4Partitions_); WriteLockGuard wl4CopysetMap(rwlock4copysetInfoMap_); - DoAddOrResetPartitionAndCopyset(*newPartitions, - std::move(copysetMap), false); + DoAddOrResetPartitionAndCopyset(*newPartitions, std::move(copysetMap), + false); return true; } @@ -476,6 +474,7 @@ void MetaCache::UpdateCopysetInfoIfMatchCurrentLeader( std::vector> metaServerInfos; bool ret = mdsClient_->GetMetaServerListInCopysets( groupID.poolID, {groupID.copysetID}, &metaServerInfos); + (void)ret; bool needUpdate = (!metaServerInfos.empty()) && (metaServerInfos[0].HasPeerInCopyset(leaderAddr)); @@ -562,7 +561,8 @@ bool MetaCache::GetCopysetInfowithCopySetID( } bool TryGetPartitionIdByInodeId(const std::vector &plist, - RWLock *lock, uint64_t inodeID, PartitionID *pid) { + RWLock *lock, uint64_t inodeID, + PartitionID *pid) { ReadLockGuard rl(*lock); for (const auto &it : plist) { if (it.start() <= inodeID && it.end() >= inodeID) { @@ -574,17 +574,17 @@ bool TryGetPartitionIdByInodeId(const std::vector &plist, } bool MetaCache::GetPartitionIdByInodeId(uint32_t fsID, uint64_t inodeID, - PartitionID *pid) { - if (!TryGetPartitionIdByInodeId(partitionInfos_, - &rwlock4Partitions_, inodeID, pid)) { + PartitionID *pid) { + if (!TryGetPartitionIdByInodeId(partitionInfos_, &rwlock4Partitions_, + inodeID, pid)) { // list form mds if (!ListPartitions(fsID)) { LOG(ERROR) << "ListPartitions for {fsid:" << fsID << "} fail, partition list not exist"; return false; } - return TryGetPartitionIdByInodeId(partitionInfos_, - &rwlock4Partitions_, inodeID, pid); + return TryGetPartitionIdByInodeId(partitionInfos_, &rwlock4Partitions_, + inodeID, pid); } return true; } diff --git a/curvefs/src/client/rpcclient/metacache.h b/curvefs/src/client/rpcclient/metacache.h index fecec3440e..66fa86ffd8 
100644 --- a/curvefs/src/client/rpcclient/metacache.h +++ b/curvefs/src/client/rpcclient/metacache.h @@ -25,12 +25,13 @@ #include #include -#include +#include #include -#include #include -#include +#include +#include +#include #include "curvefs/proto/common.pb.h" #include "curvefs/src/client/common/common.h" diff --git a/curvefs/src/client/rpcclient/metaserver_client.cpp b/curvefs/src/client/rpcclient/metaserver_client.cpp index 0fc7bfb6b0..0b0ced99e7 100644 --- a/curvefs/src/client/rpcclient/metaserver_client.cpp +++ b/curvefs/src/client/rpcclient/metaserver_client.cpp @@ -42,12 +42,12 @@ using ::curve::common::StringToUl; using ::curve::common::StringToUll; -using curvefs::metaserver::GetOrModifyS3ChunkInfoRequest; -using curvefs::metaserver::GetOrModifyS3ChunkInfoResponse; using curvefs::metaserver::BatchGetInodeAttrRequest; using curvefs::metaserver::BatchGetInodeAttrResponse; using curvefs::metaserver::BatchGetXAttrRequest; using curvefs::metaserver::BatchGetXAttrResponse; +using curvefs::metaserver::GetOrModifyS3ChunkInfoRequest; +using curvefs::metaserver::GetOrModifyS3ChunkInfoResponse; namespace curvefs { namespace client { @@ -67,10 +67,10 @@ using UpdateVolumeExtentExecutor = TaskExecutor; using GetVolumeExtentExecutor = TaskExecutor; using ::curvefs::common::LatencyUpdater; -using ::curvefs::common::StreamOptions; using ::curvefs::common::StreamConnection; -using ::curvefs::metaserver::S3ChunkInfo; +using ::curvefs::common::StreamOptions; using ::curvefs::metaserver::MetaServerService_Stub; +using ::curvefs::metaserver::S3ChunkInfo; MetaStatusCode MetaServerClientImpl::Init( const ExcutorOpt &excutorOpt, const ExcutorOpt &excutorInternalOpt, @@ -86,12 +86,12 @@ MetaStatusCode MetaServerClientImpl::Init( #define RPCTask \ [&](LogicPoolID poolID, CopysetID copysetID, PartitionID partitionID, \ uint64_t txId, uint64_t applyIndex, brpc::Channel * channel, \ - brpc::Controller * cntl, TaskExecutorDone *taskExecutorDone) -> int + brpc::Controller * cntl, TaskExecutorDone * taskExecutorDone) -> int #define AsyncRPCTask \ [=](LogicPoolID poolID, CopysetID copysetID, PartitionID partitionID, \ uint64_t txId, uint64_t applyIndex, brpc::Channel * channel, \ - brpc::Controller * cntl, TaskExecutorDone *taskExecutorDone) -> int + brpc::Controller * cntl, TaskExecutorDone * taskExecutorDone) -> int class MetaServerClientRpcDoneBase : public google::protobuf::Closure { public: @@ -123,6 +123,7 @@ MetaStatusCode MetaServerClientImpl::GetDentry(uint32_t fsId, uint64_t inodeid, const std::string &name, Dentry *out) { auto task = RPCTask { + (void)taskExecutorDone; metric_.getDentry.qps.count << 1; LatencyUpdater updater(&metric_.getDentry.latency); GetDentryResponse response; @@ -183,10 +184,10 @@ MetaStatusCode MetaServerClientImpl::GetDentry(uint32_t fsId, uint64_t inodeid, MetaStatusCode MetaServerClientImpl::ListDentry(uint32_t fsId, uint64_t inodeid, const std::string &last, - uint32_t count, - bool onlyDir, + uint32_t count, bool onlyDir, std::list *dentryList) { auto task = RPCTask { + (void)taskExecutorDone; metric_.listDentry.qps.count << 1; LatencyUpdater updater(&metric_.listDentry.latency); ListDentryRequest request; @@ -217,9 +218,8 @@ MetaStatusCode MetaServerClientImpl::ListDentry(uint32_t fsId, uint64_t inodeid, MetaStatusCode ret = response.statuscode(); if (ret != MetaStatusCode::OK) { LOG(WARNING) << "ListDentry: fsId = " << fsId - << ", inodeid = " << inodeid - << ", last = " << last << ", count = " << count - << ", onlyDir = " << onlyDir + << ", inodeid = " << inodeid << ", 
last = " << last + << ", count = " << count << ", onlyDir = " << onlyDir << ", errcode = " << ret << ", errmsg = " << MetaStatusCode_Name(ret); } else if (response.has_appliedindex()) { @@ -230,12 +230,11 @@ MetaStatusCode MetaServerClientImpl::ListDentry(uint32_t fsId, uint64_t inodeid, for_each(dentrys.begin(), dentrys.end(), [&](Dentry &d) { dentryList->push_back(d); }); } else { - LOG(WARNING) - << "ListDentry: fsId = " << fsId << ", inodeid = " << inodeid - << ", last = " << last << ", count = " << count - << ", onlyDir = " << onlyDir - << " ok, but applyIndex not set in response:" - << response.DebugString(); + LOG(WARNING) << "ListDentry: fsId = " << fsId + << ", inodeid = " << inodeid << ", last = " << last + << ", count = " << count << ", onlyDir = " << onlyDir + << " ok, but applyIndex not set in response:" + << response.DebugString(); return -1; } @@ -254,6 +253,8 @@ MetaStatusCode MetaServerClientImpl::ListDentry(uint32_t fsId, uint64_t inodeid, MetaStatusCode MetaServerClientImpl::CreateDentry(const Dentry &dentry) { auto task = RPCTask { + (void)applyIndex; + (void)taskExecutorDone; metric_.createDentry.qps.count << 1; LatencyUpdater updater(&metric_.createDentry.latency); CreateDentryResponse response; @@ -324,6 +325,8 @@ MetaStatusCode MetaServerClientImpl::DeleteDentry(uint32_t fsId, const std::string &name, FsFileType type) { auto task = RPCTask { + (void)applyIndex; + (void)taskExecutorDone; metric_.deleteDentry.qps.count << 1; LatencyUpdater updater(&metric_.deleteDentry.latency); DeleteDentryResponse response; @@ -382,9 +385,11 @@ MetaStatusCode MetaServerClientImpl::DeleteDentry(uint32_t fsId, MetaStatusCode MetaServerClientImpl::PrepareRenameTx(const std::vector &dentrys) { auto task = RPCTask { + (void)txId; + (void)applyIndex; + (void)taskExecutorDone; metric_.prepareRenameTx.qps.count << 1; - LatencyUpdater updater( - &metric_.prepareRenameTx.latency); + LatencyUpdater updater(&metric_.prepareRenameTx.latency); PrepareRenameTxRequest request; PrepareRenameTxResponse response; request.set_poolid(poolID); @@ -433,8 +438,10 @@ MetaServerClientImpl::PrepareRenameTx(const std::vector &dentrys) { } MetaStatusCode MetaServerClientImpl::GetInode(uint32_t fsId, uint64_t inodeid, - Inode *out, bool* streaming) { + Inode *out, bool *streaming) { auto task = RPCTask { + (void)txId; + (void)taskExecutorDone; metric_.getInode.qps.count << 1; LatencyUpdater updater(&metric_.getInode.latency); GetInodeRequest request; @@ -493,8 +500,7 @@ MetaStatusCode MetaServerClientImpl::GetInode(uint32_t fsId, uint64_t inodeid, } bool GroupInodeIdByPartition( - uint32_t fsId, - std::shared_ptr metaCache, + uint32_t fsId, std::shared_ptr metaCache, const std::set &inodeIds, std::unordered_map> *inodeGroups) { for (const auto &it : inodeIds) { @@ -516,8 +522,7 @@ bool GroupInodeIdByPartition( } bool MetaServerClientImpl::SplitRequestInodes( - uint32_t fsId, - const std::set &inodeIds, + uint32_t fsId, const std::set &inodeIds, std::vector> *inodeGroups) { std::unordered_map> groups; bool ret = GroupInodeIdByPartition(fsId, metaCache_, inodeIds, &groups); @@ -552,7 +557,7 @@ void BatchGetInodeAttrRpcDone::Run() { std::unique_ptr self_guard(this); brpc::ClosureGuard done_guard(done_); auto taskCtx = done_->GetTaskExcutor()->GetTaskCxt(); - auto& cntl = taskCtx->cntl_; + auto &cntl = taskCtx->cntl_; auto metaCache = done_->GetTaskExcutor()->GetMetaCache(); if (cntl.Failed()) { metric_->batchGetInodeAttr.eps.count << 1; @@ -560,7 +565,7 @@ void BatchGetInodeAttrRpcDone::Run() { << cntl.ErrorCode() 
<< ", error content: " << cntl.ErrorText() << ", log id: " << cntl.log_id(); - done_->SetRetCode(-cntl.ErrorCode()); + done_->SetRetCode(-cntl.ErrorCode()); return; } @@ -583,14 +588,15 @@ void BatchGetInodeAttrRpcDone::Run() { VLOG(6) << "batchGetInodeAttr done, " << "response: " << response.DebugString(); done_->SetRetCode(ret); - dynamic_cast(done_) - ->SetInodeAttrs(response.attr()); + dynamic_cast(done_)->SetInodeAttrs( + response.attr()); return; } -MetaStatusCode MetaServerClientImpl::BatchGetInodeAttr(uint32_t fsId, - const std::set &inodeIds, - std::list *attr) { +MetaStatusCode +MetaServerClientImpl::BatchGetInodeAttr(uint32_t fsId, + const std::set &inodeIds, + std::list *attr) { // group inodeid by partition and batchlimit std::vector> inodeGroups; if (!SplitRequestInodes(fsId, inodeIds, &inodeGroups)) { @@ -605,6 +611,8 @@ MetaStatusCode MetaServerClientImpl::BatchGetInodeAttr(uint32_t fsId, } uint64_t inodeId = *it.begin(); auto task = RPCTask { + (void)txId; + (void)taskExecutorDone; metric_.batchGetInodeAttr.qps.count << 1; LatencyUpdater updater(&metric_.batchGetInodeAttr.latency); BatchGetInodeAttrRequest request; @@ -614,7 +622,7 @@ MetaStatusCode MetaServerClientImpl::BatchGetInodeAttr(uint32_t fsId, request.set_partitionid(partitionID); request.set_fsid(fsId); request.set_appliedindex(applyIndex); - *request.mutable_inodeid() = { it.begin(), it.end() }; + *request.mutable_inodeid() = {it.begin(), it.end()}; curvefs::metaserver::MetaServerService_Stub stub(channel); stub.BatchGetInodeAttr(cntl, &request, &response, nullptr); @@ -633,14 +641,13 @@ MetaStatusCode MetaServerClientImpl::BatchGetInodeAttr(uint32_t fsId, LOG(WARNING) << "BatchGetInodeAttr failed, errcode = " << ret << ", errmsg = " << MetaStatusCode_Name(ret); } else if (response.attr_size() > 0 && - response.has_appliedindex()) { + response.has_appliedindex()) { auto *attrs = response.mutable_attr(); attr->insert(attr->end(), std::make_move_iterator(attrs->begin()), std::make_move_iterator(attrs->end())); - metaCache_->UpdateApplyIndex( - CopysetGroupID(poolID, copysetID), - response.appliedindex()); + metaCache_->UpdateApplyIndex(CopysetGroupID(poolID, copysetID), + response.appliedindex()); } else { LOG(WARNING) << "BatchGetInodeAttr ok, but" << " applyIndex or attr not set in response: " @@ -651,8 +658,8 @@ MetaStatusCode MetaServerClientImpl::BatchGetInodeAttr(uint32_t fsId, }; auto taskCtx = std::make_shared( MetaServerOpType::BatchGetInodeAttr, task, fsId, inodeId); - BatchGetInodeAttrExcutor excutor( - opt_, metaCache_, channelManager_, std::move(taskCtx)); + BatchGetInodeAttrExcutor excutor(opt_, metaCache_, channelManager_, + std::move(taskCtx)); auto ret = ConvertToMetaStatusCode(excutor.DoRPCTask()); if (ret != MetaStatusCode::OK) { attr->clear(); @@ -662,14 +669,16 @@ MetaStatusCode MetaServerClientImpl::BatchGetInodeAttr(uint32_t fsId, return MetaStatusCode::OK; } -MetaStatusCode MetaServerClientImpl::BatchGetInodeAttrAsync(uint32_t fsId, - const std::vector &inodeIds, MetaServerClientDone *done) { +MetaStatusCode MetaServerClientImpl::BatchGetInodeAttrAsync( + uint32_t fsId, const std::vector &inodeIds, + MetaServerClientDone *done) { if (inodeIds.empty()) { done->Run(); return MetaStatusCode::OK; } auto task = AsyncRPCTask { + (void)txId; metric_.batchGetInodeAttr.qps.count << 1; LatencyUpdater updater(&metric_.batchGetInodeAttr.latency); BatchGetInodeAttrRequest request; @@ -679,26 +688,27 @@ MetaStatusCode MetaServerClientImpl::BatchGetInodeAttrAsync(uint32_t fsId, 
request.set_partitionid(partitionID); request.set_fsid(fsId); request.set_appliedindex(applyIndex); - *request.mutable_inodeid() = { inodeIds.begin(), inodeIds.end() }; - auto *rpcDone = new BatchGetInodeAttrRpcDone(taskExecutorDone, - &metric_); + *request.mutable_inodeid() = {inodeIds.begin(), inodeIds.end()}; + auto *rpcDone = + new BatchGetInodeAttrRpcDone(taskExecutorDone, &metric_); curvefs::metaserver::MetaServerService_Stub stub(channel); stub.BatchGetInodeAttr(cntl, &request, &rpcDone->response, rpcDone); return MetaStatusCode::OK; }; auto taskCtx = std::make_shared( MetaServerOpType::BatchGetInodeAttr, task, fsId, *inodeIds.begin()); - auto excutor = std::make_shared(opt_, - metaCache_, channelManager_, std::move(taskCtx)); - TaskExecutorDone *taskDone = new BatchGetInodeAttrTaskExecutorDone( - excutor, done); + auto excutor = std::make_shared( + opt_, metaCache_, channelManager_, std::move(taskCtx)); + TaskExecutorDone *taskDone = + new BatchGetInodeAttrTaskExecutorDone(excutor, done); excutor->DoAsyncRPCTask(taskDone); return MetaStatusCode::OK; } -MetaStatusCode MetaServerClientImpl::BatchGetXAttr(uint32_t fsId, - const std::set &inodeIds, - std::list *xattr) { +MetaStatusCode +MetaServerClientImpl::BatchGetXAttr(uint32_t fsId, + const std::set &inodeIds, + std::list *xattr) { // group inodeid by partition and batchlimit std::vector> inodeGroups; if (!SplitRequestInodes(fsId, inodeIds, &inodeGroups)) { @@ -714,6 +724,8 @@ MetaStatusCode MetaServerClientImpl::BatchGetXAttr(uint32_t fsId, uint64_t inodeId = *it.begin(); auto task = RPCTask { + (void)txId; + (void)taskExecutorDone; metric_.batchGetXattr.qps.count << 1; LatencyUpdater updater(&metric_.batchGetXattr.latency); BatchGetXAttrRequest request; @@ -723,17 +735,17 @@ MetaStatusCode MetaServerClientImpl::BatchGetXAttr(uint32_t fsId, request.set_partitionid(partitionID); request.set_fsid(fsId); request.set_appliedindex(applyIndex); - *request.mutable_inodeid() = { it.begin(), it.end() }; + *request.mutable_inodeid() = {it.begin(), it.end()}; curvefs::metaserver::MetaServerService_Stub stub(channel); stub.BatchGetXAttr(cntl, &request, &response, nullptr); if (cntl->Failed()) { metric_.batchGetXattr.eps.count << 1; - LOG(WARNING) << "BatchGetXAttr Failed, errorcode = " - << cntl->ErrorCode() - << ", error content:" << cntl->ErrorText() - << ", log id = " << cntl->log_id(); + LOG(WARNING) + << "BatchGetXAttr Failed, errorcode = " << cntl->ErrorCode() + << ", error content:" << cntl->ErrorText() + << ", log id = " << cntl->log_id(); return -cntl->ErrorCode(); } @@ -742,14 +754,13 @@ MetaStatusCode MetaServerClientImpl::BatchGetXAttr(uint32_t fsId, LOG(WARNING) << "BatchGetXAttr failed, errcode = " << ret << ", errmsg = " << MetaStatusCode_Name(ret); } else if (response.xattr_size() > 0 && - response.has_appliedindex()) { + response.has_appliedindex()) { auto *xattrs = response.mutable_xattr(); xattr->insert(xattr->end(), std::make_move_iterator(xattrs->begin()), std::make_move_iterator(xattrs->end())); - metaCache_->UpdateApplyIndex( - CopysetGroupID(poolID, copysetID), - response.appliedindex()); + metaCache_->UpdateApplyIndex(CopysetGroupID(poolID, copysetID), + response.appliedindex()); } else { LOG(WARNING) << "BatchGetXAttr ok, but" << " applyIndex or attr not set in response: " @@ -760,8 +771,8 @@ MetaStatusCode MetaServerClientImpl::BatchGetXAttr(uint32_t fsId, }; auto taskCtx = std::make_shared( MetaServerOpType::BatchGetInodeAttr, task, fsId, inodeId); - BatchGetInodeAttrExcutor excutor( - opt_, metaCache_, 
channelManager_, std::move(taskCtx)); + BatchGetInodeAttrExcutor excutor(opt_, metaCache_, channelManager_, + std::move(taskCtx)); auto ret = ConvertToMetaStatusCode(excutor.DoRPCTask()); if (ret != MetaStatusCode::OK) { xattr->clear(); @@ -775,6 +786,9 @@ MetaStatusCode MetaServerClientImpl::UpdateInode(const UpdateInodeRequest &request, bool internal) { auto task = RPCTask { + (void)txId; + (void)applyIndex; + (void)taskExecutorDone; metric_.updateInode.qps.count << 1; LatencyUpdater updater(&metric_.updateInode.latency); @@ -831,18 +845,15 @@ MetaServerClientImpl::UpdateInode(const UpdateInodeRequest &request, namespace { -#define SET_REQUEST_FIELD_IF_HAS(request, attr, field) \ - do { \ - if ((attr).has_##field()) { \ - (request)->set_##field((attr).field()); \ - } \ +#define SET_REQUEST_FIELD_IF_HAS(request, attr, field) \ + do { \ + if ((attr).has_##field()) { \ + (request)->set_##field((attr).field()); \ + } \ } while (false) -void FillInodeAttr(uint32_t fsId, - uint64_t inodeId, - const InodeAttr& attr, - bool nlink, - UpdateInodeRequest* request) { +void FillInodeAttr(uint32_t fsId, uint64_t inodeId, const InodeAttr &attr, + bool nlink, UpdateInodeRequest *request) { request->set_fsid(fsId); request->set_inodeid(inodeId); @@ -869,7 +880,7 @@ void FillInodeAttr(uint32_t fsId, #undef SET_REQUEST_FIELD_IF_HAS -void FillDataIndices(DataIndices&& indices, UpdateInodeRequest* request) { +void FillDataIndices(DataIndices &&indices, UpdateInodeRequest *request) { if (indices.s3ChunkInfoMap && !indices.s3ChunkInfoMap->empty()) { *request->mutable_s3chunkinfoadd() = std::move(indices.s3ChunkInfoMap.value()); @@ -883,24 +894,19 @@ void FillDataIndices(DataIndices&& indices, UpdateInodeRequest* request) { } // namespace -MetaStatusCode MetaServerClientImpl::UpdateInodeAttr( - uint32_t fsId, - uint64_t inodeId, - const InodeAttr& attr) { +MetaStatusCode MetaServerClientImpl::UpdateInodeAttr(uint32_t fsId, + uint64_t inodeId, + const InodeAttr &attr) { UpdateInodeRequest request; FillInodeAttr(fsId, inodeId, attr, /*nlink=*/true, &request); return UpdateInode(request); } MetaStatusCode MetaServerClientImpl::UpdateInodeAttrWithOutNlink( - uint32_t fsId, - uint64_t inodeId, - const InodeAttr& attr, - S3ChunkInfoMap* s3ChunkInfoAdd, - bool internal) { + uint32_t fsId, uint64_t inodeId, const InodeAttr &attr, + S3ChunkInfoMap *s3ChunkInfoAdd, bool internal) { UpdateInodeRequest request; - FillInodeAttr(fsId, inodeId, attr, /*nlink=*/false, - &request); + FillInodeAttr(fsId, inodeId, attr, /*nlink=*/false, &request); if (s3ChunkInfoAdd != nullptr) { DataIndices indices; indices.s3ChunkInfoMap = *s3ChunkInfoAdd; @@ -921,30 +927,27 @@ void UpdateInodeRpcDone::Run() { std::unique_ptr self_guard(this); brpc::ClosureGuard done_guard(done_); auto taskCtx = done_->GetTaskExcutor()->GetTaskCxt(); - auto& cntl = taskCtx->cntl_; + auto &cntl = taskCtx->cntl_; auto metaCache = done_->GetTaskExcutor()->GetMetaCache(); if (cntl.Failed()) { metric_->updateInode.eps.count << 1; - LOG(WARNING) << "UpdateInode Failed, errorcode = " - << cntl.ErrorCode() + LOG(WARNING) << "UpdateInode Failed, errorcode = " << cntl.ErrorCode() << ", error content: " << cntl.ErrorText() << ", log id: " << cntl.log_id(); - done_->SetRetCode(-cntl.ErrorCode()); + done_->SetRetCode(-cntl.ErrorCode()); return; } MetaStatusCode ret = response.statuscode(); if (ret != MetaStatusCode::OK) { - LOG(WARNING) << "UpdateInode: inodeid = " - << taskCtx->inodeID + LOG(WARNING) << "UpdateInode: inodeid = " << taskCtx->inodeID << ", errcode = " << ret 
<< ", errmsg = " << MetaStatusCode_Name(ret); } else if (response.has_appliedindex()) { metaCache->UpdateApplyIndex(taskCtx->target.groupID, - response.appliedindex()); + response.appliedindex()); } else { - LOG(WARNING) << "UpdateInode: inodeid = " - << taskCtx->inodeID + LOG(WARNING) << "UpdateInode: inodeid = " << taskCtx->inodeID << "ok, but applyIndex not set in response:" << response.DebugString(); done_->SetRetCode(-1); @@ -957,8 +960,10 @@ void UpdateInodeRpcDone::Run() { } void MetaServerClientImpl::UpdateInodeAsync(const UpdateInodeRequest &request, - MetaServerClientDone *done) { + MetaServerClientDone *done) { auto task = AsyncRPCTask { + (void)txId; + (void)applyIndex; metric_.updateInode.qps.count << 1; UpdateInodeRequest req = request; @@ -974,28 +979,24 @@ void MetaServerClientImpl::UpdateInodeAsync(const UpdateInodeRequest &request, auto taskCtx = std::make_shared( MetaServerOpType::UpdateInode, task, request.fsid(), request.inodeid()); - auto excutor = std::make_shared(opt_, - metaCache_, channelManager_, std::move(taskCtx)); - TaskExecutorDone *taskDone = new TaskExecutorDone( - excutor, done); + auto excutor = std::make_shared( + opt_, metaCache_, channelManager_, std::move(taskCtx)); + TaskExecutorDone *taskDone = new TaskExecutorDone(excutor, done); excutor->DoAsyncRPCTask(taskDone); } void MetaServerClientImpl::UpdateInodeWithOutNlinkAsync( - uint32_t fsId, - uint64_t inodeId, - const InodeAttr& attr, - MetaServerClientDone* done, - DataIndices&& indices) { + uint32_t fsId, uint64_t inodeId, const InodeAttr &attr, + MetaServerClientDone *done, DataIndices &&indices) { UpdateInodeRequest request; FillInodeAttr(fsId, inodeId, attr, /*nlink=*/false, &request); FillDataIndices(std::move(indices), &request); UpdateInodeAsync(request, done); } -bool MetaServerClientImpl::ParseS3MetaStreamBuffer(butil::IOBuf* buffer, - uint64_t* chunkIndex, - S3ChunkInfoList* list) { +bool MetaServerClientImpl::ParseS3MetaStreamBuffer(butil::IOBuf *buffer, + uint64_t *chunkIndex, + S3ChunkInfoList *list) { butil::IOBuf out; std::string delim = ":"; if (buffer->cut_until(&out, delim) != 0) { @@ -1012,16 +1013,16 @@ bool MetaServerClientImpl::ParseS3MetaStreamBuffer(butil::IOBuf* buffer, return true; } -bool MetaServerClientImpl::HandleS3MetaStreamBuffer(butil::IOBuf* buffer, - S3ChunkInfoMap* out) { +bool MetaServerClientImpl::HandleS3MetaStreamBuffer(butil::IOBuf *buffer, + S3ChunkInfoMap *out) { uint64_t chunkIndex; S3ChunkInfoList list; if (!ParseS3MetaStreamBuffer(buffer, &chunkIndex, &list)) { return false; } - auto merge = [](S3ChunkInfoList* from, S3ChunkInfoList* to) { - for (size_t i = 0; i < from->s3chunks_size(); i++) { + auto merge = [](S3ChunkInfoList *from, S3ChunkInfoList *to) { + for (int i = 0; i < from->s3chunks_size(); i++) { auto chunkinfo = to->add_s3chunks(); *chunkinfo = std::move(*from->mutable_s3chunks(i)); } @@ -1042,6 +1043,9 @@ MetaStatusCode MetaServerClientImpl::GetOrModifyS3ChunkInfo( bool returnS3ChunkInfoMap, google::protobuf::Map *out, bool internal) { auto task = RPCTask { + (void)txId; + (void)applyIndex; + (void)taskExecutorDone; metric_.appendS3ChunkInfo.qps.count << 1; LatencyUpdater updater(&metric_.appendS3ChunkInfo.latency); GetOrModifyS3ChunkInfoRequest request; @@ -1064,7 +1068,7 @@ MetaStatusCode MetaServerClientImpl::GetOrModifyS3ChunkInfo( streamClient_.Close(connection); } }); - auto receiveCallback = [&](butil::IOBuf* buffer) { + auto receiveCallback = [&](butil::IOBuf *buffer) { return HandleS3MetaStreamBuffer(buffer, out); }; if 
(returnS3ChunkInfoMap) { @@ -1090,8 +1094,7 @@ MetaStatusCode MetaServerClientImpl::GetOrModifyS3ChunkInfo( MetaStatusCode ret = response.statuscode(); if (ret != MetaStatusCode::OK) { LOG(WARNING) << "GetOrModifyS3ChunkInfo, inodeId: " << inodeId - << ", fsId: " << fsId - << ", errorcode: " << ret + << ", fsId: " << fsId << ", errorcode: " << ret << ", errmsg: " << MetaStatusCode_Name(ret); return ret; } else if (response.has_appliedindex()) { @@ -1120,17 +1123,17 @@ MetaStatusCode MetaServerClientImpl::GetOrModifyS3ChunkInfo( }; bool streaming = returnS3ChunkInfoMap; - auto taskCtx = std::make_shared( - MetaServerOpType::GetOrModifyS3ChunkInfo, - task, fsId, inodeId, streaming); + auto taskCtx = + std::make_shared(MetaServerOpType::GetOrModifyS3ChunkInfo, + task, fsId, inodeId, streaming); ExcutorOpt opt; if (internal) { opt = optInternal_; } else { opt = opt_; } - GetOrModifyS3ChunkInfoExcutor excutor( - opt, metaCache_, channelManager_, std::move(taskCtx)); + GetOrModifyS3ChunkInfoExcutor excutor(opt, metaCache_, channelManager_, + std::move(taskCtx)); return ConvertToMetaStatusCode(excutor.DoRPCTask()); } @@ -1146,7 +1149,7 @@ void GetOrModifyS3ChunkInfoRpcDone::Run() { std::unique_ptr self_guard(this); brpc::ClosureGuard done_guard(done_); auto taskCtx = done_->GetTaskExcutor()->GetTaskCxt(); - auto& cntl = taskCtx->cntl_; + auto &cntl = taskCtx->cntl_; auto metaCache = done_->GetTaskExcutor()->GetMetaCache(); if (cntl.Failed()) { metric_->appendS3ChunkInfo.eps.count << 1; @@ -1160,21 +1163,17 @@ void GetOrModifyS3ChunkInfoRpcDone::Run() { MetaStatusCode ret = response.statuscode(); if (ret != MetaStatusCode::OK) { - LOG(WARNING) << "GetOrModifyS3ChunkInfo, inodeId: " - << taskCtx->inodeID - << ", fsId: " << taskCtx->fsID - << ", errorcode: " << ret + LOG(WARNING) << "GetOrModifyS3ChunkInfo, inodeId: " << taskCtx->inodeID + << ", fsId: " << taskCtx->fsID << ", errorcode: " << ret << ", errmsg: " << MetaStatusCode_Name(ret); done_->SetRetCode(ret); return; } else if (response.has_appliedindex()) { metaCache->UpdateApplyIndex(taskCtx->target.groupID, - response.appliedindex()); + response.appliedindex()); } else { - LOG(WARNING) << "GetOrModifyS3ChunkInfo, inodeId: " - << taskCtx->inodeID - << ", fsId: " - << taskCtx->fsID + LOG(WARNING) << "GetOrModifyS3ChunkInfo, inodeId: " << taskCtx->inodeID + << ", fsId: " << taskCtx->fsID << "ok, but applyIndex or inode not set in response: " << response.DebugString(); done_->SetRetCode(-1); @@ -1188,10 +1187,11 @@ void GetOrModifyS3ChunkInfoRpcDone::Run() { void MetaServerClientImpl::GetOrModifyS3ChunkInfoAsync( uint32_t fsId, uint64_t inodeId, - const google::protobuf::Map< - uint64_t, S3ChunkInfoList> &s3ChunkInfos, + const google::protobuf::Map &s3ChunkInfos, MetaServerClientDone *done) { auto task = AsyncRPCTask { + (void)txId; + (void)applyIndex; metric_.appendS3ChunkInfo.qps.count << 1; GetOrModifyS3ChunkInfoRequest request; @@ -1207,23 +1207,25 @@ void MetaServerClientImpl::GetOrModifyS3ChunkInfoAsync( new GetOrModifyS3ChunkInfoRpcDone(taskExecutorDone, &metric_); curvefs::metaserver::MetaServerService_Stub stub(channel); - stub.GetOrModifyS3ChunkInfo( - cntl, &request, &rpcDone->response, rpcDone); + stub.GetOrModifyS3ChunkInfo(cntl, &request, &rpcDone->response, + rpcDone); return MetaStatusCode::OK; }; auto taskCtx = std::make_shared( MetaServerOpType::GetOrModifyS3ChunkInfo, task, fsId, inodeId); - auto excutor = std::make_shared(opt_, - metaCache_, channelManager_, std::move(taskCtx)); - TaskExecutorDone *taskDone = new 
TaskExecutorDone( - excutor, done); + auto excutor = std::make_shared( + opt_, metaCache_, channelManager_, std::move(taskCtx)); + TaskExecutorDone *taskDone = new TaskExecutorDone(excutor, done); excutor->DoAsyncRPCTask(taskDone); } MetaStatusCode MetaServerClientImpl::CreateInode(const InodeParam &param, Inode *out) { auto task = RPCTask { + (void)txId; + (void)applyIndex; + (void)taskExecutorDone; metric_.createInode.qps.count << 1; LatencyUpdater updater(&metric_.createInode.latency); CreateInodeResponse response; @@ -1242,7 +1244,7 @@ MetaStatusCode MetaServerClientImpl::CreateInode(const InodeParam &param, request.set_parent(param.parent); struct timespec now; clock_gettime(CLOCK_REALTIME, &now); - Time* tm = new Time(); + Time *tm = new Time(); tm->set_sec(now.tv_sec); tm->set_nsec(now.tv_nsec); request.set_allocated_create(tm); @@ -1295,6 +1297,9 @@ MetaStatusCode MetaServerClientImpl::CreateInode(const InodeParam &param, MetaStatusCode MetaServerClientImpl::CreateManageInode(const InodeParam &param, Inode *out) { auto task = RPCTask { + (void)txId; + (void)applyIndex; + (void)taskExecutorDone; metric_.createInode.qps.count << 1; LatencyUpdater updater(&metric_.createInode.latency); CreateManageInodeResponse response; @@ -1349,7 +1354,7 @@ MetaStatusCode MetaServerClientImpl::CreateManageInode(const InodeParam &param, }; auto taskCtx = std::make_shared( - MetaServerOpType::CreateManageInode, task, param.fsId, 0); + MetaServerOpType::CreateManageInode, task, param.fsId, 0); CreateInodeExcutor excutor(opt_, metaCache_, channelManager_, std::move(taskCtx)); return ConvertToMetaStatusCode(excutor.DoRPCTask()); @@ -1358,6 +1363,9 @@ MetaStatusCode MetaServerClientImpl::CreateManageInode(const InodeParam &param, MetaStatusCode MetaServerClientImpl::DeleteInode(uint32_t fsId, uint64_t inodeid) { auto task = RPCTask { + (void)txId; + (void)applyIndex; + (void)taskExecutorDone; metric_.deleteInode.qps.count << 1; LatencyUpdater updater(&metric_.deleteInode.latency); DeleteInodeResponse response; @@ -1421,7 +1429,7 @@ void UpdateVolumeExtentRpcDone::Run() { auto taskCtx = done_->GetTaskExcutor()->GetTaskCxt(); auto metaCache = done_->GetTaskExcutor()->GetMetaCache(); - auto& cntl = taskCtx->cntl_; + auto &cntl = taskCtx->cntl_; if (cntl.Failed()) { metric_->updateVolumeExtent.eps.count << 1; @@ -1447,21 +1455,21 @@ void UpdateVolumeExtentRpcDone::Run() { done_->SetRetCode(st); } -#define SET_COMMON_FIELDS \ - do { \ - request.set_poolid(poolID); \ - request.set_copysetid(copysetID); \ - request.set_partitionid(partitionID); \ - request.set_fsid(fsId); \ - request.set_inodeid(inodeId); \ +#define SET_COMMON_FIELDS \ + do { \ + request.set_poolid(poolID); \ + request.set_copysetid(copysetID); \ + request.set_partitionid(partitionID); \ + request.set_fsid(fsId); \ + request.set_inodeid(inodeId); \ } while (0) void MetaServerClientImpl::AsyncUpdateVolumeExtent( - uint32_t fsId, - uint64_t inodeId, - const VolumeExtentList &extents, + uint32_t fsId, uint64_t inodeId, const VolumeExtentList &extents, MetaServerClientDone *done) { auto task = AsyncRPCTask { + (void)txId; + (void)applyIndex; metric_.updateVolumeExtent.qps.count << 1; metaserver::UpdateVolumeExtentRequest request; SET_COMMON_FIELDS; @@ -1503,11 +1511,10 @@ struct ParseVolumeExtentCallBack { } // namespace -MetaStatusCode MetaServerClientImpl::GetVolumeExtent( - uint32_t fsId, - uint64_t inodeId, - bool streaming, - VolumeExtentList *extents) { +MetaStatusCode +MetaServerClientImpl::GetVolumeExtent(uint32_t fsId, uint64_t inodeId, + bool streaming, + 
VolumeExtentList *extents) { auto task = RPCTask { (void)txId; (void)applyIndex; @@ -1589,8 +1596,9 @@ MetaStatusCode MetaServerClientImpl::GetVolumeExtent( return ConvertToMetaStatusCode(executor.DoRPCTask()); } -MetaStatusCode MetaServerClientImpl::GetInodeAttr( - uint32_t fsId, uint64_t inodeid, InodeAttr *attr) { +MetaStatusCode MetaServerClientImpl::GetInodeAttr(uint32_t fsId, + uint64_t inodeid, + InodeAttr *attr) { std::set inodeIds; inodeIds.insert(inodeid); std::list attrs; diff --git a/curvefs/src/client/rpcclient/metaserver_client.h b/curvefs/src/client/rpcclient/metaserver_client.h index de899a39b6..419ed34943 100644 --- a/curvefs/src/client/rpcclient/metaserver_client.h +++ b/curvefs/src/client/rpcclient/metaserver_client.h @@ -172,8 +172,7 @@ class MetaServerClient { class MetaServerClientImpl : public MetaServerClient { public: - explicit MetaServerClientImpl(const std::string &metricPrefix = "") - : metric_(metricPrefix) {} + MetaServerClientImpl() = default; MetaStatusCode Init(const ExcutorOpt &excutorOpt, const ExcutorOpt &excutorInternalOpt, diff --git a/curvefs/src/client/s3/client_s3_adaptor.cpp b/curvefs/src/client/s3/client_s3_adaptor.cpp index 0126ceac65..c640945311 100644 --- a/curvefs/src/client/s3/client_s3_adaptor.cpp +++ b/curvefs/src/client/s3/client_s3_adaptor.cpp @@ -42,7 +42,6 @@ S3ClientAdaptorImpl::Init( std::shared_ptr diskCacheManagerImpl, std::shared_ptr kvClientManager, bool startBackGround) { - pendingReq_ = 0; blockSize_ = option.blockSize; chunkSize_ = option.chunkSize; pageSize_ = option.pageSize; @@ -52,7 +51,6 @@ S3ClientAdaptorImpl::Init( << blockSize_; return CURVEFS_ERROR::INVALIDPARAM; } - fuseMaxSize_ = option.fuseMaxSize; prefetchBlocks_ = option.prefetchBlocks; prefetchExecQueueNum_ = option.prefetchExecQueueNum; diskCacheType_ = option.diskCacheOpt.diskCacheType; @@ -62,6 +60,7 @@ S3ClientAdaptorImpl::Init( chunkFlushThreads_ = option.chunkFlushThreads; maxReadRetryIntervalMs_ = option.maxReadRetryIntervalMs; readRetryIntervalMs_ = option.readRetryIntervalMs; + objectPrefix_ = option.objectPrefix; client_ = client; inodeManager_ = inodeManager; mdsClient_ = mdsClient; @@ -99,6 +98,7 @@ S3ClientAdaptorImpl::Init( << ", flushIntervalSec: " << option.flushIntervalSec << ", writeCacheMaxByte: " << option.writeCacheMaxByte << ", readCacheMaxByte: " << option.readCacheMaxByte + << ", readCacheThreads: " << option.readCacheThreads << ", nearfullRatio: " << option.nearfullRatio << ", baseSleepUs: " << option.baseSleepUs; // start chunk flush threads @@ -111,30 +111,25 @@ int S3ClientAdaptorImpl::Write(uint64_t inodeId, uint64_t offset, VLOG(6) << "write start offset:" << offset << ", len:" << length << ", fsId:" << fsId_ << ", inodeId:" << inodeId; uint64_t start = butil::cpuwide_time_us(); - FileCacheManagerPtr fileCacheManager = - fsCacheManager_->FindOrCreateFileCacheManager(fsId_, inodeId); { - std::lock_guard lockguard(ioMtx_); - pendingReq_.fetch_add(1, std::memory_order_seq_cst); - VLOG(6) << "pendingReq_ is: " << pendingReq_; - uint64_t pendingReq = pendingReq_.load(std::memory_order_seq_cst); + std::lock_guard lockGuard(ioMtx_); fsCacheManager_->DataCacheByteInc(length); uint64_t size = fsCacheManager_->GetDataCacheSize(); - uint64_t maxSize = fsCacheManager_->GetDataCacheMaxSize(); - if ((size + pendingReq * fuseMaxSize_) >= maxSize) { + const uint64_t maxSize = fsCacheManager_->GetDataCacheMaxSize(); + if (size >= maxSize) { VLOG(6) << "write cache is full, wait flush. 
size: " << size - << ", maxSize:" << maxSize; + << ", maxSize: " << maxSize; // offer to do flush waitInterval_.StopWait(); fsCacheManager_->WaitFlush(); } } - uint64_t memCacheRatio = fsCacheManager_->MemCacheRatio(); + const uint64_t memCacheRatio = fsCacheManager_->MemCacheRatio(); int64_t exceedRatio = memCacheRatio - memCacheNearfullRatio_; if (exceedRatio > 0) { // offer to do flush waitInterval_.StopWait(); - // upload to s3 derectly or cache disk full + // upload to s3 directly or cache disk full bool needSleep = (DisableDiskCache() || IsReadCache()) || (IsReadWriteCache() && diskCacheManagerImpl_->IsDiskCacheFull()); @@ -145,15 +140,15 @@ int S3ClientAdaptorImpl::Write(uint64_t inodeId, uint64_t offset, << memCacheRatio << ", exponent is: " << exponent; } } + FileCacheManagerPtr fileCacheManager = + fsCacheManager_->FindOrCreateFileCacheManager(fsId_, inodeId); int ret = fileCacheManager->Write(offset, length, buf); - pendingReq_.fetch_sub(1, std::memory_order_seq_cst); fsCacheManager_->DataCacheByteDec(length); - if (s3Metric_.get() != nullptr) { + if (s3Metric_ != nullptr) { CollectMetrics(&s3Metric_->adaptorWrite, ret, start); s3Metric_->writeSize.set_value(length); } - VLOG(6) << "write end inodeId:" << inodeId << ",ret:" << ret - << ", pendingReq_ is: " << pendingReq_; + VLOG(6) << "write end inodeId: " << inodeId << ", ret: " << ret; return ret; } @@ -332,6 +327,7 @@ int S3ClientAdaptorImpl::Stop() { int S3ClientAdaptorImpl::ExecAsyncDownloadTask( void *meta, bthread::TaskIterator &iter) { // NOLINT + (void)meta; if (iter.is_queue_stopped()) { return 0; } diff --git a/curvefs/src/client/s3/client_s3_adaptor.h b/curvefs/src/client/s3/client_s3_adaptor.h index 80e57b2b4b..b8ff81dc82 100644 --- a/curvefs/src/client/s3/client_s3_adaptor.h +++ b/curvefs/src/client/s3/client_s3_adaptor.h @@ -33,7 +33,7 @@ #include "curvefs/proto/metaserver.pb.h" #include "curvefs/src/client/common/common.h" #include "curvefs/src/client/common/config.h" -#include "curvefs/src/client/error_code.h" +#include "curvefs/src/client/filesystem/error.h" #include "curvefs/src/client/inode_cache_manager.h" #include "curvefs/src/client/rpcclient/mds_client.h" #include "curvefs/src/client/s3/client_s3.h" @@ -98,6 +98,7 @@ class S3ClientAdaptor { virtual std::shared_ptr GetS3Client() = 0; virtual uint64_t GetBlockSize() = 0; virtual uint64_t GetChunkSize() = 0; + virtual uint32_t GetObjectPrefix() = 0; virtual bool HasDiskCache() = 0; }; @@ -150,6 +151,10 @@ class S3ClientAdaptorImpl : public S3ClientAdaptor { uint64_t GetChunkSize() { return chunkSize_; } + uint32_t GetObjectPrefix() { + return objectPrefix_; + } + std::shared_ptr GetFsCacheManager() { return fsCacheManager_; } @@ -244,7 +249,6 @@ class S3ClientAdaptorImpl : public S3ClientAdaptor { std::shared_ptr client_; uint64_t blockSize_; uint64_t chunkSize_; - uint32_t fuseMaxSize_; uint32_t prefetchBlocks_; uint32_t prefetchExecQueueNum_; std::string allocateServerEps_; @@ -254,6 +258,7 @@ class S3ClientAdaptorImpl : public S3ClientAdaptor { uint32_t throttleBaseSleepUs_; uint32_t maxReadRetryIntervalMs_; uint32_t readRetryIntervalMs_; + uint32_t objectPrefix_; Thread bgFlushThread_; std::atomic toStop_; std::mutex mtx_; @@ -264,7 +269,6 @@ class S3ClientAdaptorImpl : public S3ClientAdaptor { std::shared_ptr inodeManager_; std::shared_ptr diskCacheManagerImpl_; DiskCacheType diskCacheType_; - std::atomic pendingReq_; std::shared_ptr mdsClient_; uint32_t fsId_; std::string fsName_; diff --git a/curvefs/src/client/s3/client_s3_cache_manager.cpp 
b/curvefs/src/client/s3/client_s3_cache_manager.cpp index aa47d8ee67..125334b135 100644 --- a/curvefs/src/client/s3/client_s3_cache_manager.cpp +++ b/curvefs/src/client/s3/client_s3_cache_manager.cpp @@ -23,7 +23,10 @@ #include "curvefs/src/client/s3/client_s3_cache_manager.h" #include +#include #include +#include "absl/synchronization/blocking_counter.h" +#include "absl/cleanup/cleanup.h" #include "curvefs/src/client/s3/client_s3_adaptor.h" #include "curvefs/src/common/s3util.h" @@ -46,11 +49,11 @@ namespace curvefs { namespace client { void FsCacheManager::DataCacheNumInc() { - g_s3MultiManagerMetric->writeDataCacheNum << 1; - VLOG(9) << "DataCacheNumInc() v: 1,wDataCacheNum:" - << wDataCacheNum_.load(std::memory_order_relaxed); - wDataCacheNum_.fetch_add(1, std::memory_order_relaxed); - } + g_s3MultiManagerMetric->writeDataCacheNum << 1; + VLOG(9) << "DataCacheNumInc() v: 1,wDataCacheNum:" + << wDataCacheNum_.load(std::memory_order_relaxed); + wDataCacheNum_.fetch_add(1, std::memory_order_relaxed); +} void FsCacheManager::DataCacheNumFetchSub(uint64_t v) { g_s3MultiManagerMetric->writeDataCacheNum << -1 * v; @@ -96,7 +99,7 @@ FsCacheManager::FindOrCreateFileCacheManager(uint64_t fsId, uint64_t inodeId) { } FileCacheManagerPtr fileCacheManager = std::make_shared( - fsId, inodeId, s3ClientAdaptor_, kvClientManager_); + fsId, inodeId, s3ClientAdaptor_, kvClientManager_, readTaskPool_); auto ret = fileCacheManagerMap_.emplace(inodeId, fileCacheManager); g_s3MultiManagerMetric->fileManagerNum << 1; assert(ret.second); @@ -137,7 +140,7 @@ bool FsCacheManager::Set(DataCachePtr dataCache, while (lruByte_ >= readCacheMaxByte_) { --iter; - auto& trim = *iter; + auto &trim = *iter; trim->SetReadCacheState(false); lruByte_ -= trim->GetActualLen(); retiredBytes += trim->GetActualLen(); @@ -278,8 +281,7 @@ void FileCacheManager::WriteChunk(uint64_t index, uint64_t chunkPos, chunkCacheManager->WriteNewDataCache(s3ClientAdaptor_, chunkPos, writeLen, dataBuf); } - VLOG(9) << "WriteChunk end, index: " << index - << ", chunkPos: " << chunkPos; + VLOG(9) << "WriteChunk end, index: " << index << ", chunkPos: " << chunkPos; return; } @@ -359,11 +361,10 @@ void FileCacheManager::ReadFromMemCache( *actualReadLen = dataBufferOffset; - VLOG_IF(3, memCacheMissRequest->empty()) - << "greate! memory cache all hit."; + VLOG_IF(3, memCacheMissRequest->empty()) << "great! memory cache all hit."; } -int FileCacheManager::GenerateKVReuqest( +int FileCacheManager::GenerateKVRequest( const std::shared_ptr &inodeWrapper, const std::vector &readRequest, char *dataBuf, std::vector *kvRequest) { @@ -398,8 +399,7 @@ int FileCacheManager::GenerateKVReuqest( } int FileCacheManager::HandleReadS3NotExist( - int ret, uint32_t retry, - const std::shared_ptr &inodeWrapper) { + uint32_t retry, const std::shared_ptr &inodeWrapper) { uint32_t maxIntervalMs = s3ClientAdaptor_->GetMaxReadRetryIntervalMs(); // hardcode, fixme uint32_t retryIntervalMs = s3ClientAdaptor_->GetReadRetryIntervalMs(); @@ -426,7 +426,7 @@ int FileCacheManager::HandleReadS3NotExist( } int FileCacheManager::Read(uint64_t inodeId, uint64_t offset, uint64_t length, - char *dataBuf) { + char *dataBuf) { // 1. 
read from memory cache uint64_t actualReadLen = 0; std::vector memCacheMissRequest; @@ -448,33 +448,27 @@ int FileCacheManager::Read(uint64_t inodeId, uint64_t offset, uint64_t length, uint32_t retry = 0; do { // generate kv request - std::vector kvRequest; - GenerateKVReuqest(inodeWrapper, memCacheMissRequest, dataBuf, - &kvRequest); + std::vector kvRequests; + GenerateKVRequest(inodeWrapper, memCacheMissRequest, dataBuf, + &kvRequests); // read from kv cluster (localcache -> remote kv cluster -> s3) // localcache/remote kv cluster fail will not return error code. // Failure to read from s3 will eventually return failure. - int ret = ReadKVRequest(kvRequest, dataBuf, inodeWrapper->GetLength()); - if (ret >= 0) { - // read ok + ReadStatus ret = + ReadKVRequest(kvRequests, dataBuf, inodeWrapper->GetLength()); + if (ret == ReadStatus::OK) { break; - } else if (ret == -2) { - // TODO(@anybody): ret should replace the current number with a - // meaningful error code - // read from s3 not exist - // 1. may be the metaserver compaction update inode is not - // synchronized to the client. clear inodecache && get agin - // 2. if it returns -2 multiple times, it may be the data of a - // client has not been flushed back to s3, and need keep - // retrying. - if (0 != HandleReadS3NotExist(ret, retry++, inodeWrapper)) { + } + + if (ret == ReadStatus::S3_NOT_EXIST) { + if (0 != HandleReadS3NotExist(retry++, inodeWrapper)) { return -1; } } else { LOG(INFO) << "read inode = " << inodeId - << " from s3 failed, ret = " << ret; - return ret; + << " from s3 failed, ret = " << static_cast(ret); + return static_cast(ret); } } while (1); @@ -482,9 +476,9 @@ int FileCacheManager::Read(uint64_t inodeId, uint64_t offset, uint64_t length, } bool FileCacheManager::ReadKVRequestFromLocalCache(const std::string &name, - char *databuf, - uint64_t offset, - uint64_t len) { + char *databuf, + uint64_t offset, + uint64_t len) { uint64_t start = butil::cpuwide_time_us(); bool mayCached = s3ClientAdaptor_->HasDiskCache() && @@ -499,7 +493,7 @@ bool FileCacheManager::ReadKVRequestFromLocalCache(const std::string &name, return false; } - if (s3ClientAdaptor_->s3Metric_.get()) { + if (s3ClientAdaptor_->s3Metric_) { s3ClientAdaptor_->CollectMetrics( &s3ClientAdaptor_->s3Metric_->adaptorReadS3, len, start); } @@ -517,6 +511,7 @@ bool FileCacheManager::ReadKVRequestFromRemoteCache(const std::string &name, auto task = std::make_shared(name, databuf, offset, length); CountDownEvent event(1); task->done = [&](const std::shared_ptr &task) { + (void)task; event.Signal(); return; }; @@ -535,7 +530,9 @@ bool FileCacheManager::ReadKVRequestFromS3(const std::string &name, if (*ret < 0) { LOG(ERROR) << "object " << name << " read from s3 fail, ret = " << *ret; return false; - } else if (s3ClientAdaptor_->s3Metric_.get()) { + } + + if (s3ClientAdaptor_->s3Metric_) { s3ClientAdaptor_->CollectMetrics( &s3ClientAdaptor_->s3Metric_->adaptorReadS3, length, start); } @@ -543,103 +540,140 @@ bool FileCacheManager::ReadKVRequestFromS3(const std::string &name, return true; } -int FileCacheManager::ReadKVRequest( - const std::vector &kvRequests, char *dataBuf, - uint64_t fileLen) { - - for (auto req = kvRequests.begin(); req != kvRequests.end(); req++) { - VLOG(6) << "read from kv request " << req->DebugString(); +FileCacheManager::ReadStatus +FileCacheManager::ReadKVRequest(const std::vector &kvRequests, + char *dataBuf, uint64_t fileLen) { + absl::BlockingCounter counter(kvRequests.size()); + std::once_flag cancelFlag; + std::atomic 
isCanceled{false}; + std::atomic retCode{0}; - uint64_t chunkIndex = 0, chunkPos = 0, blockIndex = 0, blockPos = 0; - uint64_t chunkSize = s3ClientAdaptor_->GetChunkSize(); - uint64_t blockSize = s3ClientAdaptor_->GetBlockSize(); - GetBlockLoc(req->offset, &chunkIndex, &chunkPos, &blockIndex, - &blockPos); + for (const auto &req : kvRequests) { + readTaskPool_->Enqueue([&]() { + auto defer = absl::MakeCleanup([&]() { counter.DecrementCount(); }); + if (isCanceled) { + LOG(WARNING) << "kv request is canceled " << req.DebugString(); + return; + } + ProcessKVRequest(req, dataBuf, fileLen, cancelFlag, isCanceled, + retCode); + }); + } - // prefetch - if (s3ClientAdaptor_->HasDiskCache()) { - PrefetchForBlock(*req, fileLen, blockSize, chunkSize, blockIndex); - } + counter.Wait(); + return toReadStatus(retCode.load()); +} + +void FileCacheManager::ProcessKVRequest(const S3ReadRequest &req, char *dataBuf, + uint64_t fileLen, + std::once_flag &cancelFlag, + std::atomic &isCanceled, + std::atomic &retCode) { + VLOG(6) << "read from kv request " << req.DebugString(); + uint64_t chunkIndex = 0; + uint64_t chunkPos = 0; + uint64_t blockIndex = 0; + uint64_t blockPos = 0; + const uint64_t chunkSize = s3ClientAdaptor_->GetChunkSize(); + const uint64_t blockSize = s3ClientAdaptor_->GetBlockSize(); + const uint32_t objectPrefix = s3ClientAdaptor_->GetObjectPrefix(); + GetBlockLoc(req.offset, &chunkIndex, &chunkPos, &blockIndex, &blockPos); + + // prefetch + if (s3ClientAdaptor_->HasDiskCache()) { + PrefetchForBlock(req, fileLen, blockSize, chunkSize, blockIndex); + } + + // read request + // |--------------------------------|----------------------------------| + // 0 blockSize 2*blockSize + // blockPos length + blockPos + // |-------------------------| + // |--------------| + // currentReadLen + uint64_t length = req.len; + uint64_t currentReadLen = 0; + uint64_t readBufOffset = 0; + uint64_t objectOffset = req.objectOffset; - // read request - // |--------------------------------|----------------------------------| - // 0 blockSize 2*blockSize - // blockPos length + blockPos - // |-------------------------| - // |--------------| - // currentReadLen - uint64_t length = req->len; - uint64_t currentReadLen = 0; - uint64_t readBufOffset = 0; - uint64_t objectOffset = req->objectOffset; + while (length > 0) { + currentReadLen = + length + blockPos > blockSize ? blockSize - blockPos : length; + assert(blockPos >= objectOffset); + std::string name = curvefs::common::s3util::GenObjName( + req.chunkId, blockIndex, req.compaction, req.fsId, req.inodeId, + objectPrefix); + char *currentBuf = dataBuf + req.readOffset + readBufOffset; - while (length > 0) { - int ret = 0; - currentReadLen = - length + blockPos > blockSize ? 
blockSize - blockPos : length; - assert(blockPos >= objectOffset); - std::string name = curvefs::common::s3util::GenObjName( - req->chunkId, blockIndex, req->compaction, req->fsId, - req->inodeId); - char *currentBuf = dataBuf + req->readOffset + readBufOffset; - - // read from localcache -> remotecache -> s3 + // read from localcache -> remotecache -> s3 + do { if (ReadKVRequestFromLocalCache(name, currentBuf, blockPos - objectOffset, currentReadLen)) { VLOG(9) << "read " << name << " from local cache ok"; - } else if (ReadKVRequestFromRemoteCache(name, currentBuf, - blockPos - objectOffset, - currentReadLen)) { + break; + } + + if (ReadKVRequestFromRemoteCache(name, currentBuf, + blockPos - objectOffset, + currentReadLen)) { VLOG(9) << "read " << name << " from remote cache ok"; - } else if (ReadKVRequestFromS3(name, currentBuf, - blockPos - objectOffset, - currentReadLen, &ret)) { - VLOG(9) << "read " << name << " from s3 ok"; - } else { - LOG(ERROR) << "read " << name << " fail"; - return ret; + break; } - // update param - { - length -= currentReadLen; // Remaining read data length - readBufOffset += currentReadLen; // next read offset - blockIndex++; - blockPos = (blockPos + currentReadLen) % blockSize; - objectOffset = 0; + int ret = 0; + if (ReadKVRequestFromS3(name, currentBuf, blockPos - objectOffset, + currentReadLen, &ret)) { + VLOG(9) << "read " << name << " from s3 ok"; + break; } - } - // add data to memory read cache - if (!curvefs::client::common::FLAGS_enableCto) { - auto chunkCacheManager = FindOrCreateChunkCacheManager(chunkIndex); - WriteLockGuard writeLockGuard(chunkCacheManager->rwLockChunk_); - DataCachePtr dataCache = std::make_shared( - s3ClientAdaptor_, chunkCacheManager, chunkPos, req->len, - dataBuf + req->readOffset, kvClientManager_); - chunkCacheManager->AddReadDataCache(dataCache); + LOG(ERROR) << "read " << name << " fail"; + // make sure variable is set only once + std::call_once(cancelFlag, [&]() { + isCanceled.store(true); + retCode.store(ret); + }); + return; + } while (false); + + // update param + { + length -= currentReadLen; // Remaining read data length + readBufOffset += currentReadLen; // next read offset + blockIndex++; + blockPos = (blockPos + currentReadLen) % blockSize; + objectOffset = 0; } } - return 0; + // add data to memory read cache + if (!curvefs::client::common::FLAGS_enableCto) { + auto chunkCacheManager = FindOrCreateChunkCacheManager(chunkIndex); + WriteLockGuard writeLockGuard(chunkCacheManager->rwLockChunk_); + DataCachePtr dataCache = std::make_shared( + s3ClientAdaptor_, chunkCacheManager, chunkPos, req.len, + dataBuf + req.readOffset, kvClientManager_); + chunkCacheManager->AddReadDataCache(dataCache); + } } void FileCacheManager::PrefetchForBlock(const S3ReadRequest &req, - uint64_t fileLen, uint64_t blockSize, - uint64_t chunkSize, - uint64_t startBlockIndex) { + uint64_t fileLen, uint64_t blockSize, + uint64_t chunkSize, + uint64_t startBlockIndex) { uint32_t prefetchBlocks = s3ClientAdaptor_->GetPrefetchBlocks(); + uint32_t objectPrefix = s3ClientAdaptor_->GetObjectPrefix(); std::vector> prefetchObjs; uint64_t blockIndex = startBlockIndex; for (uint32_t i = 0; i < prefetchBlocks; i++) { std::string name = curvefs::common::s3util::GenObjName( - req.chunkId, blockIndex, req.compaction, req.fsId, req.inodeId); + req.chunkId, blockIndex, req.compaction, + req.fsId, req.inodeId, objectPrefix); uint64_t maxReadLen = (blockIndex + 1) * blockSize; - uint64_t needReadLen = maxReadLen > fileLen - ? 
fileLen - blockIndex * blockSize - : blockSize; + uint64_t needReadLen = + maxReadLen > fileLen ? fileLen - blockIndex * blockSize : blockSize; prefetchObjs.push_back(std::make_pair(name, needReadLen)); @@ -660,7 +694,7 @@ class AsyncPrefetchCallback { void operator()(const S3Adapter *, const std::shared_ptr &context) { VLOG(9) << "prefetch end: " << context->key << ", len " << context->len - << "actual len: " << context->actualLen; + << "actual len: " << context->actualLen; std::unique_ptr guard(context->buf); auto fileCache = s3Client_->GetFsCacheManager()->FindFileCacheManager(inode_); @@ -680,8 +714,8 @@ class AsyncPrefetchCallback { int ret = s3Client_->GetDiskCacheManager()->WriteReadDirect( context->key, context->buf, context->actualLen); if (ret < 0) { - LOG_EVERY_SECOND(INFO) << - "write read directly failed, key: " << context->key; + LOG_EVERY_SECOND(INFO) + << "write read directly failed, key: " << context->key; } { curve::common::LockGuard lg(fileCache->downloadMtx_); @@ -979,7 +1013,7 @@ void FileCacheManager::ReleaseCache() { WriteLockGuard writeLockGuard(rwLock_); uint64_t chunNum = chunkCacheMap_.size(); - for (auto& chunk : chunkCacheMap_) { + for (auto &chunk : chunkCacheMap_) { chunk.second->ReleaseCache(); } @@ -1013,6 +1047,7 @@ void FileCacheManager::TruncateCache(uint64_t offset, uint64_t fileSize) { } CURVEFS_ERROR FileCacheManager::Flush(bool force, bool toS3) { + (void)toS3; // Todo: concurrent flushes within one file // instead of multiple file flushes may be better CURVEFS_ERROR ret = CURVEFS_ERROR::OK; @@ -1085,9 +1120,10 @@ CURVEFS_ERROR FileCacheManager::Flush(bool force, bool toS3) { } void ChunkCacheManager::ReadChunk(uint64_t index, uint64_t chunkPos, - uint64_t readLen, char *dataBuf, - uint64_t dataBufOffset, - std::vector *requests) { + uint64_t readLen, char *dataBuf, + uint64_t dataBufOffset, + std::vector *requests) { + (void)index; ReadLockGuard readLockGuard(rwLockChunk_); std::vector cacheMissWriteRequests, cacheMissFlushDataRequest; // read by write cache @@ -1100,9 +1136,8 @@ void ChunkCacheManager::ReadChunk(uint64_t index, uint64_t chunkPos, // read by flushing data cache for (auto request : cacheMissWriteRequests) { std::vector tmpRequests; - ReadByFlushData(request.chunkPos, request.len, - dataBuf, request.bufOffset, - &tmpRequests); + ReadByFlushData(request.chunkPos, request.len, dataBuf, + request.bufOffset, &tmpRequests); cacheMissFlushDataRequest.insert(cacheMissFlushDataRequest.end(), tmpRequests.begin(), tmpRequests.end()); @@ -1112,9 +1147,8 @@ void ChunkCacheManager::ReadChunk(uint64_t index, uint64_t chunkPos, // read by read cache for (auto request : cacheMissFlushDataRequest) { std::vector tmpRequests; - ReadByReadCache(request.chunkPos, request.len, - dataBuf, request.bufOffset, - &tmpRequests); + ReadByReadCache(request.chunkPos, request.len, dataBuf, + request.bufOffset, &tmpRequests); requests->insert(requests->end(), tmpRequests.begin(), tmpRequests.end()); } @@ -1125,11 +1159,10 @@ void ChunkCacheManager::ReadChunk(uint64_t index, uint64_t chunkPos, // read by read cache for (auto request : cacheMissWriteRequests) { std::vector tmpRequests; - ReadByReadCache(request.chunkPos, request.len, - dataBuf, request.bufOffset, - &tmpRequests); + ReadByReadCache(request.chunkPos, request.len, dataBuf, + request.bufOffset, &tmpRequests); requests->insert(requests->end(), tmpRequests.begin(), - tmpRequests.end()); + tmpRequests.end()); } return; @@ -1205,8 +1238,8 @@ void ChunkCacheManager::ReadByWriteCache(uint64_t chunkPos, uint64_t 
readLen, --------- DataCache */ if (chunkPos + readLen <= dcChunkPos + dcLen) { - iter->second->CopyDataCacheToBuf( - chunkPos - dcChunkPos, readLen, dataBuf + dataBufOffset); + iter->second->CopyDataCacheToBuf(chunkPos - dcChunkPos, readLen, + dataBuf + dataBufOffset); readLen = 0; break; /* @@ -1215,8 +1248,8 @@ void ChunkCacheManager::ReadByWriteCache(uint64_t chunkPos, uint64_t readLen, */ } else { iter->second->CopyDataCacheToBuf(chunkPos - dcChunkPos, - dcChunkPos + dcLen - chunkPos, - dataBuf + dataBufOffset); + dcChunkPos + dcLen - chunkPos, + dataBuf + dataBufOffset); readLen = chunkPos + readLen - dcChunkPos - dcLen; dataBufOffset = dcChunkPos + dcLen - chunkPos + dataBufOffset; chunkPos = dcChunkPos + dcLen; @@ -1311,7 +1344,7 @@ void ChunkCacheManager::ReadByReadCache(uint64_t chunkPos, uint64_t readLen, */ if (chunkPos + readLen <= dcChunkPos + dcLen) { dataCache->CopyDataCacheToBuf(chunkPos - dcChunkPos, readLen, - dataBuf + dataBufOffset); + dataBuf + dataBufOffset); readLen = 0; break; /* @@ -1320,8 +1353,8 @@ void ChunkCacheManager::ReadByReadCache(uint64_t chunkPos, uint64_t readLen, */ } else { dataCache->CopyDataCacheToBuf(chunkPos - dcChunkPos, - dcChunkPos + dcLen - chunkPos, - dataBuf + dataBufOffset); + dcChunkPos + dcLen - chunkPos, + dataBuf + dataBufOffset); readLen = chunkPos + readLen - dcChunkPos - dcLen; dataBufOffset = dcChunkPos + dcLen - chunkPos + dataBufOffset; chunkPos = dcChunkPos + dcLen; @@ -1343,8 +1376,8 @@ void ChunkCacheManager::ReadByReadCache(uint64_t chunkPos, uint64_t readLen, } void ChunkCacheManager::ReadByFlushData(uint64_t chunkPos, uint64_t readLen, - char *dataBuf, uint64_t dataBufOffset, - std::vector *requests) { + char *dataBuf, uint64_t dataBufOffset, + std::vector *requests) { uint64_t dcChunkPos = flushingDataCache_->GetChunkPos(); uint64_t dcLen = flushingDataCache_->GetLen(); ReadRequest request; @@ -1402,9 +1435,9 @@ void ChunkCacheManager::ReadByFlushData(uint64_t chunkPos, uint64_t readLen, --------- DataCache */ } else { - flushingDataCache_->CopyDataCacheToBuf(chunkPos - dcChunkPos, - dcChunkPos + dcLen - chunkPos, - dataBuf + dataBufOffset); + flushingDataCache_->CopyDataCacheToBuf( + chunkPos - dcChunkPos, dcChunkPos + dcLen - chunkPos, + dataBuf + dataBufOffset); readLen = chunkPos + readLen - dcChunkPos - dcLen; dataBufOffset = dcChunkPos + dcLen - chunkPos + dataBufOffset; chunkPos = dcChunkPos + dcLen; @@ -1522,18 +1555,15 @@ void ChunkCacheManager::AddReadDataCache(DataCachePtr dataCache) { uint64_t actualLen = (*dcpIter)->GetActualLen(); if (s3ClientAdaptor_->GetFsCacheManager()->Delete(dcpIter)) { g_s3MultiManagerMetric->readDataCacheNum << -1; - g_s3MultiManagerMetric->readDataCacheByte - << -1 * actualLen; + g_s3MultiManagerMetric->readDataCacheByte << -1 * actualLen; dataRCacheMap_.erase(iter); } } std::list::iterator outIter; - bool ret = - s3ClientAdaptor_->GetFsCacheManager()->Set(dataCache, &outIter); + bool ret = s3ClientAdaptor_->GetFsCacheManager()->Set(dataCache, &outIter); if (ret) { g_s3MultiManagerMetric->readDataCacheNum << 1; - g_s3MultiManagerMetric->readDataCacheByte - << dataCache->GetActualLen(); + g_s3MultiManagerMetric->readDataCacheByte << dataCache->GetActualLen(); dataRCacheMap_.emplace(chunkPos, outIter); } } @@ -1547,7 +1577,7 @@ void ChunkCacheManager::ReleaseReadDataCache(uint64_t key) { } g_s3MultiManagerMetric->readDataCacheNum << -1; g_s3MultiManagerMetric->readDataCacheByte - << -1 * (*(iter->second))->GetActualLen(); + << -1 * (*(iter->second))->GetActualLen(); 
dataRCacheMap_.erase(iter); return; } @@ -1617,8 +1647,7 @@ void ChunkCacheManager::TruncateReadCache(uint64_t chunkPos) { if ((dcChunkPos + dcLen) > chunkPos) { if (s3ClientAdaptor_->GetFsCacheManager()->Delete(rIter->second)) { g_s3MultiManagerMetric->readDataCacheNum << -1; - g_s3MultiManagerMetric->readDataCacheByte - << -1 * dcActualLen; + g_s3MultiManagerMetric->readDataCacheByte << -1 * dcActualLen; dataRCacheMap_.erase(next(rIter).base()); } } else { @@ -1629,8 +1658,7 @@ void ChunkCacheManager::TruncateReadCache(uint64_t chunkPos) { void ChunkCacheManager::ReleaseWriteDataCache(const DataCachePtr &dataCache) { s3ClientAdaptor_->GetFsCacheManager()->DataCacheNumFetchSub(1); - VLOG(9) << "chunk flush DataCacheByteDec len:" - << dataCache->GetActualLen(); + VLOG(9) << "chunk flush DataCacheByteDec len:" << dataCache->GetActualLen(); s3ClientAdaptor_->GetFsCacheManager()->DataCacheByteDec( dataCache->GetActualLen()); if (!s3ClientAdaptor_->GetFsCacheManager()->WriteCacheIsFull()) { @@ -1710,7 +1738,7 @@ CURVEFS_ERROR ChunkCacheManager::Flush(uint64_t inodeId, bool force, } void ChunkCacheManager::UpdateWriteCacheMap(uint64_t oldChunkPos, - DataCache *pDataCache) { + DataCache *pDataCache) { auto iter = dataWCacheMap_.find(oldChunkPos); DataCachePtr datacache; if (iter != dataWCacheMap_.end()) { @@ -1736,10 +1764,10 @@ void ChunkCacheManager::AddWriteDataCacheForTest(DataCachePtr dataCache) { DataCache::DataCache(S3ClientAdaptorImpl *s3ClientAdaptor, ChunkCacheManagerPtr chunkCacheManager, uint64_t chunkPos, uint64_t len, const char *data, - std::shared_ptr kvClientManager) + std::shared_ptr kvClientManager) : s3ClientAdaptor_(std::move(s3ClientAdaptor)), - chunkCacheManager_(chunkCacheManager), - status_(DataCacheStatus::Dirty), inReadCache_(false) { + chunkCacheManager_(chunkCacheManager), status_(DataCacheStatus::Dirty), + inReadCache_(false) { uint64_t blockSize = s3ClientAdaptor->GetBlockSize(); uint32_t pageSize = s3ClientAdaptor->GetPageSize(); chunkPos_ = chunkPos; @@ -1800,7 +1828,7 @@ DataCache::DataCache(S3ClientAdaptorImpl *s3ClientAdaptor, } void DataCache::CopyBufToDataCache(uint64_t dataCachePos, uint64_t len, - const char *data) { + const char *data) { uint64_t blockSize = s3ClientAdaptor_->GetBlockSize(); uint32_t pageSize = s3ClientAdaptor_->GetPageSize(); uint64_t pos = chunkPos_ + dataCachePos; @@ -1939,9 +1967,9 @@ void DataCache::MergeDataCacheToDataCache(DataCachePtr mergeDataCache, uint64_t pageIndex = blockPos / pageSize; uint64_t pagePos = blockPos % pageSize; char *data = nullptr; - PageData *meragePage = nullptr; + PageData *mergePage = nullptr; PageDataMap *pdMap = &dataMap_[blockIndex]; - int n = 0; + uint64_t n = 0; VLOG(9) << "MergeDataCacheToDataCache dataOffset:" << dataOffset << ", len:" << len << ",dataCache chunkPos:" << chunkPos_ @@ -1956,8 +1984,8 @@ void DataCache::MergeDataCacheToDataCache(DataCachePtr mergeDataCache, pageIndex = 0; pdMap = &dataMap_[blockIndex]; } - meragePage = mergeDataCache->GetPageData(blockIndex, pageIndex); - assert(meragePage); + mergePage = mergeDataCache->GetPageData(blockIndex, pageIndex); + assert(mergePage); if (pdMap->count(pageIndex)) { data = (*pdMap)[pageIndex]->data; if (pagePos + len > pageSize) { @@ -1967,10 +1995,10 @@ void DataCache::MergeDataCacheToDataCache(DataCachePtr mergeDataCache, } VLOG(9) << "MergeDataCacheToDataCache n:" << n << ", pagePos:" << pagePos; - memcpy(data + pagePos, meragePage->data + pagePos, n); + memcpy(data + pagePos, mergePage->data + pagePos, n); // 
mergeDataCache->ReleasePageData(blockIndex, pageIndex); } else { - pdMap->emplace(pageIndex, meragePage); + pdMap->emplace(pageIndex, mergePage); mergeDataCache->ErasePageData(blockIndex, pageIndex); n = pageSize; actualLen_ += pageSize; @@ -1993,10 +2021,8 @@ void DataCache::MergeDataCacheToDataCache(DataCachePtr mergeDataCache, void DataCache::Write(uint64_t chunkPos, uint64_t len, const char *data, const std::vector &mergeDataCacheVer) { - uint64_t totalSize = 0; uint64_t addByte = 0; uint64_t oldSize = 0; - uint32_t pageSize = s3ClientAdaptor_->GetPageSize(); VLOG(9) << "DataCache Write() chunkPos:" << chunkPos << ",len:" << len << ",dataCache's chunkPos:" << chunkPos_ << ",actualChunkPos:" << actualChunkPos_ @@ -2018,7 +2044,7 @@ void DataCache::Write(uint64_t chunkPos, uint64_t len, const char *data, chunkCacheManager_->rwLockWrite_.WRLock(); oldSize = actualLen_; CopyBufToDataCache(0, chunkPos + len - chunkPos_, - data + chunkPos_ - chunkPos); + data + chunkPos_ - chunkPos); AddDataBefore(chunkPos_ - chunkPos, data); addByte = actualLen_ - oldSize; s3ClientAdaptor_->GetFsCacheManager()->DataCacheByteInc(addByte); @@ -2038,7 +2064,7 @@ void DataCache::Write(uint64_t chunkPos, uint64_t len, const char *data, chunkCacheManager_->rwLockWrite_.WRLock(); oldSize = actualLen_; CopyBufToDataCache(0, chunkPos + len - chunkPos_, - data + chunkPos_ - chunkPos); + data + chunkPos_ - chunkPos); MergeDataCacheToDataCache( (*iter), chunkPos + len - (*iter)->GetChunkPos(), (*iter)->GetChunkPos() + (*iter)->GetLen() - chunkPos - @@ -2048,7 +2074,7 @@ void DataCache::Write(uint64_t chunkPos, uint64_t len, const char *data, s3ClientAdaptor_->GetFsCacheManager()->DataCacheByteInc( addByte); chunkCacheManager_->UpdateWriteCacheMap(oldChunkPos, this); - chunkCacheManager_->rwLockWrite_.Unlock(); + chunkCacheManager_->rwLockWrite_.Unlock(); return; } } @@ -2059,7 +2085,7 @@ void DataCache::Write(uint64_t chunkPos, uint64_t len, const char *data, chunkCacheManager_->rwLockWrite_.WRLock(); oldSize = actualLen_; CopyBufToDataCache(0, chunkPos + len - chunkPos_, - data + chunkPos_ - chunkPos); + data + chunkPos_ - chunkPos); AddDataBefore(chunkPos_ - chunkPos, data); addByte = actualLen_ - oldSize; s3ClientAdaptor_->GetFsCacheManager()->DataCacheByteInc(addByte); @@ -2178,7 +2204,7 @@ void DataCache::Truncate(uint64_t size) { ((chunkPos_ + len_ - actualChunkPos_) / pageSize + 1) * pageSize; } assert(tmpActualLen == actualLen_); - (void) tmpActualLen; + (void)tmpActualLen; return; } @@ -2263,8 +2289,8 @@ CURVEFS_ERROR DataCache::Flush(uint64_t inodeId, bool toS3) { // inode ship to flush std::shared_ptr inodeWrapper; - ret = s3ClientAdaptor_->GetInodeCacheManager()->GetInode( - inodeId, inodeWrapper); + ret = s3ClientAdaptor_->GetInodeCacheManager()->GetInode(inodeId, + inodeWrapper); if (ret != CURVEFS_ERROR::OK) { LOG(WARNING) << "get inode fail, ret:" << ret; status_.store(DataCacheStatus::Dirty, std::memory_order_release); @@ -2282,8 +2308,9 @@ CURVEFS_ERROR DataCache::Flush(uint64_t inodeId, bool toS3) { return CURVEFS_ERROR::OK; } -CURVEFS_ERROR DataCache::PrepareFlushTasks(uint64_t inodeId, - char *data, std::vector> *s3Tasks, +CURVEFS_ERROR DataCache::PrepareFlushTasks( + uint64_t inodeId, char *data, + std::vector> *s3Tasks, std::vector> *kvCacheTasks, uint64_t *chunkId, uint64_t *writeOffset) { // allocate chunkid @@ -2295,8 +2322,8 @@ CURVEFS_ERROR DataCache::PrepareFlushTasks(uint64_t inodeId, } // generate flush task - uint64_t chunkSize = s3ClientAdaptor_->GetChunkSize(); uint64_t blockSize = 
s3ClientAdaptor_->GetBlockSize(); + uint32_t objectPrefix = s3ClientAdaptor_->GetObjectPrefix(); uint64_t blockPos = chunkPos_ % blockSize; uint64_t blockIndex = chunkPos_ / blockSize; uint64_t remainLen = len_; @@ -2306,9 +2333,7 @@ CURVEFS_ERROR DataCache::PrepareFlushTasks(uint64_t inodeId, // generate flush to disk or s3 task std::string objectName = curvefs::common::s3util::GenObjName( - *chunkId, blockIndex, 0, fsId, inodeId); - int ret = 0; - uint64_t start = butil::cpuwide_time_us(); + *chunkId, blockIndex, 0, fsId, inodeId, objectPrefix); auto context = std::make_shared(); context->key = objectName; context->buffer = data + (*writeOffset); @@ -2334,29 +2359,29 @@ CURVEFS_ERROR DataCache::PrepareFlushTasks(uint64_t inodeId, return CURVEFS_ERROR::OK; } -CachePoily DataCache::GetCachePolicy(bool toS3) { +CachePolicy DataCache::GetCachePolicy(bool toS3) { const bool mayCache = s3ClientAdaptor_->HasDiskCache() && !s3ClientAdaptor_->GetDiskCacheManager()->IsDiskCacheFull() && !toS3; if (s3ClientAdaptor_->IsReadCache() && mayCache) { - return CachePoily::RCache; + return CachePolicy::RCache; } else if (s3ClientAdaptor_->IsReadWriteCache() && mayCache) { - return CachePoily::WRCache; + return CachePolicy::WRCache; } else { - return CachePoily::NCache; + return CachePolicy::NCache; } } void DataCache::FlushTaskExecute( - CachePoily cachePoily, + CachePolicy cachePolicy, const std::vector> &s3Tasks, const std::vector> &kvCacheTasks) { // callback std::atomic s3PendingTaskCal(s3Tasks.size()); std::atomic kvPendingTaskCal(kvCacheTasks.size()); - CountDownEvent s3TaskEnvent(s3PendingTaskCal); - CountDownEvent kvTaskEnvent(kvPendingTaskCal); + CountDownEvent s3TaskEvent(s3PendingTaskCal); + CountDownEvent kvTaskEvent(kvPendingTaskCal); PutObjectAsyncCallBack s3cb = [&](const std::shared_ptr &context) { @@ -2367,7 +2392,7 @@ void DataCache::FlushTaskExecute( context->bufferSize, context->startTime); } - if (CachePoily::RCache == cachePoily) { + if (CachePolicy::RCache == cachePolicy) { VLOG(9) << "write to read cache, name = " << context->key; s3ClientAdaptor_->GetDiskCacheManager()->Enqueue(context, true); @@ -2376,7 +2401,7 @@ void DataCache::FlushTaskExecute( // Don't move the if sentence to the front // it will cause core dumped because s3Metric_ // will be destructed before being accessed - s3TaskEnvent.Signal(); + s3TaskEvent.Signal(); return; } @@ -2385,7 +2410,7 @@ void DataCache::FlushTaskExecute( }; SetKVCacheDone kvdone = [&](const std::shared_ptr &task) { - kvTaskEnvent.Signal(); + kvTaskEvent.Signal(); return; }; @@ -2395,7 +2420,7 @@ void DataCache::FlushTaskExecute( s3Tasks.begin(), s3Tasks.end(), [&](const std::shared_ptr &context) { context->cb = s3cb; - if (CachePoily::WRCache == cachePoily) { + if (CachePolicy::WRCache == cachePolicy) { s3ClientAdaptor_->GetDiskCacheManager()->Enqueue(context); } else { s3ClientAdaptor_->GetS3Client()->UploadAsync(context); @@ -2409,14 +2434,14 @@ void DataCache::FlushTaskExecute( task->done = kvdone; kvClientManager_->Set(task); }); - kvTaskEnvent.Wait(); + kvTaskEvent.Wait(); } - s3TaskEnvent.Wait(); + s3TaskEvent.Wait(); } void DataCache::PrepareS3ChunkInfo(uint64_t chunkId, uint64_t offset, - uint64_t len, S3ChunkInfo *info) { + uint64_t len, S3ChunkInfo *info) { info->set_chunkid(chunkId); info->set_compaction(0); info->set_offset(offset); @@ -2467,7 +2492,7 @@ void FsCacheManager::ReadCacheReleaseExecutor::ReleaseCache() { tmp.swap(retired_); } - for (auto& c : tmp) { + for (auto &c : tmp) { c->Release(); c.reset(); } @@ -2489,7 +2514,7 
@@ FsCacheManager::ReadCacheReleaseExecutor::~ReadCacheReleaseExecutor() { } void FsCacheManager::ReadCacheReleaseExecutor::Release( - std::list* caches) { + std::list *caches) { std::lock_guard lk(mtx_); retired_.splice(retired_.end(), *caches); cond_.notify_one(); diff --git a/curvefs/src/client/s3/client_s3_cache_manager.h b/curvefs/src/client/s3/client_s3_cache_manager.h index 9a7733e0fc..d27aa308c5 100644 --- a/curvefs/src/client/s3/client_s3_cache_manager.h +++ b/curvefs/src/client/s3/client_s3_cache_manager.h @@ -34,14 +34,11 @@ #include #include "curvefs/proto/metaserver.pb.h" -#include "curvefs/src/client/error_code.h" +#include "curvefs/src/client/filesystem/error.h" #include "curvefs/src/client/s3/client_s3.h" -#include "curvefs/src/client/common/common.h" #include "src/common/concurrent/concurrent.h" -#include "src/common/timeutility.h" -#include "curvefs/src/client/metric/client_metric.h" +#include "src/common/concurrent/task_thread_pool.h" #include "curvefs/src/client/kvclient/kvclient_manager.h" -#include "curvefs/src/client/kvclient/kvclient.h" #include "curvefs/src/client/inode_wrapper.h" using curve::common::ReadLockGuard; @@ -70,7 +67,7 @@ using curvefs::metaserver::S3ChunkInfoList; enum CacheType { Write = 1, Read = 2 }; -enum class CachePoily { +enum class CachePolicy { NCache, RCache, WRCache, @@ -219,11 +216,11 @@ class DataCache : public std::enable_shared_from_this { uint64_t *chunkId, uint64_t *writeOffset); void FlushTaskExecute( - CachePoily cachePoily, + CachePolicy cachePolicy, const std::vector> &s3Tasks, const std::vector> &kvCacheTasks); - CachePoily GetCachePolicy(bool toS3); + CachePolicy GetCachePolicy(bool toS3); private: S3ClientAdaptorImpl *s3ClientAdaptor_; @@ -335,10 +332,13 @@ class FileCacheManager { public: FileCacheManager(uint32_t fsid, uint64_t inode, S3ClientAdaptorImpl *s3ClientAdaptor, - std::shared_ptr kvClientManager) + std::shared_ptr kvClientManager, + std::shared_ptr> threadPool) : fsId_(fsid), inode_(inode), s3ClientAdaptor_(s3ClientAdaptor), - kvClientManager_(std::move(kvClientManager)) {} - FileCacheManager() {} + kvClientManager_(std::move(kvClientManager)), + readTaskPool_(threadPool) {} + FileCacheManager() = default; + ~FileCacheManager() = default; ChunkCacheManagerPtr FindOrCreateChunkCacheManager(uint64_t index); @@ -403,19 +403,41 @@ class FileCacheManager { // miss read from memory read/write cache, need read from // kv(localdisk/remote cache/s3) - int GenerateKVReuqest(const std::shared_ptr &inodeWrapper, + int GenerateKVRequest(const std::shared_ptr &inodeWrapper, const std::vector &readRequest, char *dataBuf, std::vector *kvRequest); + enum class ReadStatus { + OK = 0, + S3_READ_FAIL = -1, + S3_NOT_EXIST = -2, + }; + + ReadStatus toReadStatus(const int retCode) { + ReadStatus st = ReadStatus::OK; + if (retCode < 0) { + st = (retCode == -2) ? 
ReadStatus::S3_NOT_EXIST + : ReadStatus::S3_READ_FAIL; + } + return st; + } + // read kv request, need - int ReadKVRequest(const std::vector &kvRequests, - char *dataBuf, uint64_t fileLen); + ReadStatus ReadKVRequest(const std::vector &kvRequests, + char *dataBuf, uint64_t fileLen); + + // thread function for ReadKVRequest + void ProcessKVRequest(const S3ReadRequest &req, char *dataBuf, + uint64_t fileLen, + std::once_flag &cancelFlag, // NOLINT + std::atomic &isCanceled, // NOLINT + std::atomic &retCode); // NOLINT // read kv request from local disk cache bool ReadKVRequestFromLocalCache(const std::string &name, char *databuf, uint64_t offset, uint64_t len); - // read kv reuqest from remote cache like memcached + // read kv request from remote cache like memcached bool ReadKVRequestFromRemoteCache(const std::string &name, char *databuf, uint64_t offset, uint64_t length); @@ -424,7 +446,7 @@ class FileCacheManager { uint64_t offset, uint64_t length, int *ret); // read retry policy when read from s3 occur not exist error - int HandleReadS3NotExist(int ret, uint32_t retry, + int HandleReadS3NotExist(uint32_t retry, const std::shared_ptr &inodeWrapper); // prefetch for block @@ -445,19 +467,24 @@ class FileCacheManager { std::set downloadingObj_; std::shared_ptr kvClientManager_; + std::shared_ptr> readTaskPool_; }; class FsCacheManager { public: FsCacheManager(S3ClientAdaptorImpl *s3ClientAdaptor, uint64_t readCacheMaxByte, uint64_t writeCacheMaxByte, + uint32_t readCacheThreads, std::shared_ptr kvClientManager) : lruByte_(0), wDataCacheNum_(0), wDataCacheByte_(0), readCacheMaxByte_(readCacheMaxByte), writeCacheMaxByte_(writeCacheMaxByte), s3ClientAdaptor_(s3ClientAdaptor), isWaiting_(false), - kvClientManager_(std::move(kvClientManager)) {} - FsCacheManager() {} + kvClientManager_(std::move(kvClientManager)) { + readTaskPool_->Start(readCacheThreads); + } + FsCacheManager() = default; + virtual ~FsCacheManager() { readTaskPool_->Stop(); } virtual FileCacheManagerPtr FindFileCacheManager(uint64_t inodeId); virtual FileCacheManagerPtr FindOrCreateFileCacheManager(uint64_t fsId, uint64_t inodeId); @@ -566,6 +593,9 @@ class FsCacheManager { ReadCacheReleaseExecutor releaseReadCache_; std::shared_ptr kvClientManager_; + + std::shared_ptr> readTaskPool_ = + std::make_shared>(); }; } // namespace client diff --git a/curvefs/src/client/s3/disk_cache_base.cpp b/curvefs/src/client/s3/disk_cache_base.cpp index 1bfdf36297..3bfe3d8a73 100644 --- a/curvefs/src/client/s3/disk_cache_base.cpp +++ b/curvefs/src/client/s3/disk_cache_base.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include "curvefs/src/client/s3/disk_cache_base.h" @@ -37,13 +38,13 @@ namespace client { #define CACHE_READ_DIR "cacheread" void DiskCacheBase::Init(std::shared_ptr wrapper, - const std::string cacheDir) { + const std::string cacheDir, uint32_t objectPrefix) { cacheDir_ = cacheDir; posixWrapper_ = wrapper; + objectPrefix_ = objectPrefix; } int DiskCacheBase::CreateIoDir(bool writreDir) { - struct stat statFile; bool ret; std::string FullDirPath; @@ -85,6 +86,37 @@ std::string DiskCacheBase::GetCacheIoFullDir() { return fullPath; } +int DiskCacheBase::CreateDir(const std::string dir) { + size_t p = dir.find_last_of('/'); + std::string dirPath = dir; + if (p != -1ULL) { + dirPath.erase(dirPath.begin()+p, dirPath.end()); + } + std::vector names; + ::curve::common::SplitString(dirPath, "/", &names); + // root dir must exists + if (0 == names.size()) + return 0; + + std::string path; + for (size_t i = 0; i < names.size(); 
++i) { + if (0 == i && dirPath[0] != '/') + path = path + names[i]; + else + path = path + "/" + names[i]; + + if (IsFileExist(path)) { + continue; + } + // dir needs 755 permission,or “Permission denied” + if (posixWrapper_->mkdir(path.c_str(), 0755) < 0) { + LOG(WARNING) << "mkdir " << path << " failed. "<< strerror(errno); + return -errno; + } + } + return 0; +} + int DiskCacheBase::LoadAllCacheFile(std::set *cachedObj) { std::string cachePath = GetCacheIoFullDir(); bool ret = IsFileExist(cachePath); @@ -94,26 +126,44 @@ int DiskCacheBase::LoadAllCacheFile(std::set *cachedObj) { } VLOG(3) << "LoadAllCacheFile start, dir: " << cachePath; - DIR *cacheDir = NULL; - struct dirent *cacheDirent = NULL; - cacheDir = posixWrapper_->opendir(cachePath.c_str()); - if (!cacheDir) { - LOG(ERROR) << "LoadAllCacheFile, opendir error, errno = " << errno; - return -1; - } - while ((cacheDirent = posixWrapper_->readdir(cacheDir)) != NULL) { - if ((!strncmp(cacheDirent->d_name, ".", 1)) || - (!strncmp(cacheDirent->d_name, "..", 2))) - continue; - std::string fileName = cacheDirent->d_name; - cachedObj->emplace(fileName); - VLOG(9) << "LoadAllCacheFile obj, name = " << fileName; - } + std::function *cacheObj)> listDir; - int rc = posixWrapper_->closedir(cacheDir); - if (rc < 0) { - LOG(ERROR) << "LoadAllCacheFile, opendir error, errno = " << errno; - return rc; + listDir = [&listDir, this](const std::string &path, + std::set *cacheObj) -> bool { + DIR *dir; + struct dirent *ent; + std::string fileName, nextdir; + if ((dir = posixWrapper_->opendir(path.c_str())) != NULL) { + while ((ent = posixWrapper_->readdir(dir)) != NULL) { + VLOG(9) << "LoadAllCacheFile obj, name = " << ent->d_name; + if (strncmp(ent->d_name, ".", 1) == 0 || + strncmp(ent->d_name, "..", 2) == 0) { + continue; + } else if (ent->d_type == 8) { + fileName = std::string(ent->d_name); + VLOG(9) << "LoadAllCacheFile obj, name = " << fileName; + cacheObj->emplace(fileName); + } else { + nextdir = std::string(ent->d_name); + nextdir = path + '/' + nextdir; + if (!listDir(nextdir, cacheObj)) { + return false; + } + } + } + int ret = posixWrapper_->closedir(dir); + if (ret < 0) { + LOG(ERROR) << "close dir " << dir << ", error = " << errno; + } + return ret >= 0; + } + LOG(ERROR) << "LoadAllCacheFile Opendir error, path =" << path; + return false; + }; + ret = listDir(cachePath, cachedObj); + if (!ret) { + return -1; } VLOG(3) << "LoadAllCacheReadFile end, dir: " << cachePath; return 0; diff --git a/curvefs/src/client/s3/disk_cache_base.h b/curvefs/src/client/s3/disk_cache_base.h index e1df338894..5d7d34417b 100644 --- a/curvefs/src/client/s3/disk_cache_base.h +++ b/curvefs/src/client/s3/disk_cache_base.h @@ -28,6 +28,7 @@ #include #include #include +#include #include #include "curvefs/src/common/wrap_posix.h" @@ -45,23 +46,24 @@ class DiskCacheBase { DiskCacheBase() {} virtual ~DiskCacheBase() {} virtual void Init(std::shared_ptr wrapper, - const std::string cacheDir); + const std::string cacheDir, uint32_t objectPrefix); /** * @brief Create Read/Write Cache Dir. */ virtual int CreateIoDir(bool writreDir); virtual bool IsFileExist(const std::string file); + virtual int CreateDir(const std::string name); /** * @brief Get Read/Write Cache full Dir(include CacheDir_). 
*/ virtual std::string GetCacheIoFullDir(); virtual int LoadAllCacheFile(std::set *cachedObj); + uint32_t objectPrefix_; private: std::string cacheIoDir_; std::string cacheDir_; - // file system operation encapsulation std::shared_ptr posixWrapper_; }; diff --git a/curvefs/src/client/s3/disk_cache_manager.cpp b/curvefs/src/client/s3/disk_cache_manager.cpp index 0490cf09f5..fb0dae3b1e 100644 --- a/curvefs/src/client/s3/disk_cache_manager.cpp +++ b/curvefs/src/client/s3/disk_cache_manager.cpp @@ -28,6 +28,7 @@ #include "curvefs/src/client/s3/client_s3_adaptor.h" #include "curvefs/src/client/s3/disk_cache_manager.h" +#include "curvefs/src/common/s3util.h" namespace curvefs { @@ -65,6 +66,7 @@ DiskCacheManager::DiskCacheManager(std::shared_ptr posixWrapper, safeRatio_ = 0; diskUsedInit_ = false; maxUsableSpaceBytes_ = 0; + objectPrefix_ = 0; // cannot limit the size, // because cache is been delete must after upload to s3 cachedObjName_ = std::make_shared< @@ -85,10 +87,10 @@ int DiskCacheManager::Init(std::shared_ptr client, maxUsableSpaceBytes_ = option.diskCacheOpt.maxUsableSpaceBytes; maxFileNums_ = option.diskCacheOpt.maxFileNums; cmdTimeoutSec_ = option.diskCacheOpt.cmdTimeoutSec; - - cacheWrite_->Init(client_, posixWrapper_, cacheDir_, - option.diskCacheOpt.asyncLoadPeriodMs, cachedObjName_); - cacheRead_->Init(posixWrapper_, cacheDir_); + objectPrefix_ = option.objectPrefix; + cacheWrite_->Init(client_, posixWrapper_, cacheDir_, objectPrefix_, + option.diskCacheOpt.asyncLoadPeriodMs, cachedObjName_); + cacheRead_->Init(posixWrapper_, cacheDir_, objectPrefix_); int ret; ret = CreateDir(); if (ret < 0) { @@ -103,7 +105,7 @@ int DiskCacheManager::Init(std::shared_ptr client, return ret; } - // start aync upload thread + // start async upload thread cacheWrite_->AsyncUploadRun(); std::thread uploadThread = std::thread(&DiskCacheManager::UploadAllCacheWriteFile, this); @@ -157,13 +159,12 @@ int DiskCacheManager::ClearReadCache(const std::list &files) { return cacheRead_->ClearReadCache(files); } -void DiskCacheManager::AddCache(const std::string name, - bool cacheWriteExist) { +void DiskCacheManager::AddCache(const std::string &name) { cachedObjName_->Put(name); VLOG(9) << "cache size is: " << cachedObjName_->Size(); } -bool DiskCacheManager::IsCached(const std::string name) { +bool DiskCacheManager::IsCached(const std::string &name) { if (!cachedObjName_->IsCached(name)) { VLOG(9) << "not cached, name = " << name; return false; @@ -172,16 +173,18 @@ bool DiskCacheManager::IsCached(const std::string name) { return true; } +bool DiskCacheManager::IsCacheClean() { + return cacheWrite_->IsCacheClean(); +} + int DiskCacheManager::UmountDiskCache() { LOG(INFO) << "umount disk cache."; - int ret; - diskInitThread_.join(); - ret = cacheWrite_->UploadAllCacheWriteFile(); - if (ret < 0) { - LOG(ERROR) << "umount disk cache error."; + if (diskInitThread_.joinable()) { + diskInitThread_.join(); } TrimStop(); cacheWrite_->AsyncUploadStop(); + LOG_IF(ERROR, !IsCacheClean()) << "umount disk cache error."; LOG(INFO) << "umount disk cache end."; return 0; } @@ -211,7 +214,7 @@ int DiskCacheManager::CreateDir() { LOG(ERROR) << "create cache read dir error. 
ret = " << ret; return ret; } - VLOG(9) << "create cache dir sucess."; + VLOG(9) << "create cache dir success."; return 0; } @@ -359,6 +362,9 @@ void DiskCacheManager::TrimCache() { waitIntervalSec_.Init(trimCheckIntervalSec_ * 1000); // trim will start after get the disk size while (!IsDiskUsedInited()) { + if (!isRunning_) { + return; + } waitIntervalSec_.WaitForNextExcution(); } // 1. check cache disk usage every sleepSec seconds. @@ -385,8 +391,12 @@ void DiskCacheManager::TrimCache() { } VLOG(6) << "obj will be removed01: " << cacheKey; - cacheReadFile = cacheReadFullDir + "/" + cacheKey; - cacheWriteFile = cacheWriteFullDir + "/" + cacheKey; + cacheReadFile = cacheReadFullDir + "/" + + curvefs::common::s3util::GenPathByObjName( + cacheKey, objectPrefix_); + cacheWriteFile = cacheWriteFullDir + "/" + + curvefs::common::s3util::GenPathByObjName( + cacheKey, objectPrefix_); struct stat statFile; int ret = 0; ret = posixWrapper_->stat(cacheWriteFile.c_str(), &statFile); diff --git a/curvefs/src/client/s3/disk_cache_manager.h b/curvefs/src/client/s3/disk_cache_manager.h index bb108aad20..1c77fe6c83 100644 --- a/curvefs/src/client/s3/disk_cache_manager.h +++ b/curvefs/src/client/s3/disk_cache_manager.h @@ -65,7 +65,7 @@ class DiskCacheManager { const S3ClientAdaptorOption option); virtual int UmountDiskCache(); - virtual bool IsCached(const std::string name); + virtual bool IsCached(const std::string &name); /** * @brief add obj to cachedObjName @@ -73,8 +73,7 @@ class DiskCacheManager { * @param[in] cacheWriteExist whether the obj is * exist in cache write */ - void AddCache(const std::string name, - bool cacheWriteExist = true); + void AddCache(const std::string &name); int CreateDir(); std::string GetCacheReadFullDir(); @@ -112,7 +111,7 @@ class DiskCacheManager { void InitMetrics(const std::string &fsName); /** - * @brief: has geted the origin used size or not. + * @brief: has got the origin used size or not. 
*/ virtual bool IsDiskUsedInited() { return diskUsedInit_.load(); @@ -160,6 +159,11 @@ class DiskCacheManager { */ bool IsExceedFileNums(); + /** + * @brief check whether cache dir does not exist or there is no cache file + */ + bool IsCacheClean(); + curve::common::Thread backEndThread_; curve::common::Atomic isRunning_; curve::common::InterruptibleSleeper sleeper_; @@ -169,6 +173,7 @@ class DiskCacheManager { uint32_t safeRatio_; uint64_t maxUsableSpaceBytes_; uint64_t maxFileNums_; + uint32_t objectPrefix_; // used bytes of disk cache std::atomic usedBytes_; // used ratio of the file system in disk cache @@ -188,7 +193,7 @@ class DiskCacheManager { S3ClientAdaptorOption option_; - // has geted the origin used size or not + // has got the origin used size or not std::atomic diskUsedInit_; curve::common::Thread diskInitThread_; }; diff --git a/curvefs/src/client/s3/disk_cache_manager_impl.cpp b/curvefs/src/client/s3/disk_cache_manager_impl.cpp index d1bc3ec82d..209a2d4e97 100644 --- a/curvefs/src/client/s3/disk_cache_manager_impl.cpp +++ b/curvefs/src/client/s3/disk_cache_manager_impl.cpp @@ -75,7 +75,7 @@ void DiskCacheManagerImpl::Enqueue( int DiskCacheManagerImpl::WriteReadDirectClosure( std::shared_ptr context) { VLOG(9) << "WriteReadClosure start, name: " << context->key; - // Write to read cache, we don't care if the cache wirte success + // Write to read cache, we don't care if the cache write success int ret = WriteReadDirect(context->key, context->buffer, context->bufferSize); VLOG(9) << "WriteReadClosure end, name: " << context->key; @@ -151,7 +151,7 @@ int DiskCacheManagerImpl::WriteReadDirect(const std::string fileName, return ret; } // add cache. - diskCacheManager_->AddCache(fileName, false); + diskCacheManager_->AddCache(fileName); return ret; } @@ -165,7 +165,7 @@ int DiskCacheManagerImpl::Read(const std::string name, char *buf, } // read disk file maybe fail because of disk file has been removed. int ret = diskCacheManager_->ReadDiskFile(name, buf, offset, length); - if (ret < 0 || ret < length) { + if (ret < static_cast(length)) { LOG(ERROR) << "read disk file error. 
readRet = " << ret; ret = client_->Download(name, buf, offset, length); if (ret < 0) { @@ -186,13 +186,13 @@ bool DiskCacheManagerImpl::IsDiskCacheFull() { } int DiskCacheManagerImpl::UmountDiskCache() { + taskPool_.Stop(); int ret; ret = diskCacheManager_->UmountDiskCache(); if (ret < 0) { LOG(ERROR) << "umount disk cache error."; return -1; } - taskPool_.Stop(); client_->Deinit(); return 0; } diff --git a/curvefs/src/client/s3/disk_cache_manager_impl.h b/curvefs/src/client/s3/disk_cache_manager_impl.h index 01d838432d..7a691909ff 100644 --- a/curvefs/src/client/s3/disk_cache_manager_impl.h +++ b/curvefs/src/client/s3/disk_cache_manager_impl.h @@ -83,7 +83,7 @@ class DiskCacheManagerImpl { * @brief Write obj * @param[in] name obj name * @param[in] buf what to write - * @param[in] length wtite length + * @param[in] length write length * @return success: write length, fail : < 0 */ int Write(const std::string name, const char *buf, uint64_t length); diff --git a/curvefs/src/client/s3/disk_cache_read.cpp b/curvefs/src/client/s3/disk_cache_read.cpp index b0ac46c6d9..4f452a3181 100644 --- a/curvefs/src/client/s3/disk_cache_read.cpp +++ b/curvefs/src/client/s3/disk_cache_read.cpp @@ -37,9 +37,9 @@ namespace curvefs { namespace client { void DiskCacheRead::Init(std::shared_ptr posixWrapper, - const std::string cacheDir) { + const std::string cacheDir, uint32_t objectPrefix) { posixWrapper_ = posixWrapper; - DiskCacheBase::Init(posixWrapper, cacheDir); + DiskCacheBase::Init(posixWrapper, cacheDir, objectPrefix); } int DiskCacheRead::ReadDiskFile(const std::string name, char *buf, @@ -47,7 +47,7 @@ int DiskCacheRead::ReadDiskFile(const std::string name, char *buf, VLOG(6) << "ReadDiskFile start. name = " << name << ", offset = " << offset << ", length = " << length; std::string fileFullPath; - int fd, ret; + int fd; fileFullPath = GetCacheIoFullDir() + "/" + name; fd = posixWrapper_->open(fileFullPath.c_str(), O_RDONLY, MODE); if (fd < 0) { @@ -69,7 +69,7 @@ int DiskCacheRead::ReadDiskFile(const std::string name, char *buf, posixWrapper_->close(fd); return readLen; } - if (readLen < length) { + if (readLen < static_cast(length)) { LOG(ERROR) << "read disk file is not entirely. read len = " << readLen << ", but want len = " << length << ", file = " << name; posixWrapper_->close(fd); @@ -85,7 +85,7 @@ int DiskCacheRead::LinkWriteToRead(const std::string fileName, const std::string fullWriteDir, const std::string fullReadDir) { VLOG(6) << "LinkWriteToRead start. name = " << fileName; - std::string fullReadPath, fullWritePath; + std::string fullReadPath, fullWritePath, dirPath; fullWritePath = fullWriteDir + "/" + fileName; fullReadPath = fullReadDir + "/" + fileName; int ret; @@ -94,6 +94,16 @@ int DiskCacheRead::LinkWriteToRead(const std::string fileName, << ", file = " << fullWritePath; return -1; } + + if (objectPrefix_ != 0) { + ret = CreateDir(fullReadPath); + if (ret < 0 && errno != EEXIST) { + LOG(ERROR) << "Mkdir error. ret = " << ret << ", errno = " << errno + << ", path is " << fullReadPath; + return -1; + } + } + ret = posixWrapper_->link(fullWritePath.c_str(), fullReadPath.c_str()); if (ret < 0 && errno != EEXIST ) { @@ -138,7 +148,7 @@ int DiskCacheRead::WriteDiskFile(const std::string fileName, const char *buf, return fd; } ssize_t writeLen = posixWrapper_->write(fd, buf, length); - if (writeLen < 0 || writeLen < length) { + if (writeLen < static_cast(length)) { LOG(ERROR) << "write disk file error. 
ret = " << writeLen << ", file = " << fileName; posixWrapper_->close(fd); diff --git a/curvefs/src/client/s3/disk_cache_read.h b/curvefs/src/client/s3/disk_cache_read.h index 2261984c98..0956a93bdb 100644 --- a/curvefs/src/client/s3/disk_cache_read.h +++ b/curvefs/src/client/s3/disk_cache_read.h @@ -45,7 +45,7 @@ class DiskCacheRead : public DiskCacheBase { DiskCacheRead() {} virtual ~DiskCacheRead() {} virtual void Init(std::shared_ptr posixWrapper, - const std::string cacheDir); + const std::string cacheDir, uint32_t objectPrefix); virtual int ReadDiskFile(const std::string name, char *buf, uint64_t offset, uint64_t length); virtual int WriteDiskFile(const std::string fileName, const char *buf, diff --git a/curvefs/src/client/s3/disk_cache_write.cpp b/curvefs/src/client/s3/disk_cache_write.cpp index c7abcb7f3f..149ea65fff 100644 --- a/curvefs/src/client/s3/disk_cache_write.cpp +++ b/curvefs/src/client/s3/disk_cache_write.cpp @@ -27,7 +27,6 @@ #include #include - #include "curvefs/src/client/s3/disk_cache_write.h" #include "curvefs/src/common/s3util.h" @@ -38,6 +37,7 @@ namespace client { void DiskCacheWrite::Init(std::shared_ptr client, std::shared_ptr posixWrapper, const std::string cacheDir, + uint32_t objectPrefix, uint64_t asyncLoadPeriodMs, std::shared_ptr> cachedObjName) { @@ -45,11 +45,11 @@ void DiskCacheWrite::Init(std::shared_ptr client, posixWrapper_ = posixWrapper; asyncLoadPeriodMs_ = asyncLoadPeriodMs; cachedObjName_ = cachedObjName; - DiskCacheBase::Init(posixWrapper, cacheDir); + DiskCacheBase::Init(posixWrapper, cacheDir, objectPrefix); } void DiskCacheWrite::AsyncUploadEnqueue(const std::string objName) { - std::lock_guard lk(mtx_); + std::lock_guard lock(mtx_); waitUpload_.push_back(objName); } @@ -139,7 +139,7 @@ int DiskCacheWrite::UploadFile(const std::string &name, [&, buffer, syncTask, name] (const std::shared_ptr &context) { if (context->retCode == 0) { - if (metric_.get() != nullptr) { + if (metric_ != nullptr) { metric_->writeS3.bps.count << context->bufferSize; metric_->writeS3.qps.count << 1; metric_->writeS3.latency @@ -186,7 +186,7 @@ bool DiskCacheWrite::WriteCacheValid() { int DiskCacheWrite::GetUploadFile(const std::string &inode, std::list *toUpload) { - std::unique_lock lk(mtx_); + std::unique_lock lock(mtx_); if (waitUpload_.empty()) { return 0; } @@ -195,8 +195,8 @@ int DiskCacheWrite::GetUploadFile(const std::string &inode, return toUpload->size(); } waitUpload_.remove_if([&](const std::string &filename) { - bool inodeFile = - curvefs::common::s3util::ValidNameOfInode(inode, filename); + bool inodeFile = curvefs::common::s3util::ValidNameOfInode( + inode, filename, objectPrefix_); if (inodeFile) { toUpload->emplace_back(filename); } @@ -207,17 +207,18 @@ int DiskCacheWrite::GetUploadFile(const std::string &inode, } int DiskCacheWrite::FileExist(const std::string &inode) { - // load all write cacahe + // load all write cache std::set cachedObj; int ret = LoadAllCacheFile(&cachedObj); if (ret < 0) { - LOG(ERROR) << "DiskCacheWrite, load all cacched file fail ret = " + LOG(ERROR) << "DiskCacheWrite, load all cached file fail ret = " << ret; return ret; } for (auto iter = cachedObj.begin(); iter != cachedObj.end(); iter++) { - bool exist = curvefs::common::s3util::ValidNameOfInode(inode, *iter); + bool exist = curvefs::common::s3util::ValidNameOfInode( + inode, *iter, objectPrefix_); if (exist) { return 1; } @@ -274,6 +275,7 @@ int DiskCacheWrite::AsyncUploadFunc() { } std::list toUpload; + std::shared_ptr syncTask; VLOG(3) << "async upload function 
start."; while (sleeper_.wait_for(std::chrono::milliseconds(asyncLoadPeriodMs_))) { @@ -282,13 +284,23 @@ int DiskCacheWrite::AsyncUploadFunc() { return 0; } toUpload.clear(); - if (GetUploadFile("", &toUpload) <= 0) { + int num = GetUploadFile("", &toUpload); + if (num <= 0) { + std::unique_lock lock(mtx_); + if (waitUpload_.empty()) { + cond_.notify_all(); + } continue; } - VLOG(6) << "async upload file size = " << toUpload.size(); - UploadFile(toUpload, nullptr); + VLOG(6) << "async upload file size = " << num; + syncTask.reset(new SynchronizationTask(num)); + UploadFile(toUpload, syncTask); VLOG(6) << "async upload all files"; } + + if (syncTask) { + syncTask->Wait(); + } return 0; } @@ -303,15 +315,20 @@ int DiskCacheWrite::AsyncUploadRun() { } int DiskCacheWrite::AsyncUploadStop() { + if (isRunning_.load()) { + std::unique_lock lock(mtx_); + while (!waitUpload_.empty()) { + cond_.wait_for(lock, std::chrono::milliseconds(asyncLoadPeriodMs_)); + } + } if (isRunning_.exchange(false)) { LOG(INFO) << "stop AsyncUpload thread..."; sleeper_.interrupt(); backEndThread_.join(); LOG(INFO) << "stop AsyncUpload thread ok."; return -1; - } else { - LOG(INFO) << "AsyncUpload thread not running."; } + LOG(INFO) << "AsyncUpload thread not running."; return 0; } @@ -319,33 +336,50 @@ int DiskCacheWrite::UploadAllCacheWriteFile() { VLOG(3) << "upload all cached write file start."; std::string fileFullPath; bool ret; - DIR *cacheWriteDir = NULL; - struct dirent *cacheWriteDirent = NULL; + int doRet; fileFullPath = GetCacheIoFullDir(); ret = IsFileExist(fileFullPath); if (!ret) { LOG(ERROR) << "cache write dir is not exist."; return -1; } - cacheWriteDir = posixWrapper_->opendir(fileFullPath.c_str()); - if (!cacheWriteDir) { - LOG(ERROR) << "opendir error, errno = " << errno; - return -1; - } - int doRet; std::vector uploadObjs; - while ((cacheWriteDirent = posixWrapper_->readdir(cacheWriteDir)) != NULL) { - if ((!strncmp(cacheWriteDirent->d_name, ".", 1)) || - (!strncmp(cacheWriteDirent->d_name, "..", 2))) - continue; - - std::string fileName = cacheWriteDirent->d_name; - uploadObjs.push_back(fileName); - } - doRet = posixWrapper_->closedir(cacheWriteDir); - if (doRet < 0) { - LOG(ERROR) << "close error, errno = " << errno; - return doRet; + std::function *cacheObj)> listDir; + listDir = [&listDir, this](const std::string &path, + std::vector *cacheObj) -> bool { + DIR *dir; + struct dirent *ent; + std::string fileName, nextdir; + if ((dir = posixWrapper_->opendir(path.c_str())) != NULL) { + while ((ent = posixWrapper_->readdir(dir)) != NULL) { + if (strncmp(ent->d_name, ".", 1) == 0 || + strncmp(ent->d_name, "..", 2) == 0) { + continue; + } else if (ent->d_type == 8) { + fileName = std::string(ent->d_name); + VLOG(9) << "LoadAllCacheFile obj, name = " << fileName; + cacheObj->emplace_back(fileName); + } else { + nextdir = std::string(ent->d_name); + nextdir = path + '/' + nextdir; + if (!listDir(nextdir, cacheObj)) { + return false; + } + } + } + int ret = posixWrapper_->closedir(dir); + if (ret < 0) { + LOG(ERROR) << "close dir " << dir << ", error = " << errno; + } + return ret >= 0; + } + LOG(ERROR) << "cache write dir open failed, path: " << path; + return false; + }; + ret = listDir(fileFullPath, &uploadObjs); + if (!ret) { + return -1; } if (uploadObjs.empty()) { return 0; @@ -380,7 +414,8 @@ int DiskCacheWrite::UploadAllCacheWriteFile() { client_->UploadAsync(context); }; auto context = std::make_shared(); - context->key = *iter; + context->key = curvefs::common::s3util::GenPathByObjName( + 
*iter, objectPrefix_); context->buffer = buffer; context->bufferSize = fileSize; context->cb = cb; @@ -420,6 +455,14 @@ int DiskCacheWrite::WriteDiskFile(const std::string fileName, const char *buf, std::string fileFullPath; int fd, ret; fileFullPath = GetCacheIoFullDir() + "/" + fileName; + if (objectPrefix_ != 0) { + ret = CreateDir(fileFullPath); + if (ret < 0) { + LOG(ERROR) << "create dirpath error. errno = " << errno + << ", file = " << fileFullPath; + return -1; + } + } fd = posixWrapper_->open(fileFullPath.c_str(), O_RDWR | O_CREAT, MODE); if (fd < 0) { LOG(ERROR) << "open disk file error. errno = " << errno @@ -427,7 +470,7 @@ int DiskCacheWrite::WriteDiskFile(const std::string fileName, const char *buf, return fd; } ssize_t writeLen = posixWrapper_->write(fd, buf, length); - if (writeLen < 0 || writeLen < length) { + if (writeLen < static_cast(length)) { LOG(ERROR) << "write disk file error. ret: " << writeLen << ", file: " << fileName << ", error: " << errno; @@ -458,5 +501,13 @@ int DiskCacheWrite::WriteDiskFile(const std::string fileName, const char *buf, return writeLen; } +bool DiskCacheWrite::IsCacheClean() { + if (!WriteCacheValid()) { + return true; + } + std::set objs; + return LoadAllCacheFile(&objs) == 0 && objs.empty(); +} + } // namespace client } // namespace curvefs diff --git a/curvefs/src/client/s3/disk_cache_write.h b/curvefs/src/client/s3/disk_cache_write.h index 54e13eeaa4..d0da516171 100644 --- a/curvefs/src/client/s3/disk_cache_write.h +++ b/curvefs/src/client/s3/disk_cache_write.h @@ -55,20 +55,20 @@ class DiskCacheWrite : public DiskCacheBase { public: class SynchronizationTask { public: - explicit SynchronizationTask(int enventNum) { - countDownEnvent_.Reset(enventNum); + explicit SynchronizationTask(int eventNum) { + countDownEvent_.Reset(eventNum); errorCount_ = 0; } - void Wait() { countDownEnvent_.Wait(); } + void Wait() { countDownEvent_.Wait(); } - void Signal() { countDownEnvent_.Signal(); } + void Signal() { countDownEvent_.Signal(); } void SetError() { errorCount_.fetch_add(1); } bool Success() { return errorCount_ == 0; } public: - curve::common::CountDownEvent countDownEnvent_; + curve::common::CountDownEvent countDownEvent_; std::atomic errorCount_; }; @@ -82,10 +82,11 @@ class DiskCacheWrite : public DiskCacheBase { } void Init(std::shared_ptr client, std::shared_ptr posixWrapper, - const std::string cacheDir, uint64_t asyncLoadPeriodMs, + const std::string cacheDir, uint32_t objectPrefix, + uint64_t asyncLoadPeriodMs, std::shared_ptr> cachedObjName); /** - * @brief write obj to write cahce disk + * @brief write obj to write cache disk * @param[in] client S3Client * @param[in] option config option * @return success: 0, fail : < 0 @@ -116,7 +117,7 @@ class DiskCacheWrite : public DiskCacheBase { virtual int UploadFileByInode(const std::string &inode); /** - * @brief: start aync upload thread + * @brief: start async upload thread */ virtual int AsyncUploadRun(); /** @@ -125,7 +126,7 @@ class DiskCacheWrite : public DiskCacheBase { */ virtual void AsyncUploadEnqueue(const std::string objName); /** - * @brief: stop aync upload thread. + * @brief: stop async upload thread. 
*/ virtual int AsyncUploadStop(); @@ -133,6 +134,11 @@ class DiskCacheWrite : public DiskCacheBase { metric_ = metric; } + /** + * @brief check that cache dir does not exist or there is no cache file + */ + virtual bool IsCacheClean(); + private: using DiskCacheBase::Init; int AsyncUploadFunc(); @@ -146,7 +152,8 @@ class DiskCacheWrite : public DiskCacheBase { curve::common::Thread backEndThread_; curve::common::Atomic isRunning_; std::list waitUpload_; - bthread::Mutex mtx_; + std::mutex mtx_; + std::condition_variable cond_; InterruptibleSleeper sleeper_; uint64_t asyncLoadPeriodMs_; std::shared_ptr client_; diff --git a/curvefs/src/client/volume/default_volume_storage.cpp b/curvefs/src/client/volume/default_volume_storage.cpp index bd8a2a391d..f6aa3e614d 100644 --- a/curvefs/src/client/volume/default_volume_storage.cpp +++ b/curvefs/src/client/volume/default_volume_storage.cpp @@ -30,7 +30,7 @@ #include #include "absl/meta/type_traits.h" -#include "curvefs/src/client/error_code.h" +#include "curvefs/src/client/filesystem/error.h" #include "curvefs/src/client/inode_cache_manager.h" #include "curvefs/src/client/inode_wrapper.h" #include "curvefs/src/client/volume/extent_cache.h" @@ -74,7 +74,8 @@ std::ostream& operator<<(std::ostream& os, const std::vector& iov) { CURVEFS_ERROR DefaultVolumeStorage::Write(uint64_t ino, off_t offset, size_t len, - const char* data) { + const char* data, + FileOut* fileOut) { std::shared_ptr inodeWrapper; LatencyUpdater updater(&metric_.writeLatency); auto ret = inodeCacheManager_->GetInode(ino, inodeWrapper); @@ -116,6 +117,7 @@ CURVEFS_ERROR DefaultVolumeStorage::Write(uint64_t ino, } inodeWrapper->UpdateTimestampLocked(kModifyTime | kChangeTime); + inodeWrapper->GetInodeAttrLocked(&fileOut->attr); } inodeCacheManager_->ShipToFlush(inodeWrapper); diff --git a/curvefs/src/client/volume/default_volume_storage.h b/curvefs/src/client/volume/default_volume_storage.h index c7d599f5e7..8c051cb580 100644 --- a/curvefs/src/client/volume/default_volume_storage.h +++ b/curvefs/src/client/volume/default_volume_storage.h @@ -31,12 +31,16 @@ #include "curvefs/src/client/volume/volume_storage.h" #include "curvefs/src/volume/block_device_client.h" #include "curvefs/src/volume/space_manager.h" +#include "curvefs/src/client/filesystem/error.h" +#include "curvefs/src/client/filesystem/meta.h" namespace curvefs { namespace client { using ::curvefs::volume::BlockDeviceClient; using ::curvefs::volume::SpaceManager; +using ::curvefs::client::filesystem::CURVEFS_ERROR; +using ::curvefs::client::filesystem::FileOut; class InodeCacheManager; @@ -66,7 +70,8 @@ class DefaultVolumeStorage final : public VolumeStorage { CURVEFS_ERROR Write(uint64_t ino, off_t offset, size_t len, - const char* data) override; + const char* data, + FileOut* fileOut) override; CURVEFS_ERROR Flush(uint64_t ino) override; diff --git a/curvefs/src/client/volume/volume_storage.h b/curvefs/src/client/volume/volume_storage.h index 90496237ed..255afa2960 100644 --- a/curvefs/src/client/volume/volume_storage.h +++ b/curvefs/src/client/volume/volume_storage.h @@ -28,11 +28,15 @@ #include #include -#include "curvefs/src/client/error_code.h" +#include "curvefs/src/client/filesystem/error.h" +#include "curvefs/src/client/filesystem/meta.h" namespace curvefs { namespace client { +using ::curvefs::client::filesystem::CURVEFS_ERROR; +using ::curvefs::client::filesystem::FileOut; + class VolumeStorage { public: virtual ~VolumeStorage() = default; @@ -45,7 +49,8 @@ class VolumeStorage { virtual CURVEFS_ERROR 
Write(uint64_t ino, off_t offset, size_t len, - const char* data) = 0; + const char* data, + FileOut* fileOut) = 0; virtual CURVEFS_ERROR Flush(uint64_t ino) = 0; diff --git a/curvefs/src/client/warmup/warmup_manager.cpp b/curvefs/src/client/warmup/warmup_manager.cpp index ededad191e..3392a8f89d 100644 --- a/curvefs/src/client/warmup/warmup_manager.cpp +++ b/curvefs/src/client/warmup/warmup_manager.cpp @@ -32,6 +32,7 @@ #include #include +#include "curvefs/src/client/common/common.h" #include "curvefs/src/client/inode_wrapper.h" #include "curvefs/src/client/kvclient/kvclient_manager.h" #include "curvefs/src/client/s3/client_s3_cache_manager.h" @@ -48,16 +49,17 @@ using curve::common::WriteLockGuard; #define WARMUP_CHECKINTERVAL_US (1000 * 1000) -bool WarmupManagerS3Impl::AddWarmupFilelist(fuse_ino_t key) { +bool WarmupManagerS3Impl::AddWarmupFilelist(fuse_ino_t key, + WarmupStorageType type) { if (!mounted_.load(std::memory_order_acquire)) { LOG(ERROR) << "not mounted"; return false; } // add warmup Progress - if (AddWarmupProcess(key)) { + if (AddWarmupProcess(key, type)) { VLOG(9) << "add warmup list task:" << key; WriteLockGuard lock(warmupFilelistDequeMutex_); - auto iter = FindKeyWarmupFilelistLocked(key); + auto iter = FindWarmupFilelistByKeyLocked(key); if (iter == warmupFilelistDeque_.end()) { std::shared_ptr inodeWrapper; CURVEFS_ERROR ret = inodeManager_->GetInode(key, inodeWrapper); @@ -73,14 +75,14 @@ bool WarmupManagerS3Impl::AddWarmupFilelist(fuse_ino_t key) { return true; } -bool WarmupManagerS3Impl::AddWarmupFile(fuse_ino_t key, - const std::string &path) { +bool WarmupManagerS3Impl::AddWarmupFile(fuse_ino_t key, const std::string &path, + WarmupStorageType type) { if (!mounted_.load(std::memory_order_acquire)) { LOG(ERROR) << "not mounted"; return false; } // add warmup Progress - if (AddWarmupProcess(key)) { + if (AddWarmupProcess(key, type)) { VLOG(9) << "add warmup single task:" << key; FetchDentryEnqueue(key, path); } @@ -246,7 +248,7 @@ void WarmupManagerS3Impl::FetchDentry(fuse_ino_t key, fuse_ino_t ino, } if (FsFileType::TYPE_S3 == dentry.type()) { WriteLockGuard lock(warmupInodesDequeMutex_); - auto iterDeque = FindKeyWarmupInodesLocked(key); + auto iterDeque = FindWarmupInodesByKeyLocked(key); if (iterDeque == warmupInodesDeque_.end()) { warmupInodesDeque_.emplace_back( key, std::set{dentry.inodeid()}); @@ -286,7 +288,7 @@ void WarmupManagerS3Impl::FetchChildDentry(fuse_ino_t key, fuse_ino_t ino) { << " dentry: " << dentry.name(); if (FsFileType::TYPE_S3 == dentry.type()) { WriteLockGuard lock(warmupInodesDequeMutex_); - auto iterDeque = FindKeyWarmupInodesLocked(key); + auto iterDeque = FindWarmupInodesByKeyLocked(key); if (iterDeque == warmupInodesDeque_.end()) { warmupInodesDeque_.emplace_back( key, std::set{dentry.inodeid()}); @@ -329,8 +331,7 @@ void WarmupManagerS3Impl::FetchDataEnqueue(fuse_ino_t key, fuse_ino_t ino) { TravelChunks(key, ino, s3ChunkInfoMap); }; AddFetchS3objectsTask(key, task); - VLOG(9) - << "FetchDataEnqueue end: key:" << key << " inode: " << ino; + VLOG(9) << "FetchDataEnqueue end: key:" << key << " inode: " << ino; } void WarmupManagerS3Impl::TravelChunks( @@ -343,7 +344,7 @@ void WarmupManagerS3Impl::TravelChunks( TravelChunk(ino, infoIter.second, &prefetchObjs); { ReadLockGuard lock(inode2ProgressMutex_); - auto iter = FindKeyWarmupProgressLocked(key); + auto iter = FindWarmupProgressByKeyLocked(key); if (iter != inode2Progress_.end()) { iter->second.AddTotal(prefetchObjs.size()); } else { @@ -363,9 +364,9 @@ void 
WarmupManagerS3Impl::TravelChunk(fuse_ino_t ino, ObjectListType *prefetchObjs) { uint64_t blockSize = s3Adaptor_->GetBlockSize(); uint64_t chunkSize = s3Adaptor_->GetChunkSize(); + uint32_t objectPrefix = s3Adaptor_->GetObjectPrefix(); uint64_t offset, len, chunkid, compaction; - for (size_t i = 0; i < chunkInfo.s3chunks_size(); i++) { - auto const &chunkinfo = chunkInfo.s3chunks(i); + for (const auto &chunkinfo : chunkInfo.s3chunks()) { auto fsId = fsInfo_->fsid(); chunkid = chunkinfo.chunkid(); compaction = chunkinfo.compaction(); @@ -373,14 +374,12 @@ void WarmupManagerS3Impl::TravelChunk(fuse_ino_t ino, len = chunkinfo.len(); // the offset in the chunk uint64_t chunkPos = offset % chunkSize; - // the offset in the block - uint64_t blockPos = chunkPos % blockSize; // the first blockIndex uint64_t blockIndexBegin = chunkPos / blockSize; if (len < blockSize) { // just one block auto objectName = curvefs::common::s3util::GenObjName( - chunkid, blockIndexBegin, compaction, fsId, ino); + chunkid, blockIndexBegin, compaction, fsId, ino, objectPrefix); prefetchObjs->push_back(std::make_pair(objectName, len)); } else { // the offset in the block @@ -412,7 +411,8 @@ void WarmupManagerS3Impl::TravelChunk(fuse_ino_t ino, if (!firstBlockFull) { travelStartIndex = blockIndexBegin + 1; auto objectName = curvefs::common::s3util::GenObjName( - chunkid, blockIndexBegin, compaction, fsId, ino); + chunkid, blockIndexBegin, compaction, + fsId, ino, objectPrefix); prefetchObjs->push_back( std::make_pair(objectName, firstBlockSize)); } else { @@ -424,7 +424,8 @@ void WarmupManagerS3Impl::TravelChunk(fuse_ino_t ino, ? blockIndexEnd : blockIndexEnd - 1; auto objectName = curvefs::common::s3util::GenObjName( - chunkid, blockIndexEnd, compaction, fsId, ino); + chunkid, blockIndexEnd, compaction, + fsId, ino, objectPrefix); // there is no need to care about the order // in which objects are downloaded prefetchObjs->push_back( @@ -445,7 +446,7 @@ void WarmupManagerS3Impl::TravelChunk(fuse_ino_t ino, for (auto blockIndex = travelStartIndex; blockIndex <= travelEndIndex; blockIndex++) { auto objectName = curvefs::common::s3util::GenObjName( - chunkid, blockIndex, compaction, fsId, ino); + chunkid, blockIndex, compaction, fsId, ino, objectPrefix); prefetchObjs->push_back(std::make_pair(objectName, blockSize)); } } @@ -459,28 +460,23 @@ void WarmupManagerS3Impl::WarmUpAllObjs( const std::list> &prefetchObjs) { std::atomic pendingReq(0); curve::common::CountDownEvent cond(1); + uint64_t start = butil::cpuwide_time_us(); // callback function GetObjectAsyncCallBack cb = [&](const S3Adapter *adapter, const std::shared_ptr &context) { + (void)adapter; if (bgFetchStop_.load(std::memory_order_acquire)) { VLOG(9) << "need stop warmup"; cond.Signal(); return; } - { - // update progress - ReadLockGuard lock(inode2ProgressMutex_); - auto iter = FindKeyWarmupProgressLocked(key); - if (iter != inode2Progress_.end()) { - iter->second.FinishedPlusOne(); - } else { - VLOG(9) << "no such warmup progress: " << key; - } - } if (context->retCode == 0) { VLOG(9) << "Get Object success: " << context->key; - PutObjectToCache(context->key, context->buf, context->len); + PutObjectToCache(key, context->key, context->buf, context->len); + CollectMetrics(&warmupS3Metric_.warmupS3Cached, context->len, + start); + warmupS3Metric_.warmupS3CacheSize << context->len; if (pendingReq.fetch_sub(1, std::memory_order_seq_cst) == 1) { VLOG(6) << "pendingReq is over"; cond.Signal(); @@ -488,6 +484,7 @@ void WarmupManagerS3Impl::WarmUpAllObjs( delete[] 
context->buf; return; } + warmupS3Metric_.warmupS3Cached.eps.count << 1; if (++context->retry >= option_.downloadMaxRetryTimes) { if (pendingReq.fetch_sub(1, std::memory_order_seq_cst) == 1) { VLOG(6) << "pendingReq is over"; @@ -511,9 +508,17 @@ void WarmupManagerS3Impl::WarmUpAllObjs( VLOG(9) << "download start: " << iter.first; std::string name = iter.first; uint64_t readLen = iter.second; - if (s3Adaptor_->GetDiskCacheManager()->IsCached(name)) { - pendingReq.fetch_sub(1); - continue; + { + ReadLockGuard lock(inode2ProgressMutex_); + auto iterProgress = FindWarmupProgressByKeyLocked(key); + if (iterProgress->second.GetStorageType() == + curvefs::client::common::WarmupStorageType:: + kWarmupStorageTypeDisk && + s3Adaptor_->GetDiskCacheManager()->IsCached(name)) { + // storage in disk and has cached + pendingReq.fetch_sub(1); + continue; + } } char *cacheS3 = new char[readLen]; memset(cacheS3, 0, readLen); @@ -535,25 +540,26 @@ bool WarmupManagerS3Impl::ProgressDone(fuse_ino_t key) { bool ret; { ReadLockGuard lockList(warmupFilelistDequeMutex_); - ret = FindKeyWarmupFilelistLocked(key) == warmupFilelistDeque_.end(); + ret = + FindWarmupFilelistByKeyLocked(key) == warmupFilelistDeque_.end(); } { ReadLockGuard lockDentry(inode2FetchDentryPoolMutex_); - ret = ret && (FindKeyFetchDentryPoolLocked(key) == + ret = ret && (FindFetchDentryPoolByKeyLocked(key) == inode2FetchDentryPool_.end()); } { ReadLockGuard lockInodes(warmupInodesDequeMutex_); - ret = - ret && (FindKeyWarmupInodesLocked(key) == warmupInodesDeque_.end()); + ret = ret && + (FindWarmupInodesByKeyLocked(key) == warmupInodesDeque_.end()); } { ReadLockGuard lockS3Objects(inode2FetchS3ObjectsPoolMutex_); - ret = ret && (FindKeyFetchS3ObjectsPoolLocked(key) == + ret = ret && (FindFetchS3ObjectsPoolByKeyLocked(key) == inode2FetchS3ObjectsPool_.end()); } return ret; @@ -672,21 +678,45 @@ void WarmupManagerS3Impl::AddFetchS3objectsTask(fuse_ino_t key, } } -void WarmupManagerS3Impl::PutObjectToCache(const std::string &filename, +void WarmupManagerS3Impl::PutObjectToCache(fuse_ino_t key, + const std::string &filename, const char *data, uint64_t len) { - int ret = - s3Adaptor_->GetDiskCacheManager()->WriteReadDirect(filename, data, len); - if (ret < 0) { - LOG_EVERY_SECOND(INFO) - << "write read directly failed, key: " << filename; + ReadLockGuard lock(inode2ProgressMutex_); + auto iter = FindWarmupProgressByKeyLocked(key); + if (iter == inode2Progress_.end()) { + VLOG(9) << "no this warmup task progress: " << key; + return; } - - if (kvClientManager_ != nullptr) { - kvClientManager_->Set( - std::make_shared(filename, data, len)); + int ret; + // update progress + iter->second.FinishedPlusOne(); + switch (iter->second.GetStorageType()) { + case curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk: + ret = s3Adaptor_->GetDiskCacheManager()->WriteReadDirect(filename, data, + len); + if (ret < 0) { + LOG_EVERY_SECOND(INFO) + << "write read directly failed, key: " << filename; + } + break; + case curvefs::client::common::WarmupStorageType::kWarmupStorageTypeKvClient: + if (kvClientManager_ != nullptr) { + kvClientManager_->Set( + std::make_shared(filename, data, len)); + } + break; + default: + LOG_EVERY_N(ERROR, 1000) << "unsupported warmup storage type"; } } +void WarmupManager::CollectMetrics(InterfaceMetric *interface, int count, + uint64_t start) { + interface->bps.count << count; + interface->qps.count << 1; + interface->latency << (butil::cpuwide_time_us() - start); +} + } // namespace warmup } // namespace client } // 
namespace curvefs diff --git a/curvefs/src/client/warmup/warmup_manager.h b/curvefs/src/client/warmup/warmup_manager.h index 51a2758b99..bb1f80e474 100644 --- a/curvefs/src/client/warmup/warmup_manager.h +++ b/curvefs/src/client/warmup/warmup_manager.h @@ -39,6 +39,7 @@ #include #include +#include "curvefs/src/client/common/common.h" #include "curvefs/src/client/dentry_cache_manager.h" #include "curvefs/src/client/fuse_common.h" #include "curvefs/src/client/inode_cache_manager.h" @@ -49,6 +50,7 @@ #include "src/common/concurrent/concurrent.h" #include "src/common/concurrent/rw_lock.h" #include "curvefs/src/common/task_thread_pool.h" +#include "curvefs/src/client/metric/client_metric.h" namespace curvefs { namespace client { @@ -60,6 +62,8 @@ using ThreadPool = curvefs::common::TaskThreadPool2; using curve::common::BthreadRWLock; +using curvefs::client::common::WarmupStorageType; + class WarmupFile { public: explicit WarmupFile(fuse_ino_t key = 0, uint64_t fileLen = 0) @@ -103,11 +107,13 @@ using FuseOpReadFunctionType = class WarmupProgress { public: - explicit WarmupProgress(uint64_t total = 0, uint64_t finished = 0) - : total_(total), finished_(finished) {} + explicit WarmupProgress(WarmupStorageType type = curvefs::client::common:: + WarmupStorageType::kWarmupStorageTypeUnknown) + : total_(0), finished_(0), storageType_(type) {} WarmupProgress(const WarmupProgress &wp) - : total_(wp.total_), finished_(wp.finished_) {} + : total_(wp.total_), finished_(wp.finished_), + storageType_(wp.storageType_) {} void AddTotal(uint64_t add) { std::lock_guard lock(totalMutex_); @@ -142,11 +148,16 @@ class WarmupProgress { ",finished:" + std::to_string(finished_); } + WarmupStorageType GetStorageType() { + return storageType_; + } + private: uint64_t total_; std::mutex totalMutex_; uint64_t finished_; std::mutex finishedMutex_; + WarmupStorageType storageType_; }; class WarmupManager { @@ -177,8 +188,9 @@ class WarmupManager { } virtual void UnInit() { ClearWarmupProcess(); } - virtual bool AddWarmupFilelist(fuse_ino_t key) = 0; - virtual bool AddWarmupFile(fuse_ino_t key, const std::string &path) = 0; + virtual bool AddWarmupFilelist(fuse_ino_t key, WarmupStorageType type) = 0; + virtual bool AddWarmupFile(fuse_ino_t key, const std::string &path, + WarmupStorageType type) = 0; void SetMounted(bool mounted) { mounted_.store(mounted, std::memory_order_release); @@ -205,7 +217,7 @@ class WarmupManager { bool QueryWarmupProgress(fuse_ino_t key, WarmupProgress *progress) { bool ret = true; ReadLockGuard lock(inode2ProgressMutex_); - auto iter = FindKeyWarmupProgressLocked(key); + auto iter = FindWarmupProgressByKeyLocked(key); if (iter != inode2Progress_.end()) { *progress = iter->second; } else { @@ -214,6 +226,8 @@ class WarmupManager { return ret; } + void CollectMetrics(InterfaceMetric *interface, int count, uint64_t start); + protected: /** * @brief Add warmupProcess @@ -221,9 +235,9 @@ class WarmupManager { * @return true * @return false warmupProcess has been added */ - virtual bool AddWarmupProcess(fuse_ino_t key) { + virtual bool AddWarmupProcess(fuse_ino_t key, WarmupStorageType type) { WriteLockGuard lock(inode2ProgressMutex_); - auto ret = inode2Progress_.emplace(key, WarmupProgress()); + auto ret = inode2Progress_.emplace(key, WarmupProgress(type)); return ret.second; } @@ -234,7 +248,7 @@ class WarmupManager { * @return std::unordered_map::iterator */ std::unordered_map::iterator - FindKeyWarmupProgressLocked(fuse_ino_t key) { + FindWarmupProgressByKeyLocked(fuse_ino_t key) { return 
inode2Progress_.find(key); } @@ -284,8 +298,9 @@ class WarmupManagerS3Impl : public WarmupManager { std::move(readFunc), std::move(kvClientManager)), s3Adaptor_(std::move(s3Adaptor)) {} - bool AddWarmupFilelist(fuse_ino_t key) override; - bool AddWarmupFile(fuse_ino_t key, const std::string &path) override; + bool AddWarmupFilelist(fuse_ino_t key, WarmupStorageType type) override; + bool AddWarmupFile(fuse_ino_t key, const std::string &path, + WarmupStorageType type) override; void Init(const FuseClientOption &option) override; void UnInit() override; @@ -311,7 +326,7 @@ class WarmupManagerS3Impl : public WarmupManager { * @return std::deque::iterator */ std::deque::iterator - FindKeyWarmupInodesLocked(fuse_ino_t key) { + FindWarmupInodesByKeyLocked(fuse_ino_t key) { return std::find_if(warmupInodesDeque_.begin(), warmupInodesDeque_.end(), [key](const WarmupInodes &inodes) { @@ -326,7 +341,7 @@ class WarmupManagerS3Impl : public WarmupManager { * @return std::deque::iterator */ std::deque::iterator - FindKeyWarmupFilelistLocked(fuse_ino_t key) { + FindWarmupFilelistByKeyLocked(fuse_ino_t key) { return std::find_if(warmupFilelistDeque_.begin(), warmupFilelistDeque_.end(), [key](const WarmupFilelist &filelist_) { @@ -342,7 +357,7 @@ class WarmupManagerS3Impl : public WarmupManager { * std::unique_ptr>::iterator */ std::unordered_map>::iterator - FindKeyFetchDentryPoolLocked(fuse_ino_t key) { + FindFetchDentryPoolByKeyLocked(fuse_ino_t key) { return inode2FetchDentryPool_.find(key); } @@ -354,7 +369,7 @@ class WarmupManagerS3Impl : public WarmupManager { * std::unique_ptr>::iterator */ std::unordered_map>::iterator - FindKeyFetchS3ObjectsPoolLocked(fuse_ino_t key) { + FindFetchS3ObjectsPoolByKeyLocked(fuse_ino_t key) { return inode2FetchS3ObjectsPool_.find(key); } @@ -398,8 +413,8 @@ class WarmupManagerS3Impl : public WarmupManager { void AddFetchS3objectsTask(fuse_ino_t key, std::function task); - void PutObjectToCache(const std::string &filename, const char *data, - uint64_t len); + void PutObjectToCache(fuse_ino_t key, const std::string &filename, + const char *data, uint64_t len); protected: std::deque warmupFilelistDeque_; @@ -424,6 +439,8 @@ class WarmupManagerS3Impl : public WarmupManager { std::unordered_map> inode2FetchS3ObjectsPool_; mutable RWLock inode2FetchS3ObjectsPoolMutex_; + + curvefs::client::metric::WarmupManagerS3Metric warmupS3Metric_; }; } // namespace warmup diff --git a/curvefs/src/client/xattr_manager.cpp b/curvefs/src/client/xattr_manager.cpp index e9f72edcc6..4f9c3bf928 100644 --- a/curvefs/src/client/xattr_manager.cpp +++ b/curvefs/src/client/xattr_manager.cpp @@ -78,20 +78,14 @@ CURVEFS_ERROR XattrManager::CalOneLayerSumInfo(InodeAttr *attr) { summaryInfo.entries++; summaryInfo.fbytes += it.length(); } - if (!(AddUllStringToFirst( - &(attr->mutable_xattr()->find(XATTRFILES)->second), - summaryInfo.files, true) && - AddUllStringToFirst( - &(attr->mutable_xattr()->find(XATTRSUBDIRS)->second), - summaryInfo.subdirs, true) && - AddUllStringToFirst( - &(attr->mutable_xattr()->find(XATTRENTRIES)->second), - summaryInfo.entries, true) && - AddUllStringToFirst( - &(attr->mutable_xattr()->find(XATTRFBYTES)->second), - summaryInfo.fbytes + attr->length(), true))) { - ret = CURVEFS_ERROR::INTERNAL; - } + (*attr->mutable_xattr())[XATTRFILES] = + std::to_string(summaryInfo.files); + (*attr->mutable_xattr())[XATTRSUBDIRS] = + std::to_string(summaryInfo.subdirs); + (*attr->mutable_xattr())[XATTRENTRIES] = + std::to_string(summaryInfo.entries); + 
(*attr->mutable_xattr())[XATTRFBYTES] = + std::to_string(summaryInfo.fbytes + attr->length()); } return ret; } diff --git a/curvefs/src/client/xattr_manager.h b/curvefs/src/client/xattr_manager.h index ec36fbb2b3..f97cf40186 100644 --- a/curvefs/src/client/xattr_manager.h +++ b/curvefs/src/client/xattr_manager.h @@ -36,7 +36,7 @@ #include "curvefs/src/common/define.h" #include "curvefs/proto/metaserver.pb.h" -#include "curvefs/src/client/error_code.h" +#include "curvefs/src/client/filesystem/error.h" #include "curvefs/src/client/client_operator.h" #include "src/common/interruptible_sleeper.h" @@ -126,13 +126,13 @@ class XattrManager { // dentry cache manager std::shared_ptr dentryManager_; - Atomic isStop_; - InterruptibleSleeper sleeper_; uint32_t listDentryLimit_; uint32_t listDentryThreads_; + + Atomic isStop_; }; } // namespace client diff --git a/curvefs/src/common/process.cpp b/curvefs/src/common/process.cpp index 2239de2dbc..2966702b27 100644 --- a/curvefs/src/common/process.cpp +++ b/curvefs/src/common/process.cpp @@ -24,37 +24,39 @@ #include "curvefs/src/common/process.h" -extern char** environ; +extern char **environ; namespace curvefs { namespace common { -char** Process::OsArgv_ = nullptr; -char* Process::OsArgvLast_ = nullptr; +char **Process::OsArgv_ = nullptr; +char *Process::OsArgvLast_ = nullptr; pid_t Process::SpawnProcess(ProcFunc proc) { pid_t pid = fork(); switch (pid) { - case -1: - return -1; - case 0: - proc(); - break; - default: - break; + case -1: + return -1; + case 0: + proc(); + break; + default: + break; } return pid; } -void Process::InitSetProcTitle(int argc, char* const* argv) { +void Process::InitSetProcTitle(int argc, char *const *argv) { + (void)argc; // Silence the warning + size_t size = 0; for (auto i = 0; environ[i]; i++) { size += strlen(environ[i]) + 1; } - OsArgv_ = (char**) argv; // NOLINT + OsArgv_ = (char **)argv; // NOLINT OsArgvLast_ = OsArgv_[0]; for (auto i = 0; OsArgv_[i]; i++) { if (OsArgvLast_ == OsArgv_[i]) { @@ -62,7 +64,7 @@ void Process::InitSetProcTitle(int argc, char* const* argv) { } } - char* p = new (std::nothrow) char[size]; + char *p = new (std::nothrow) char[size]; for (auto i = 0; environ[i]; i++) { if (OsArgvLast_ == environ[i]) { size = strlen(environ[i]) + 1; @@ -78,15 +80,15 @@ void Process::InitSetProcTitle(int argc, char* const* argv) { OsArgvLast_--; } -void Process::SetProcTitle(const std::string& title) { +void Process::SetProcTitle(const std::string &title) { OsArgv_[1] = NULL; strncpy(OsArgv_[0], title.c_str(), OsArgvLast_ - OsArgv_[0]); } -bool Process::InitSignals(const std::vector& signals) { +bool Process::InitSignals(const std::vector &signals) { struct sigaction sa; - for (const auto& signal : signals) { + for (const auto &signal : signals) { memset(&sa, 0, sizeof(struct sigaction)); if (signal.handler) { sa.sa_sigaction = signal.handler; diff --git a/curvefs/src/common/rpc_stream.cpp b/curvefs/src/common/rpc_stream.cpp index b01d8c0e47..27490e3d63 100644 --- a/curvefs/src/common/rpc_stream.cpp +++ b/curvefs/src/common/rpc_stream.cpp @@ -257,6 +257,8 @@ std::shared_ptr StreamServer::Accept(brpc::Controller* cntl) { int StreamServer::on_received_messages(brpc::StreamId id, butil::IOBuf* const buffers[], size_t size) { + (void)buffers; // Slience the warnings + (void)size; LOG(ERROR) << "on_received_messages: stream (streamId=" << id << ") in server-side should not reveice any message" << ", but now we received"; diff --git a/curvefs/src/common/s3util.cpp b/curvefs/src/common/s3util.cpp index 
61ef25a149..7ce98d440e 100644 --- a/curvefs/src/common/s3util.cpp +++ b/curvefs/src/common/s3util.cpp @@ -27,10 +27,39 @@ namespace curvefs { namespace common { namespace s3util { -bool ValidNameOfInode(const std::string &inode, const std::string &objName) { - std::vector res; - curve::common::SplitString(objName, "_", &res); - return res.size() == 5 && res[1] == inode; + +std::string GenPathByObjName(const std::string &objName, + uint32_t objectPrefix_) { + std::vector objs; + uint64_t inodeid; + curve::common::SplitString(objName, "_", &objs); + if (objectPrefix_ == 0) { + return objName; + } else if (objectPrefix_ == 1) { + inodeid = std::stoll(objs[1]); + return objs[0] + "/" + std::to_string(inodeid/1000/1000) + "/" + + std::to_string(inodeid/1000) + "/" + objName; + } else { + inodeid = std::stoll(objs[1]); + return objs[0] + "/" + std::to_string(inodeid%256) + "/" + + std::to_string(inodeid/1000) + "/" + objName; + } +} + +bool ValidNameOfInode(const std::string &inode, const std::string &objName, + uint32_t objectPrefix) { + std::vector res, objs; + if (objectPrefix == 0) { + curve::common::SplitString(objName, "_", &res); + return res.size() == 5 && res[1] == inode; + } else { + curve::common::SplitString(objName, "/", &objs); + if (objs.size() == 4) { + curve::common::SplitString(objs[3], "_", &res); + return res.size() == 5 && res[1] == inode; + } + return false; + } } } // namespace s3util } // namespace common diff --git a/curvefs/src/common/s3util.h b/curvefs/src/common/s3util.h index 45f71d2f21..f414b2c911 100644 --- a/curvefs/src/common/s3util.h +++ b/curvefs/src/common/s3util.h @@ -31,13 +31,37 @@ namespace s3util { inline std::string GenObjName(uint64_t chunkid, uint64_t index, uint64_t compaction, uint64_t fsid, - uint64_t inodeid) { - return std::to_string(fsid) + "_" + std::to_string(inodeid) + "_" + - std::to_string(chunkid) + "_" + std::to_string(index) + "_" + - std::to_string(compaction); + uint64_t inodeid, + uint32_t objectPrefix) { + std::string objName; + if (objectPrefix == 0) { + objName = std::to_string(fsid) + "_" + + std::to_string(inodeid) + "_" + + std::to_string(chunkid) + "_" + std::to_string(index) + "_" + + std::to_string(compaction); + } else if (objectPrefix == 1) { + objName = std::to_string(fsid) + "/" + + std::to_string(inodeid/1000/1000) + "/" + + std::to_string(inodeid/1000) + "/" + + std::to_string(fsid) + "_" + std::to_string(inodeid) + "_" + + std::to_string(chunkid) + "_" + std::to_string(index) + "_" + + std::to_string(compaction); + } else { + objName = std::to_string(fsid) + "/" + + std::to_string(inodeid%256) + "/" + + std::to_string(inodeid/1000) + "/" + + std::to_string(fsid) + "_" + std::to_string(inodeid) + "_" + + std::to_string(chunkid) + "_" + std::to_string(index) + "_" + + std::to_string(compaction); + } + return objName; } -bool ValidNameOfInode(const std::string &inode, const std::string &objName); +bool ValidNameOfInode(const std::string &inode, const std::string &objName, + uint32_t objectPrefix); + + +std::string GenPathByObjName(const std::string &objName, uint32_t objectPrefix); } // namespace s3util } // namespace common diff --git a/curvefs/src/mds/fs_info_wrapper.cpp b/curvefs/src/mds/fs_info_wrapper.cpp index db58450f68..83b1d9c209 100644 --- a/curvefs/src/mds/fs_info_wrapper.cpp +++ b/curvefs/src/mds/fs_info_wrapper.cpp @@ -113,6 +113,10 @@ void FsInfoWrapper::AddMountPoint(const Mountpoint& mp) { *p = mp; fsInfo_.set_mountnum(fsInfo_.mountnum() + 1); + + if (fsInfo_.enablesumindir() && fsInfo_.mountnum() > 1) { + 
fsInfo_.set_enablesumindir(false); + } } FSStatusCode FsInfoWrapper::DeleteMountPoint(const Mountpoint& mp) { diff --git a/curvefs/src/mds/fs_manager.cpp b/curvefs/src/mds/fs_manager.cpp index 300b204600..021bea0115 100644 --- a/curvefs/src/mds/fs_manager.cpp +++ b/curvefs/src/mds/fs_manager.cpp @@ -264,11 +264,6 @@ FSStatusCode FsManager::CreateFs(const ::curvefs::mds::CreateFsRequest* request, const auto& fsType = request->fstype(); const auto& detail = request->fsdetail(); - // check fsname - if (!CheckFsName(fsName)) { - return FSStatusCode::FSNAME_INVALID; - } - NameLockGuard lock(nameLock_, fsName); FsInfoWrapper wrapper; bool skipCreateNewFs = false; @@ -300,6 +295,11 @@ FSStatusCode FsManager::CreateFs(const ::curvefs::mds::CreateFsRequest* request, } } + // check fsname + if (!CheckFsName(fsName)) { + return FSStatusCode::FSNAME_INVALID; + } + // check s3info if (!skipCreateNewFs && detail.has_s3info()) { const auto& s3Info = detail.s3info(); @@ -843,6 +843,16 @@ void FsManager::RefreshSession(const RefreshSessionRequest* request, // update this client's alive time UpdateClientAliveTime(request->mountpoint(), request->fsname()); + FsInfoWrapper wrapper; + FSStatusCode ret = fsStorage_->Get(request->fsname(), &wrapper); + if (ret != FSStatusCode::OK) { + LOG(WARNING) << "GetFsInfo fail, get fs fail, fsName = " + << request->fsname() + << ", errCode = " << FSStatusCode_Name(ret); + return; + } + + response->set_enablesumindir(wrapper.ProtoFsInfo().enablesumindir()); } FSStatusCode FsManager::ReloadMountedFsVolumeSpace() { diff --git a/curvefs/src/mds/fs_storage.cpp b/curvefs/src/mds/fs_storage.cpp index 8c12cddf48..5e9d41e106 100644 --- a/curvefs/src/mds/fs_storage.cpp +++ b/curvefs/src/mds/fs_storage.cpp @@ -424,14 +424,14 @@ bool PersisKVStorage::RenameFromStorage(const FsInfoWrapper& oldFs, OpType::OpDelete, const_cast(oldKey.c_str()), const_cast(""), - oldKey.size(), + static_cast(oldKey.size()), 0}; Operation op2{ OpType::OpPut, const_cast(newKey.c_str()), const_cast(newValue.c_str()), - newKey.size(), - newValue.size()}; + static_cast(newKey.size()), + static_cast(newValue.size())}; std::vector ops{op1, op2}; int ret = storage_->TxnN(ops); if (ret != EtcdErrCode::EtcdOK) { diff --git a/curvefs/src/mds/heartbeat/heartbeat_service.cpp b/curvefs/src/mds/heartbeat/heartbeat_service.cpp index 046c106f7d..e3827390b3 100644 --- a/curvefs/src/mds/heartbeat/heartbeat_service.cpp +++ b/curvefs/src/mds/heartbeat/heartbeat_service.cpp @@ -36,6 +36,7 @@ void HeartbeatServiceImpl::MetaServerHeartbeat( const ::curvefs::mds::heartbeat::MetaServerHeartbeatRequest *request, ::curvefs::mds::heartbeat::MetaServerHeartbeatResponse *response, ::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); heartbeatManager_->MetaServerHeartbeat(*request, response); } diff --git a/curvefs/src/mds/mds_service.cpp b/curvefs/src/mds/mds_service.cpp index b9544bb38a..76ea58fd5a 100644 --- a/curvefs/src/mds/mds_service.cpp +++ b/curvefs/src/mds/mds_service.cpp @@ -29,13 +29,13 @@ namespace mds { using mds::Mountpoint; -void MdsServiceImpl::CreateFs(::google::protobuf::RpcController* controller, - const ::curvefs::mds::CreateFsRequest* request, - ::curvefs::mds::CreateFsResponse* response, - ::google::protobuf::Closure* done) { +void MdsServiceImpl::CreateFs(::google::protobuf::RpcController *controller, + const ::curvefs::mds::CreateFsRequest *request, + ::curvefs::mds::CreateFsResponse *response, + ::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard 
doneGuard(done); - brpc::Controller* cntl = static_cast(controller); - const std::string& fsName = request->fsname(); + const std::string &fsName = request->fsname(); uint64_t blockSize = request->blocksize(); FSType type = request->fstype(); bool enableSumInDir = request->enablesumindir(); @@ -46,59 +46,56 @@ void MdsServiceImpl::CreateFs(::google::protobuf::RpcController* controller, LOG(INFO) << "CreateFs request: " << request->ShortDebugString(); // create volume fs - auto createVolumeFs = - [&]() { - if (!request->fsdetail().has_volume()) { - response->set_statuscode(FSStatusCode::PARAM_ERROR); - LOG(ERROR) - << "CreateFs request, type is volume, but has no volume" - << ", fsName = " << fsName; - return; - } - const auto& volume = request->fsdetail().volume(); - FSStatusCode status = - fsManager_->CreateFs(request, response->mutable_fsinfo()); - - if (status != FSStatusCode::OK) { - response->clear_fsinfo(); - response->set_statuscode(status); - LOG(ERROR) << "CreateFs fail, fsName = " << fsName - << ", blockSize = " << blockSize - << ", volume.volumeName = " << volume.volumename() - << ", enableSumInDir = " << enableSumInDir - << ", owner = " << request->owner() - << ", capacity = " << request->capacity() - << ", errCode = " << FSStatusCode_Name(status); - return; - } - }; + auto createVolumeFs = [&]() { + if (!request->fsdetail().has_volume()) { + response->set_statuscode(FSStatusCode::PARAM_ERROR); + LOG(ERROR) << "CreateFs request, type is volume, but has no volume" + << ", fsName = " << fsName; + return; + } + const auto &volume = request->fsdetail().volume(); + FSStatusCode status = + fsManager_->CreateFs(request, response->mutable_fsinfo()); + + if (status != FSStatusCode::OK) { + response->clear_fsinfo(); + response->set_statuscode(status); + LOG(ERROR) << "CreateFs fail, fsName = " << fsName + << ", blockSize = " << blockSize + << ", volume.volumeName = " << volume.volumename() + << ", enableSumInDir = " << enableSumInDir + << ", owner = " << request->owner() + << ", capacity = " << request->capacity() + << ", errCode = " << FSStatusCode_Name(status); + return; + } + }; // create s3 fs - auto createS3Fs = - [&]() { - if (!request->fsdetail().has_s3info()) { - response->set_statuscode(FSStatusCode::PARAM_ERROR); - LOG(ERROR) << "CreateFs request, type is s3, but has no s3info" - << ", fsName = " << fsName; - return; - } - const auto& s3Info = request->fsdetail().s3info(); - FSStatusCode status = - fsManager_->CreateFs(request, response->mutable_fsinfo()); - - if (status != FSStatusCode::OK) { - response->clear_fsinfo(); - response->set_statuscode(status); - LOG(ERROR) << "CreateFs fail, fsName = " << fsName - << ", blockSize = " << blockSize - << ", s3Info.bucketname = " << s3Info.bucketname() - << ", enableSumInDir = " << enableSumInDir - << ", owner = " << request->owner() - << ", capacity = " << request->capacity() - << ", errCode = " << FSStatusCode_Name(status); - return; - } - }; + auto createS3Fs = [&]() { + if (!request->fsdetail().has_s3info()) { + response->set_statuscode(FSStatusCode::PARAM_ERROR); + LOG(ERROR) << "CreateFs request, type is s3, but has no s3info" + << ", fsName = " << fsName; + return; + } + const auto &s3Info = request->fsdetail().s3info(); + FSStatusCode status = + fsManager_->CreateFs(request, response->mutable_fsinfo()); + + if (status != FSStatusCode::OK) { + response->clear_fsinfo(); + response->set_statuscode(status); + LOG(ERROR) << "CreateFs fail, fsName = " << fsName + << ", blockSize = " << blockSize + << ", s3Info.bucketname = " << 
s3Info.bucketname() + << ", enableSumInDir = " << enableSumInDir + << ", owner = " << request->owner() + << ", capacity = " << request->capacity() + << ", errCode = " << FSStatusCode_Name(status); + return; + } + }; auto createHybridFs = [&]() { // not support now @@ -118,23 +115,22 @@ void MdsServiceImpl::CreateFs(::google::protobuf::RpcController* controller, }; switch (type) { - case ::curvefs::common::FSType::TYPE_VOLUME: - createVolumeFs(); - break; - case ::curvefs::common::FSType::TYPE_S3: - createS3Fs(); - break; - case ::curvefs::common::FSType::TYPE_HYBRID: - createHybridFs(); - break; - default: - response->set_statuscode(FSStatusCode::PARAM_ERROR); - LOG(ERROR) << "CreateFs fail, fs type is invalid" - << ", fsName = " << fsName - << ", blockSize = " << blockSize << ", fsType = " << type - << ", errCode = " - << FSStatusCode_Name(FSStatusCode::PARAM_ERROR); - break; + case ::curvefs::common::FSType::TYPE_VOLUME: + createVolumeFs(); + break; + case ::curvefs::common::FSType::TYPE_S3: + createS3Fs(); + break; + case ::curvefs::common::FSType::TYPE_HYBRID: + createHybridFs(); + break; + default: + response->set_statuscode(FSStatusCode::PARAM_ERROR); + LOG(ERROR) << "CreateFs fail, fs type is invalid" + << ", fsName = " << fsName << ", blockSize = " << blockSize + << ", fsType = " << type << ", errCode = " + << FSStatusCode_Name(FSStatusCode::PARAM_ERROR); + break; } if (response->statuscode() != FSStatusCode::OK) { @@ -146,14 +142,14 @@ void MdsServiceImpl::CreateFs(::google::protobuf::RpcController* controller, << ", capacity = " << request->capacity(); } -void MdsServiceImpl::MountFs(::google::protobuf::RpcController* controller, - const ::curvefs::mds::MountFsRequest* request, - ::curvefs::mds::MountFsResponse* response, - ::google::protobuf::Closure* done) { +void MdsServiceImpl::MountFs(::google::protobuf::RpcController *controller, + const ::curvefs::mds::MountFsRequest *request, + ::curvefs::mds::MountFsResponse *response, + ::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); - brpc::Controller* cntl = static_cast(controller); - const std::string& fsName = request->fsname(); - const Mountpoint& mount = request->mountpoint(); + const std::string &fsName = request->fsname(); + const Mountpoint &mount = request->mountpoint(); LOG(INFO) << "MountFs request, fsName = " << fsName << ", mountPoint = " << mount.ShortDebugString(); FSStatusCode status = @@ -173,14 +169,14 @@ void MdsServiceImpl::MountFs(::google::protobuf::RpcController* controller, << ", mps: " << response->mutable_fsinfo()->mountpoints_size(); } -void MdsServiceImpl::UmountFs(::google::protobuf::RpcController* controller, - const ::curvefs::mds::UmountFsRequest* request, - ::curvefs::mds::UmountFsResponse* response, - ::google::protobuf::Closure* done) { +void MdsServiceImpl::UmountFs(::google::protobuf::RpcController *controller, + const ::curvefs::mds::UmountFsRequest *request, + ::curvefs::mds::UmountFsResponse *response, + ::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); - brpc::Controller* cntl = static_cast(controller); - const std::string& fsName = request->fsname(); - const Mountpoint& mount = request->mountpoint(); + const std::string &fsName = request->fsname(); + const Mountpoint &mount = request->mountpoint(); LOG(INFO) << "UmountFs request, " << request->ShortDebugString(); FSStatusCode status = fsManager_->UmountFs(fsName, mount); if (status != FSStatusCode::OK) { @@ -196,16 +192,16 @@ void 
MdsServiceImpl::UmountFs(::google::protobuf::RpcController* controller, << ", mountPoint = " << mount.ShortDebugString(); } -void MdsServiceImpl::GetFsInfo(::google::protobuf::RpcController* controller, - const ::curvefs::mds::GetFsInfoRequest* request, - ::curvefs::mds::GetFsInfoResponse* response, - ::google::protobuf::Closure* done) { +void MdsServiceImpl::GetFsInfo(::google::protobuf::RpcController *controller, + const ::curvefs::mds::GetFsInfoRequest *request, + ::curvefs::mds::GetFsInfoResponse *response, + ::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); - brpc::Controller* cntl = static_cast(controller); LOG(INFO) << "GetFsInfo request: " << request->ShortDebugString(); - FsInfo* fsInfo = response->mutable_fsinfo(); + FsInfo *fsInfo = response->mutable_fsinfo(); FSStatusCode status = FSStatusCode::OK; if (request->has_fsid() && request->has_fsname()) { status = @@ -231,13 +227,13 @@ void MdsServiceImpl::GetFsInfo(::google::protobuf::RpcController* controller, << response->ShortDebugString(); } -void MdsServiceImpl::DeleteFs(::google::protobuf::RpcController* controller, - const ::curvefs::mds::DeleteFsRequest* request, - ::curvefs::mds::DeleteFsResponse* response, - ::google::protobuf::Closure* done) { +void MdsServiceImpl::DeleteFs(::google::protobuf::RpcController *controller, + const ::curvefs::mds::DeleteFsRequest *request, + ::curvefs::mds::DeleteFsResponse *response, + ::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); - brpc::Controller* cntl = static_cast(controller); - const std::string& fsName = request->fsname(); + const std::string &fsName = request->fsname(); LOG(INFO) << "DeleteFs request, fsName = " << fsName; FSStatusCode status = fsManager_->DeleteFs(fsName); response->set_statuscode(status); @@ -251,10 +247,12 @@ void MdsServiceImpl::DeleteFs(::google::protobuf::RpcController* controller, } void MdsServiceImpl::AllocateS3Chunk( - ::google::protobuf::RpcController* controller, - const ::curvefs::mds::AllocateS3ChunkRequest* request, - ::curvefs::mds::AllocateS3ChunkResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController *controller, + const ::curvefs::mds::AllocateS3ChunkRequest *request, + ::curvefs::mds::AllocateS3ChunkResponse *response, + ::google::protobuf::Closure *done) { + (void)controller; + brpc::ClosureGuard guard(done); VLOG(9) << "start to allocate chunkId."; @@ -286,10 +284,13 @@ void MdsServiceImpl::AllocateS3Chunk( } void MdsServiceImpl::ListClusterFsInfo( - ::google::protobuf::RpcController* controller, - const ::curvefs::mds::ListClusterFsInfoRequest* request, - ::curvefs::mds::ListClusterFsInfoResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController *controller, + const ::curvefs::mds::ListClusterFsInfoRequest *request, + ::curvefs::mds::ListClusterFsInfoResponse *response, + ::google::protobuf::Closure *done) { + (void)controller; + (void)request; + brpc::ClosureGuard guard(done); LOG(INFO) << "start to check cluster fs info."; fsManager_->GetAllFsInfo(response->mutable_fsinfo()); @@ -302,26 +303,28 @@ void MdsServiceImpl::RefreshSession( const ::curvefs::mds::RefreshSessionRequest *request, ::curvefs::mds::RefreshSessionResponse *response, ::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard guard(done); fsManager_->RefreshSession(request, response); response->set_statuscode(FSStatusCode::OK); } void MdsServiceImpl::GetLatestTxId( - 
::google::protobuf::RpcController* controller, - const GetLatestTxIdRequest* request, - GetLatestTxIdResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController *controller, + const GetLatestTxIdRequest *request, GetLatestTxIdResponse *response, + ::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard guard(done); VLOG(3) << "GetLatestTxId [request]: " << request->DebugString(); fsManager_->GetLatestTxId(request, response); VLOG(3) << "GetLatestTxId [response]: " << response->DebugString(); } -void MdsServiceImpl::CommitTx(::google::protobuf::RpcController* controller, - const CommitTxRequest* request, - CommitTxResponse* response, - ::google::protobuf::Closure* done) { +void MdsServiceImpl::CommitTx(::google::protobuf::RpcController *controller, + const CommitTxRequest *request, + CommitTxResponse *response, + ::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard guard(done); VLOG(3) << "CommitTx [request]: " << request->DebugString(); fsManager_->CommitTx(request, response); diff --git a/curvefs/src/mds/schedule/leaderScheduler.cpp b/curvefs/src/mds/schedule/leaderScheduler.cpp index e3b45bb8e3..ab24032949 100644 --- a/curvefs/src/mds/schedule/leaderScheduler.cpp +++ b/curvefs/src/mds/schedule/leaderScheduler.cpp @@ -201,6 +201,7 @@ bool LeaderScheduler::TransferLeaderOut(MetaServerIdType source, uint16_t replicaNum, PoolIdType poolId, Operator *op, CopySetInfo *selectedCopySet) { + (void)poolId; // find all copyset with source metaserver as its leader as the candidate std::vector candidateInfos; for (auto &cInfo : topo_->GetCopySetInfosInMetaServer(source)) { diff --git a/curvefs/src/mds/schedule/recoverScheduler.cpp b/curvefs/src/mds/schedule/recoverScheduler.cpp index f3007c9e04..be4bc16d7c 100644 --- a/curvefs/src/mds/schedule/recoverScheduler.cpp +++ b/curvefs/src/mds/schedule/recoverScheduler.cpp @@ -71,7 +71,7 @@ int RecoverScheduler::Schedule() { // alarm if over half of the replicas are offline int deadBound = copysetInfo.peers.size() - (copysetInfo.peers.size() / 2 + 1); - if (offlinelists.size() > deadBound) { + if (static_cast(offlinelists.size()) > deadBound) { LOG(ERROR) << "recoverSchdeuler find " << copysetInfo.CopySetInfoStr() << " has " << offlinelists.size() diff --git a/curvefs/src/mds/topology/topology.cpp b/curvefs/src/mds/topology/topology.cpp index a26c85fc3b..9e3acbd181 100644 --- a/curvefs/src/mds/topology/topology.cpp +++ b/curvefs/src/mds/topology/topology.cpp @@ -300,8 +300,9 @@ TopoStatusCode TopologyImpl::UpdateServer(const Server &data) { } } -TopoStatusCode TopologyImpl::UpdateMetaServerOnlineState( - const OnlineState &onlineState, MetaServerIdType id) { +TopoStatusCode +TopologyImpl::UpdateMetaServerOnlineState(const OnlineState &onlineState, + MetaServerIdType id) { ReadLockGuard rlockMetaServerMap(metaServerMutex_); auto it = metaServerMap_.find(id); if (it != metaServerMap_.end()) { @@ -454,8 +455,8 @@ ZoneIdType TopologyImpl::FindZone(const std::string &zoneName, return static_cast(UNINITIALIZE_ID); } -ServerIdType TopologyImpl::FindServerByHostName( - const std::string &hostName) const { +ServerIdType +TopologyImpl::FindServerByHostName(const std::string &hostName) const { ReadLockGuard rlockServer(serverMutex_); for (auto it = serverMap_.begin(); it != serverMap_.end(); it++) { if (it->second.GetHostName() == hostName) { @@ -616,8 +617,9 @@ TopoStatusCode TopologyImpl::UpdatePartition(const Partition &data) { } } -TopoStatusCode TopologyImpl::UpdatePartitionStatistic( - 
uint32_t partitionId, PartitionStatistic statistic) { +TopoStatusCode +TopologyImpl::UpdatePartitionStatistic(uint32_t partitionId, + PartitionStatistic statistic) { WriteLockGuard wlockPartition(partitionMutex_); auto it = partitionMap_.find(partitionId); if (it != partitionMap_.end()) { @@ -686,8 +688,8 @@ std::list TopologyImpl::GetAvailableCopysetKeyList() const { ReadLockGuard rlockCopySet(copySetMutex_); std::list result; for (auto const &it : copySetMap_) { - if (it.second.GetPartitionNum() - >= option_.maxPartitionNumberInCopyset) { + if (it.second.GetPartitionNum() >= + option_.maxPartitionNumberInCopyset) { continue; } result.push_back(it.first); @@ -700,8 +702,8 @@ std::vector TopologyImpl::GetAvailableCopysetList() const { ReadLockGuard rlockCopySet(copySetMutex_); std::vector result; for (auto const &it : copySetMap_) { - if (it.second.GetPartitionNum() - >= option_.maxPartitionNumberInCopyset) { + if (it.second.GetPartitionNum() >= + option_.maxPartitionNumberInCopyset) { continue; } result.push_back(it.second); @@ -740,8 +742,8 @@ int TopologyImpl::GetAvailableCopysetNum() const { ReadLockGuard rlockCopySet(copySetMutex_); int num = 0; for (auto const &it : copySetMap_) { - if (it.second.GetPartitionNum() - >= option_.maxPartitionNumberInCopyset) { + if (it.second.GetPartitionNum() >= + option_.maxPartitionNumberInCopyset) { continue; } num++; @@ -749,8 +751,8 @@ int TopologyImpl::GetAvailableCopysetNum() const { return num; } -std::list TopologyImpl::GetPartitionOfFs( - FsIdType id, PartitionFilter filter) const { +std::list +TopologyImpl::GetPartitionOfFs(FsIdType id, PartitionFilter filter) const { std::list ret; ReadLockGuard rlockPartitionMap(partitionMutex_); for (auto it = partitionMap_.begin(); it != partitionMap_.end(); it++) { @@ -761,8 +763,9 @@ std::list TopologyImpl::GetPartitionOfFs( return ret; } -std::list TopologyImpl::GetPartitionInfosInPool( - PoolIdType poolId, PartitionFilter filter) const { +std::list +TopologyImpl::GetPartitionInfosInPool(PoolIdType poolId, + PartitionFilter filter) const { std::list ret; ReadLockGuard rlockPartitionMap(partitionMutex_); for (auto it = partitionMap_.begin(); it != partitionMap_.end(); it++) { @@ -773,8 +776,8 @@ std::list TopologyImpl::GetPartitionInfosInPool( return ret; } -std::list TopologyImpl::GetPartitionInfosInCopyset( - CopySetIdType copysetId) const { +std::list +TopologyImpl::GetPartitionInfosInCopyset(CopySetIdType copysetId) const { std::list ret; ReadLockGuard rlockPartitionMap(partitionMutex_); for (auto it = partitionMap_.begin(); it != partitionMap_.end(); it++) { @@ -786,8 +789,8 @@ std::list TopologyImpl::GetPartitionInfosInCopyset( } // getList -std::vector TopologyImpl::GetMetaServerInCluster( - MetaServerFilter filter) const { +std::vector +TopologyImpl::GetMetaServerInCluster(MetaServerFilter filter) const { std::vector ret; ReadLockGuard rlockMetaServerMap(metaServerMutex_); for (auto it = metaServerMap_.begin(); it != metaServerMap_.end(); it++) { @@ -799,8 +802,8 @@ std::vector TopologyImpl::GetMetaServerInCluster( return ret; } -std::vector TopologyImpl::GetServerInCluster( - ServerFilter filter) const { +std::vector +TopologyImpl::GetServerInCluster(ServerFilter filter) const { std::vector ret; ReadLockGuard rlockServer(serverMutex_); for (auto it = serverMap_.begin(); it != serverMap_.end(); it++) { @@ -811,8 +814,8 @@ std::vector TopologyImpl::GetServerInCluster( return ret; } -std::vector TopologyImpl::GetZoneInCluster( - ZoneFilter filter) const { +std::vector 
+TopologyImpl::GetZoneInCluster(ZoneFilter filter) const { std::vector ret; ReadLockGuard rlockZone(zoneMutex_); for (auto it = zoneMap_.begin(); it != zoneMap_.end(); it++) { @@ -823,8 +826,8 @@ std::vector TopologyImpl::GetZoneInCluster( return ret; } -std::vector TopologyImpl::GetPoolInCluster( - PoolFilter filter) const { +std::vector +TopologyImpl::GetPoolInCluster(PoolFilter filter) const { std::vector ret; ReadLockGuard rlockPool(poolMutex_); for (auto it = poolMap_.begin(); it != poolMap_.end(); it++) { @@ -835,8 +838,9 @@ std::vector TopologyImpl::GetPoolInCluster( return ret; } -std::list TopologyImpl::GetMetaServerInServer( - ServerIdType id, MetaServerFilter filter) const { +std::list +TopologyImpl::GetMetaServerInServer(ServerIdType id, + MetaServerFilter filter) const { std::list ret; ReadLockGuard rlockMetaServerMap(metaServerMutex_); for (auto it = metaServerMap_.begin(); it != metaServerMap_.end(); it++) { @@ -848,8 +852,9 @@ std::list TopologyImpl::GetMetaServerInServer( return ret; } -std::list TopologyImpl::GetMetaServerInZone( - ZoneIdType id, MetaServerFilter filter) const { +std::list +TopologyImpl::GetMetaServerInZone(ZoneIdType id, + MetaServerFilter filter) const { std::list ret; std::list serverList = GetServerInZone(id); for (ServerIdType s : serverList) { @@ -859,8 +864,9 @@ std::list TopologyImpl::GetMetaServerInZone( return ret; } -std::list TopologyImpl::GetMetaServerInPool( - PoolIdType id, MetaServerFilter filter) const { +std::list +TopologyImpl::GetMetaServerInPool(PoolIdType id, + MetaServerFilter filter) const { std::list ret; std::list zoneList = GetZoneInPool(id); for (ZoneIdType z : zoneList) { @@ -870,13 +876,13 @@ std::list TopologyImpl::GetMetaServerInPool( return ret; } -uint32_t TopologyImpl::GetMetaServerNumInPool( - PoolIdType id, MetaServerFilter filter) const { +uint32_t TopologyImpl::GetMetaServerNumInPool(PoolIdType id, + MetaServerFilter filter) const { return GetMetaServerInPool(id, filter).size(); } -std::list TopologyImpl::GetServerInZone( - ZoneIdType id, ServerFilter filter) const { +std::list +TopologyImpl::GetServerInZone(ZoneIdType id, ServerFilter filter) const { std::list ret; ReadLockGuard rlockServer(serverMutex_); for (auto it = serverMap_.begin(); it != serverMap_.end(); it++) { @@ -899,8 +905,8 @@ std::list TopologyImpl::GetZoneInPool(PoolIdType id, return ret; } -std::vector TopologyImpl::GetCopySetsInPool( - PoolIdType poolId, CopySetFilter filter) const { +std::vector +TopologyImpl::GetCopySetsInPool(PoolIdType poolId, CopySetFilter filter) const { std::vector ret; ReadLockGuard rlockCopySet(copySetMutex_); for (const auto &it : copySetMap_) { @@ -911,14 +917,14 @@ std::vector TopologyImpl::GetCopySetsInPool( return ret; } -uint32_t TopologyImpl::GetCopySetNumInPool( - PoolIdType poolId, CopySetFilter filter) const { +uint32_t TopologyImpl::GetCopySetNumInPool(PoolIdType poolId, + CopySetFilter filter) const { return GetCopySetsInPool(poolId, filter).size(); } -std::vector TopologyImpl::GetCopySetsInCluster( - CopySetFilter filter) const { +std::vector +TopologyImpl::GetCopySetsInCluster(CopySetFilter filter) const { std::vector ret; ReadLockGuard rlockCopySet(copySetMutex_); for (const auto &it : copySetMap_) { @@ -929,8 +935,9 @@ std::vector TopologyImpl::GetCopySetsInCluster( return ret; } -std::vector TopologyImpl::GetCopySetInfosInPool( - PoolIdType poolId, CopySetFilter filter) const { +std::vector +TopologyImpl::GetCopySetInfosInPool(PoolIdType poolId, + CopySetFilter filter) const { std::vector ret; 
ReadLockGuard rlockCopySet(copySetMutex_); for (const auto &it : copySetMap_) { @@ -941,8 +948,9 @@ std::vector TopologyImpl::GetCopySetInfosInPool( return ret; } -std::vector TopologyImpl::GetCopySetsInMetaServer( - MetaServerIdType id, CopySetFilter filter) const { +std::vector +TopologyImpl::GetCopySetsInMetaServer(MetaServerIdType id, + CopySetFilter filter) const { std::vector ret; ReadLockGuard rlockCopySet(copySetMutex_); for (const auto &it : copySetMap_) { @@ -1052,8 +1060,9 @@ TopoStatusCode TopologyImpl::Init(const TopologyOption &option) { } // for upgrade and keep compatibility - // the old version have no partitionIndex in etcd, so need update here of upgrade // NOLINT - // if the fs in old cluster already delete some partitions, it is incompatible. // NOLINT + // the old version have no partitionIndex in etcd, so need update here of + // upgrade // NOLINT if the fs in old cluster already delete some + // partitions, it is incompatible. // NOLINT if (!RefreshPartitionIndexOfFS(partitionMap_)) { LOG(ERROR) << "[TopologyImpl::init], RefreshPartitionIndexOfFS fail."; return TopoStatusCode::TOPO_STORGE_FAIL; @@ -1278,8 +1287,8 @@ bool TopologyImpl::GetClusterInfo(ClusterInformation *info) { } // update partition tx, and ensure atomicity -TopoStatusCode TopologyImpl::UpdatePartitionTxIds( - std::vector txIds) { +TopoStatusCode +TopologyImpl::UpdatePartitionTxIds(std::vector txIds) { std::vector partitions; WriteLockGuard wlockPartition(partitionMutex_); for (auto item : txIds) { @@ -1380,20 +1389,20 @@ uint32_t TopologyImpl::GetLeaderNumInMetaserver(MetaServerIdType id) const { } void TopologyImpl::GetAvailableMetaserversUnlock( - std::vector* vec) { + std::vector *vec) { for (const auto &it : metaServerMap_) { - if (it.second.GetOnlineState() == OnlineState::ONLINE - && it.second.GetMetaServerSpace().IsMetaserverResourceAvailable() - && GetCopysetNumInMetaserver(it.first) - < option_.maxCopysetNumInMetaserver) { + if (it.second.GetOnlineState() == OnlineState::ONLINE && + it.second.GetMetaServerSpace().IsMetaserverResourceAvailable() && + GetCopysetNumInMetaserver(it.first) < + option_.maxCopysetNumInMetaserver) { vec->emplace_back(&(it.second)); } } } TopoStatusCode TopologyImpl::GenCandidateMapUnlock( - PoolIdType poolId, - std::map>* candidateMap) { + PoolIdType poolId, + std::map> *candidateMap) { // 1. get all online and available metaserver std::vector metaservers; GetAvailableMetaserversUnlock(&metaservers); @@ -1402,7 +1411,7 @@ TopoStatusCode TopologyImpl::GenCandidateMapUnlock( Server server; if (!GetServer(serverId, &server)) { LOG(ERROR) << "get server failed when choose metaservers," - << " the serverId = " << serverId; + << " the serverId = " << serverId; return TopoStatusCode::TOPO_SERVER_NOT_FOUND; } @@ -1418,8 +1427,8 @@ TopoStatusCode TopologyImpl::GenCandidateMapUnlock( } TopoStatusCode TopologyImpl::GenCopysetAddrBatchForPool( - PoolIdType poolId, uint16_t replicaNum, - std::list* copysetList) { + PoolIdType poolId, uint16_t replicaNum, + std::list *copysetList) { // 1. genarate candidateMap std::map> candidateMap; auto ret = GenCandidateMapUnlock(poolId, &candidateMap); @@ -1432,8 +1441,9 @@ TopoStatusCode TopologyImpl::GenCopysetAddrBatchForPool( // 2. 
return error if candidate map has no enough replicaNum if (candidateMap.size() < replicaNum) { LOG(WARNING) << "can not find available metaserver for copyset, " - << "poolId = " << poolId << " need replica num = " - << replicaNum << ", but only has available zone num = " + << "poolId = " << poolId + << " need replica num = " << replicaNum + << ", but only has available zone num = " << candidateMap.size(); return TopoStatusCode::TOPO_METASERVER_NOT_FOUND; } @@ -1461,13 +1471,13 @@ TopoStatusCode TopologyImpl::GenCopysetAddrBatchForPool( std::shuffle(zoneIds.begin(), zoneIds.end(), randomGenerator); std::vector msIds; - for (int i = 0; i < minSize; i++) { + for (uint32_t i = 0; i < minSize; i++) { for (const auto &zoneId : zoneIds) { msIds.push_back(candidateMap[zoneId][i]); } } - for (int i = 0; i < msIds.size() / replicaNum; i++) { + for (size_t i = 0; i < msIds.size() / replicaNum; i++) { CopysetCreateInfo copysetInfo; copysetInfo.poolId = poolId; copysetInfo.copysetId = UNINITIALIZE_ID; @@ -1485,7 +1495,7 @@ TopoStatusCode TopologyImpl::GenCopysetAddrBatchForPool( // Check if there is no copy on the pool. // Generate copyset on the empty copyset pools. void TopologyImpl::GenCopysetIfPoolEmptyUnlocked( - std::list* copysetList) { + std::list *copysetList) { for (const auto &it : poolMap_) { PoolIdType poolId = it.first; uint32_t metaserverNum = GetMetaServerNumInPool(poolId); @@ -1494,7 +1504,7 @@ void TopologyImpl::GenCopysetIfPoolEmptyUnlocked( } uint32_t copysetNum = GetCopySetNumInPool(poolId); - if (copysetNum !=0) { + if (copysetNum != 0) { continue; } @@ -1505,17 +1515,16 @@ void TopologyImpl::GenCopysetIfPoolEmptyUnlocked( continue; } std::list tempCopysetList; - TopoStatusCode ret = GenCopysetAddrBatchForPool(poolId, replicaNum, - &tempCopysetList); + TopoStatusCode ret = + GenCopysetAddrBatchForPool(poolId, replicaNum, &tempCopysetList); if (TopoStatusCode::TOPO_OK == ret) { LOG(INFO) << "Initial Generate copyset addr for pool " << poolId << " success, gen copyset num = " << tempCopysetList.size(); copysetList->splice(copysetList->end(), tempCopysetList); } else { - LOG(WARNING) << "Initial Generate copyset addr for pool " - << poolId << " fail, statusCode = " - << TopoStatusCode_Name(ret); + LOG(WARNING) << "Initial Generate copyset addr for pool " << poolId + << " fail, statusCode = " << TopoStatusCode_Name(ret); } } @@ -1529,10 +1538,10 @@ void TopologyImpl::GenCopysetIfPoolEmptyUnlocked( // 3. 
according to the pool order of step 2, generate copyset add in the pool // in turn until enough copyset add is generated TopoStatusCode TopologyImpl::GenSubsequentCopysetAddrBatchUnlocked( - uint32_t needCreateNum, std::list* copysetList) { + uint32_t needCreateNum, std::list *copysetList) { LOG(INFO) << "GenSubsequentCopysetAddrBatch needCreateNum = " - << needCreateNum << ", copysetList size = " - << copysetList->size() << " begin"; + << needCreateNum << ", copysetList size = " << copysetList->size() + << " begin"; MetaServerFilter filter = [](const MetaServer &ms) { return ms.GetOnlineState() == OnlineState::ONLINE; @@ -1546,8 +1555,8 @@ TopoStatusCode TopologyImpl::GenSubsequentCopysetAddrBatchUnlocked( } // sort pool list by copyset average num - std::sort(poolList.begin(), poolList.end(), - [=](const Pool& a, const Pool& b) { + std::sort( + poolList.begin(), poolList.end(), [=](const Pool &a, const Pool &b) { PoolIdType poolId1 = a.GetId(); PoolIdType poolId2 = b.GetId(); uint32_t copysetNum1 = GetCopySetNumInPool(poolId1); @@ -1564,8 +1573,8 @@ TopoStatusCode TopologyImpl::GenSubsequentCopysetAddrBatchUnlocked( PoolIdType poolId = it->GetId(); uint16_t replicaNum = it->GetReplicaNum(); std::list tempCopysetList; - TopoStatusCode ret = GenCopysetAddrBatchForPool(poolId, - replicaNum, &tempCopysetList); + TopoStatusCode ret = GenCopysetAddrBatchForPool(poolId, replicaNum, + &tempCopysetList); if (TopoStatusCode::TOPO_OK == ret) { copysetList->splice(copysetList->end(), tempCopysetList); if (copysetList->size() >= needCreateNum) { @@ -1573,9 +1582,10 @@ TopoStatusCode TopologyImpl::GenSubsequentCopysetAddrBatchUnlocked( } it++; } else { - LOG(WARNING) << "Generate " << needCreateNum - << " copyset addr for pool " << poolId - << "fail, statusCode = " << TopoStatusCode_Name(ret); + LOG(WARNING) + << "Generate " << needCreateNum << " copyset addr for pool " + << poolId + << "fail, statusCode = " << TopoStatusCode_Name(ret); it = poolList.erase(it); } } @@ -1596,8 +1606,9 @@ TopoStatusCode TopologyImpl::GenSubsequentCopysetAddrBatchUnlocked( // in this step is enough, return the list. // 2. Sort the pools according to the average number of copies, // and traverse each pool to create copies until the number is sufficient. 
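For readers skimming the hunks above, the copyset placement they refactor works roughly as follows: build a per-zone list of available metaservers, shuffle the zone order, take the i-th metaserver of every zone in round-robin fashion, then cut the flattened list into groups of replicaNum so each copyset spans distinct zones. The snippet below is a simplified, self-contained sketch of that batching step; the type aliases and the GenCopysetBatches helper are illustrative stand-ins, not the actual TopologyImpl members.

// Simplified sketch of the zone round-robin batching performed by
// GenCopysetAddrBatchForPool in the hunks above. Illustrative only.
#include <algorithm>
#include <cstdint>
#include <map>
#include <random>
#include <vector>

using ZoneIdType = uint32_t;
using MetaServerIdType = uint32_t;

// candidateMap: zoneId -> available metaservers in that zone.
// Returns batches of metaserver ids, each of size replicaNum,
// with every batch spanning replicaNum different zones.
std::vector<std::vector<MetaServerIdType>> GenCopysetBatches(
    std::map<ZoneIdType, std::vector<MetaServerIdType>> candidateMap,
    uint16_t replicaNum) {
    std::vector<std::vector<MetaServerIdType>> batches;
    if (candidateMap.size() < static_cast<size_t>(replicaNum)) {
        return batches;  // not enough zones to place all replicas
    }

    // the shortest per-zone list bounds how many rounds we can take
    size_t minSize = SIZE_MAX;
    std::vector<ZoneIdType> zoneIds;
    for (const auto &kv : candidateMap) {
        zoneIds.push_back(kv.first);
        minSize = std::min(minSize, kv.second.size());
    }

    // randomize zone order so load spreads across zones
    std::random_device rd;
    std::mt19937 gen(rd());
    std::shuffle(zoneIds.begin(), zoneIds.end(), gen);

    // round-robin: take the i-th metaserver of every zone in turn
    std::vector<MetaServerIdType> msIds;
    for (size_t i = 0; i < minSize; ++i) {
        for (ZoneIdType z : zoneIds) {
            msIds.push_back(candidateMap[z][i]);
        }
    }

    // cut the flattened list into copyset-sized groups
    for (size_t i = 0; i + replicaNum <= msIds.size(); i += replicaNum) {
        batches.emplace_back(msIds.begin() + i,
                             msIds.begin() + i + replicaNum);
    }
    return batches;
}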
-TopoStatusCode TopologyImpl::GenCopysetAddrBatch(uint32_t needCreateNum, - std::list* copysetList) { +TopoStatusCode +TopologyImpl::GenCopysetAddrBatch(uint32_t needCreateNum, + std::list *copysetList) { ReadLockGuard rlockPool(poolMutex_); ReadLockGuard rlockMetaserver(metaServerMutex_); ReadLockGuard rlockCopyset(copySetMutex_); @@ -1680,14 +1691,14 @@ bool TopologyImpl::RefreshPartitionIndexOfFS( std::list TopologyImpl::ListMemcacheServers() const { ReadLockGuard rlockMemcacheCluster(memcacheClusterMutex_); std::list ret; - for (auto const& cluster : memcacheClusterMap_) { - auto const& servers = cluster.second.GetServers(); + for (auto const &cluster : memcacheClusterMap_) { + auto const &servers = cluster.second.GetServers(); ret.insert(ret.begin(), servers.cbegin(), servers.cend()); } return ret; } -TopoStatusCode TopologyImpl::AddMemcacheCluster(const MemcacheCluster& data) { +TopoStatusCode TopologyImpl::AddMemcacheCluster(const MemcacheCluster &data) { WriteLockGuard wlockMemcacheCluster(memcacheClusterMutex_); // storage_ to storage TopoStatusCode ret = TopoStatusCode::TOPO_OK; @@ -1700,7 +1711,7 @@ TopoStatusCode TopologyImpl::AddMemcacheCluster(const MemcacheCluster& data) { return ret; } -TopoStatusCode TopologyImpl::AddMemcacheCluster(MemcacheCluster&& data) { +TopoStatusCode TopologyImpl::AddMemcacheCluster(MemcacheCluster &&data) { WriteLockGuard wlockMemcacheCluster(memcacheClusterMutex_); // storage_ to storage TopoStatusCode ret = TopoStatusCode::TOPO_OK; @@ -1716,14 +1727,15 @@ TopoStatusCode TopologyImpl::AddMemcacheCluster(MemcacheCluster&& data) { std::list TopologyImpl::ListMemcacheClusters() const { std::list ret; ReadLockGuard rlockMemcacheCluster(memcacheClusterMutex_); - for (auto const& cluster : memcacheClusterMap_) { + for (auto const &cluster : memcacheClusterMap_) { ret.emplace_back(cluster.second); } return ret; } -TopoStatusCode TopologyImpl::AllocOrGetMemcacheCluster( - FsIdType fsId, MemcacheClusterInfo* cluster) { +TopoStatusCode +TopologyImpl::AllocOrGetMemcacheCluster(FsIdType fsId, + MemcacheClusterInfo *cluster) { TopoStatusCode ret = TopoStatusCode::TOPO_OK; WriteLockGuard wlockFs2MemcacheCluster(fs2MemcacheClusterMutex_); ReadLockGuard rlockMemcacheCluster(memcacheClusterMutex_); diff --git a/curvefs/src/mds/topology/topology_item.cpp b/curvefs/src/mds/topology/topology_item.cpp index e62466b32b..b1774b701a 100644 --- a/curvefs/src/mds/topology/topology_item.cpp +++ b/curvefs/src/mds/topology/topology_item.cpp @@ -24,6 +24,7 @@ #include #include +#include #include "json/json.h" #include "src/common/string_util.h" @@ -53,11 +54,15 @@ bool ClusterInformation::ParseFromString(const std::string &value) { bool Pool::TransRedundanceAndPlaceMentPolicyFromJsonStr( const std::string &jsonStr, RedundanceAndPlaceMentPolicy *rap) { - Json::Reader reader; + Json::CharReaderBuilder builder; + std::unique_ptr reader(builder.newCharReader()); Json::Value rapJson; - if (!reader.parse(jsonStr, rapJson)) { + JSONCPP_STRING errormsg; + if (!reader->parse(jsonStr.data(), jsonStr.data() + jsonStr.length(), + &rapJson, &errormsg)) { return false; } + if (!rapJson["replicaNum"].isNull()) { rap->replicaNum = rapJson["replicaNum"].asInt(); } else { @@ -204,13 +209,17 @@ std::string CopySetInfo::GetCopySetMembersStr() const { } bool CopySetInfo::SetCopySetMembersByJson(const std::string &jsonStr) { - Json::Reader reader; + Json::CharReaderBuilder builder; + std::unique_ptr reader(builder.newCharReader()); Json::Value copysetMemJson; - if (!reader.parse(jsonStr, 
copysetMemJson)) { + JSONCPP_STRING errormsg; + if (!reader->parse(jsonStr.data(), jsonStr.data() + jsonStr.length(), + ©setMemJson, &errormsg)) { return false; } + peers_.clear(); - for (int i = 0; i < copysetMemJson.size(); i++) { + for (uint32_t i = 0; i < copysetMemJson.size(); i++) { if (copysetMemJson[i].isInt()) { peers_.insert(copysetMemJson[i].asInt()); } else { @@ -299,14 +308,14 @@ common::PartitionInfo Partition::ToPartitionInfo() { return info; } -bool MemcacheCluster::ParseFromString(const std::string& value) { +bool MemcacheCluster::ParseFromString(const std::string &value) { MemcacheClusterInfo data; bool ret = data.ParseFromString(value); (*this) = static_cast(data); return ret; } -bool MemcacheCluster::SerializeToString(std::string* value) const { +bool MemcacheCluster::SerializeToString(std::string *value) const { return static_cast(*this).SerializeToString(value); } diff --git a/curvefs/src/mds/topology/topology_manager.cpp b/curvefs/src/mds/topology/topology_manager.cpp index 00448ffd02..c546857bbc 100644 --- a/curvefs/src/mds/topology/topology_manager.cpp +++ b/curvefs/src/mds/topology/topology_manager.cpp @@ -76,13 +76,13 @@ void TopologyManager::RegistMetaServer(const MetaServerRegistRequest *request, response->set_metaserverid(ms.GetId()); response->set_token(ms.GetToken()); LOG(WARNING) << "Received duplicated registMetaServer message, " - << "metaserver is empty, hostip = " - << hostIp << ", port = " << port; + << "metaserver is empty, hostip = " << hostIp + << ", port = " << port; } else { response->set_statuscode(TopoStatusCode::TOPO_METASERVER_EXIST); LOG(ERROR) << "Received duplicated registMetaServer message, " - << "metaserver is not empty, hostip = " - << hostIp << ", port = " << port; + << "metaserver is not empty, hostip = " << hostIp + << ", port = " << port; } return; @@ -577,6 +577,7 @@ void TopologyManager::GetPool(const GetPoolRequest *request, void TopologyManager::ListPool(const ListPoolRequest *request, ListPoolResponse *response) { + (void)request; response->set_statuscode(TopoStatusCode::TOPO_OK); auto poolList = topology_->GetPoolInCluster(); for (PoolIdType id : poolList) { @@ -598,15 +599,17 @@ void TopologyManager::ListPool(const ListPoolRequest *request, } } -TopoStatusCode TopologyManager::CreatePartitionsAndGetMinPartition( - FsIdType fsId, PartitionInfo *partition) { +TopoStatusCode +TopologyManager::CreatePartitionsAndGetMinPartition(FsIdType fsId, + PartitionInfo *partition) { CreatePartitionRequest request; CreatePartitionResponse response; request.set_fsid(fsId); request.set_count(option_.createPartitionNumber); CreatePartitions(&request, &response); if (TopoStatusCode::TOPO_OK != response.statuscode() || - response.partitioninfolist_size() != request.count()) { + response.partitioninfolist_size() != + static_cast(request.count())) { return TopoStatusCode::TOPO_CREATE_PARTITION_FAIL; } // return the min one @@ -629,9 +632,8 @@ TopoStatusCode TopologyManager::CreatePartitionsAndGetMinPartition( return TopoStatusCode::TOPO_OK; } -TopoStatusCode TopologyManager::CreatePartitionOnCopyset(FsIdType fsId, - const CopySetInfo& copyset, - PartitionInfo *info) { +TopoStatusCode TopologyManager::CreatePartitionOnCopyset( + FsIdType fsId, const CopySetInfo ©set, PartitionInfo *info) { // get copyset members std::set copysetMembers = copyset.GetCopySetMembers(); std::set copysetMemberAddr; @@ -639,7 +641,7 @@ TopoStatusCode TopologyManager::CreatePartitionOnCopyset(FsIdType fsId, MetaServer metaserver; if (topology_->GetMetaServer(item, 
&metaserver)) { std::string addr = metaserver.GetInternalIp() + ":" + - std::to_string(metaserver.GetInternalPort()); + std::to_string(metaserver.GetInternalPort()); copysetMemberAddr.emplace(addr); } else { LOG(WARNING) << "Get metaserver info failed."; @@ -662,24 +664,23 @@ TopoStatusCode TopologyManager::CreatePartitionOnCopyset(FsIdType fsId, << ", " << copysetId << "), partitionId = " << partitionId << ", start = " << idStart << ", end = " << idEnd; - FSStatusCode retcode = metaserverClient_->CreatePartition( - fsId, poolId, copysetId, partitionId, idStart, idEnd, - copysetMemberAddr); + FSStatusCode retcode = + metaserverClient_->CreatePartition(fsId, poolId, copysetId, partitionId, + idStart, idEnd, copysetMemberAddr); if (FSStatusCode::OK != retcode) { LOG(ERROR) << "CreatePartition failed, " - << "fsId = " << fsId << ", poolId = " << poolId - << ", copysetId = " << copysetId - << ", partitionId = " << partitionId; + << "fsId = " << fsId << ", poolId = " << poolId + << ", copysetId = " << copysetId + << ", partitionId = " << partitionId; return TopoStatusCode::TOPO_CREATE_PARTITION_FAIL; } - Partition partition(fsId, poolId, copysetId, partitionId, idStart, - idEnd); + Partition partition(fsId, poolId, copysetId, partitionId, idStart, idEnd); TopoStatusCode ret = topology_->AddPartition(partition); if (TopoStatusCode::TOPO_OK != ret) { // TODO(wanghai): delete partition on metaserver LOG(ERROR) << "Add partition failed after create partition." - << " error code = " << ret; + << " error code = " << ret; return ret; } @@ -705,7 +706,7 @@ void TopologyManager::CreatePartitions(const CreatePartitionRequest *request, // get lock and avoid multiMountpoint create concurrently NameLockGuard lock(createPartitionMutex_, std::to_string(fsId)); - while (partitionInfoList->size() < count) { + while (partitionInfoList->size() < static_cast(count)) { int32_t createNum = count - topology_->GetAvailableCopysetNum(); // if available copyset is not enough, create copyset first if (createNum > 0) { @@ -718,28 +719,28 @@ void TopologyManager::CreatePartitions(const CreatePartitionRequest *request, } std::vector copysetVec = - topology_->GetAvailableCopysetList(); + topology_->GetAvailableCopysetList(); if (copysetVec.size() == 0) { LOG(ERROR) << "Get available copyset fail when create partition."; response->set_statuscode( - TopoStatusCode::TOPO_GET_AVAILABLE_COPYSET_ERROR); + TopoStatusCode::TOPO_GET_AVAILABLE_COPYSET_ERROR); return; } // sort copysetVec by partition num desent std::sort(copysetVec.begin(), copysetVec.end(), - [](const CopySetInfo& a, const CopySetInfo& b) { - return a.GetPartitionNum() < b.GetPartitionNum(); - }); + [](const CopySetInfo &a, const CopySetInfo &b) { + return a.GetPartitionNum() < b.GetPartitionNum(); + }); uint32_t copysetNum = copysetVec.size(); - int32_t tempCount = std::min(copysetNum, - count - partitionInfoList->size()); + int32_t tempCount = + std::min(copysetNum, count - partitionInfoList->size()); for (int i = 0; i < tempCount; i++) { PartitionInfo *info = partitionInfoList->Add(); - TopoStatusCode ret = CreatePartitionOnCopyset(fsId, - copysetVec[i], info); + TopoStatusCode ret = + CreatePartitionOnCopyset(fsId, copysetVec[i], info); if (ret != TopoStatusCode::TOPO_OK) { LOG(ERROR) << "create partition on copyset fail, fsId = " << fsId << ", poolId = " << copysetVec[i].GetPoolId() @@ -764,9 +765,8 @@ TopoStatusCode TopologyManager::DeletePartition(uint32_t partitionId) { return TopoStatusCode::TOPO_OK; } -void TopologyManager::DeletePartition( - const 
DeletePartitionRequest *request, - DeletePartitionResponse *response) { +void TopologyManager::DeletePartition(const DeletePartitionRequest *request, + DeletePartitionResponse *response) { uint32_t partitionId = request->partitionid(); Partition partition; if (!topology_->GetPartition(partitionId, &partition)) { @@ -795,15 +795,14 @@ void TopologyManager::DeletePartition( return; } - auto fret = metaserverClient_->DeletePartition(poolId, copysetId, - partitionId, copysetMemberAddr); + auto fret = metaserverClient_->DeletePartition( + poolId, copysetId, partitionId, copysetMemberAddr); if (fret == FSStatusCode::OK || fret == FSStatusCode::UNDER_DELETING) { - ret = topology_->UpdatePartitionStatus( - partitionId, PartitionStatus::DELETING); + ret = topology_->UpdatePartitionStatus(partitionId, + PartitionStatus::DELETING); if (ret != TopoStatusCode::TOPO_OK) { LOG(ERROR) << "DeletePartition failed, partitionId = " - << partitionId << ", ret = " - << TopoStatusCode_Name(ret); + << partitionId << ", ret = " << TopoStatusCode_Name(ret); } response->set_statuscode(ret); return; @@ -869,8 +868,8 @@ TopoStatusCode TopologyManager::CreateEnoughCopyset(int32_t createNum) { return TopoStatusCode::TOPO_OK; } -TopoStatusCode TopologyManager::CreateCopyset( - const CopysetCreateInfo ©set) { +TopoStatusCode +TopologyManager::CreateCopyset(const CopysetCreateInfo ©set) { LOG(INFO) << "Create new copyset: " << copyset.ToString(); // translate metaserver id to metaserver addr std::set metaServerAddrs; @@ -886,9 +885,8 @@ TopoStatusCode TopologyManager::CreateCopyset( } } - if (TopoStatusCode::TOPO_OK != - topology_->AddCopySetCreating( - CopySetKey(copyset.poolId, copyset.copysetId))) { + if (TopoStatusCode::TOPO_OK != topology_->AddCopySetCreating(CopySetKey( + copyset.poolId, copyset.copysetId))) { LOG(WARNING) << "the copyset key = (" << copyset.poolId << ", " << copyset.copysetId << ") is already creating."; } @@ -918,8 +916,8 @@ TopoStatusCode TopologyManager::CreateCopyset( return TopoStatusCode::TOPO_OK; } -TopoStatusCode TopologyManager::CommitTxId( - const std::vector& txIds) { +TopoStatusCode +TopologyManager::CommitTxId(const std::vector &txIds) { if (txIds.size() == 0) { return TopoStatusCode::TOPO_OK; } @@ -1004,7 +1002,7 @@ void TopologyManager::GetLatestPartitionsTxId( for (auto iter = txIds.begin(); iter != txIds.end(); iter++) { Partition out; - topology_ ->GetPartition(iter->partitionid(), &out); + topology_->GetPartition(iter->partitionid(), &out); if (out.GetTxId() != iter->txid()) { PartitionTxId tmp; tmp.set_partitionid(iter->partitionid()); @@ -1015,7 +1013,7 @@ void TopologyManager::GetLatestPartitionsTxId( } void TopologyManager::ListPartitionOfFs(FsIdType fsId, - std::list* list) { + std::list *list) { for (auto &partition : topology_->GetPartitionOfFs(fsId)) { list->emplace_back(partition.ToPartitionInfo()); } @@ -1060,9 +1058,10 @@ void TopologyManager::GetCopysetOfPartition( response->set_statuscode(TopoStatusCode::TOPO_OK); } -TopoStatusCode TopologyManager::GetCopysetMembers( - const PoolIdType poolId, const CopySetIdType copysetId, - std::set *addrs) { +TopoStatusCode +TopologyManager::GetCopysetMembers(const PoolIdType poolId, + const CopySetIdType copysetId, + std::set *addrs) { CopySetKey key(poolId, copysetId); CopySetInfo info; if (topology_->GetCopySet(key, &info)) { @@ -1086,9 +1085,9 @@ TopoStatusCode TopologyManager::GetCopysetMembers( return TopoStatusCode::TOPO_OK; } -void TopologyManager::GetCopysetInfo(const uint32_t& poolId, - const uint32_t& copysetId, - 
CopysetValue* copysetValue) { +void TopologyManager::GetCopysetInfo(const uint32_t &poolId, + const uint32_t ©setId, + CopysetValue *copysetValue) { // default is ok, when find error set to error code copysetValue->set_statuscode(TopoStatusCode::TOPO_OK); CopySetKey key(poolId, copysetId); @@ -1098,10 +1097,10 @@ void TopologyManager::GetCopysetInfo(const uint32_t& poolId, valueCopysetInfo->set_poolid(info.GetPoolId()); valueCopysetInfo->set_copysetid(info.GetId()); // set peers - for (auto const& msId : info.GetCopySetMembers()) { + for (auto const &msId : info.GetCopySetMembers()) { MetaServer ms; if (topology_->GetMetaServer(msId, &ms)) { - common::Peer* peer = valueCopysetInfo->add_peers(); + common::Peer *peer = valueCopysetInfo->add_peers(); peer->set_id(ms.GetId()); peer->set_address(BuildPeerIdWithIpPort(ms.GetInternalIp(), ms.GetInternalPort())); @@ -1133,12 +1132,12 @@ void TopologyManager::GetCopysetInfo(const uint32_t& poolId, valueCopysetInfo->set_allocated_leaderpeer(peer); // set partitioninfolist - for (auto const& i : info.GetPartitionIds()) { + for (auto const &i : info.GetPartitionIds()) { Partition tmp; if (!topology_->GetPartition(i, &tmp)) { - LOG(WARNING) << "poolId=" << poolId - << " copysetid=" << copysetId - << " has pattition error, partitionId=" << i; + LOG(WARNING) + << "poolId=" << poolId << " copysetid=" << copysetId + << " has pattition error, partitionId=" << i; copysetValue->set_statuscode( TopoStatusCode::TOPO_PARTITION_NOT_FOUND); } else { @@ -1167,9 +1166,9 @@ void TopologyManager::GetCopysetInfo(const uint32_t& poolId, } } -void TopologyManager::GetCopysetsInfo(const GetCopysetsInfoRequest* request, - GetCopysetsInfoResponse* response) { - for (auto const& i : request->copysetkeys()) { +void TopologyManager::GetCopysetsInfo(const GetCopysetsInfoRequest *request, + GetCopysetsInfoResponse *response) { + for (auto const &i : request->copysetkeys()) { GetCopysetInfo(i.poolid(), i.copysetid(), response->add_copysetvalues()); } @@ -1185,10 +1184,10 @@ void TopologyManager::ListCopysetsInfo(ListCopysetInfoResponse *response) { valueCopysetInfo->set_poolid(i.GetPoolId()); valueCopysetInfo->set_copysetid(i.GetId()); // set peers - for (auto const& msId : i.GetCopySetMembers()) { + for (auto const &msId : i.GetCopySetMembers()) { MetaServer ms; if (topology_->GetMetaServer(msId, &ms)) { - common::Peer* peer = valueCopysetInfo->add_peers(); + common::Peer *peer = valueCopysetInfo->add_peers(); peer->set_id(ms.GetId()); peer->set_address(BuildPeerIdWithIpPort(ms.GetInternalIp(), ms.GetInternalPort())); @@ -1220,12 +1219,12 @@ void TopologyManager::ListCopysetsInfo(ListCopysetInfoResponse *response) { valueCopysetInfo->set_allocated_leaderpeer(peer); // set partitioninfolist - for (auto const& j : i.GetPartitionIds()) { + for (auto const &j : i.GetPartitionIds()) { Partition tmp; if (!topology_->GetPartition(j, &tmp)) { - LOG(WARNING) << "poolId=" << i.GetPoolId() - << " copysetid=" << i.GetId() - << " has pattition error, partitionId=" << j; + LOG(WARNING) + << "poolId=" << i.GetPoolId() << " copysetid=" << i.GetId() + << " has pattition error, partitionId=" << j; copysetValue->set_statuscode( TopoStatusCode::TOPO_PARTITION_NOT_FOUND); } else { @@ -1259,10 +1258,10 @@ void TopologyManager::GetTopology(ListTopologyResponse *response) { ListMetaserverOfCluster(response->mutable_metaservers()); } -void TopologyManager::ListZone(ListZoneResponse* response) { +void TopologyManager::ListZone(ListZoneResponse *response) { 
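// [Editor's note] Several hunks in this file and later ones (the partition
// count checks in CreatePartitionsAndGetMinPartition and CreatePartitions,
// the etcd Op sizes, DentryVector::Delete) only adjust loop index types or add
// casts to silence -Wsign-compare: protobuf repeated-field size() returns int
// while STL containers return size_t. A tiny illustration of the pattern
// (not taken from this patch):
#include <cstddef>

// Stand-in for protobuf's repeated-field size(), which returns int.
int CreatedPartitionCount(int n) { return n; }

bool EnoughPartitions(std::size_t requested, int created) {
    int have = CreatedPartitionCount(created);
    // "have >= requested" would compare int with size_t and trip
    // -Wsign-compare; the patch makes the conversion explicit instead.
    return have >= 0 && static_cast<std::size_t>(have) >= requested;
}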
response->set_statuscode(TopoStatusCode::TOPO_OK); auto zoneIdVec = topology_->GetZoneInCluster(); - for (auto const& zoneId : zoneIdVec) { + for (auto const &zoneId : zoneIdVec) { Zone zone; if (topology_->GetZone(zoneId, &zone)) { auto zoneInfo = response->add_zoneinfos(); @@ -1278,10 +1277,10 @@ void TopologyManager::ListZone(ListZoneResponse* response) { } } -void TopologyManager::ListServer(ListServerResponse* response) { +void TopologyManager::ListServer(ListServerResponse *response) { response->set_statuscode(TopoStatusCode::TOPO_OK); auto serverIdVec = topology_->GetServerInCluster(); - for (auto const& serverId : serverIdVec) { + for (auto const &serverId : serverIdVec) { Server server; if (topology_->GetServer(serverId, &server)) { auto serverInfo = response->add_serverinfos(); @@ -1303,13 +1302,13 @@ void TopologyManager::ListServer(ListServerResponse* response) { } void TopologyManager::ListMetaserverOfCluster( - ListMetaServerResponse* response) { + ListMetaServerResponse *response) { response->set_statuscode(TopoStatusCode::TOPO_OK); auto metaserverIdList = topology_->GetMetaServerInCluster(); - for (auto const& id : metaserverIdList) { + for (auto const &id : metaserverIdList) { MetaServer ms; if (topology_->GetMetaServer(id, &ms)) { - MetaServerInfo* msInfo = response->add_metaserverinfos(); + MetaServerInfo *msInfo = response->add_metaserverinfos(); msInfo->set_metaserverid(ms.GetId()); msInfo->set_hostname(ms.GetHostName()); msInfo->set_internalip(ms.GetInternalIp()); @@ -1328,14 +1327,15 @@ void TopologyManager::ListMetaserverOfCluster( } } -TopoStatusCode TopologyManager::UpdatePartitionStatus( - PartitionIdType partitionId, PartitionStatus status) { +TopoStatusCode +TopologyManager::UpdatePartitionStatus(PartitionIdType partitionId, + PartitionStatus status) { return topology_->UpdatePartitionStatus(partitionId, status); } void TopologyManager::RegistMemcacheCluster( - const RegistMemcacheClusterRequest* request, - RegistMemcacheClusterResponse* response) { + const RegistMemcacheClusterRequest *request, + RegistMemcacheClusterResponse *response) { response->set_statuscode(TopoStatusCode::TOPO_OK); // register memcacheCluster as server WriteLockGuard lock(registMemcacheClusterMutex_); @@ -1345,7 +1345,7 @@ void TopologyManager::RegistMemcacheCluster( MemcacheCluster mCluster( 0, std::list(request->servers().begin(), request->servers().end())); - for (auto const& cluster : clusterList) { + for (auto const &cluster : clusterList) { mCluster.SetId(cluster.GetId()); if (cluster == mCluster) { // has registered memcache cluster @@ -1357,10 +1357,8 @@ void TopologyManager::RegistMemcacheCluster( // Guarantee the uniqueness of memcacheServer std::list serverRegisted = topology_->ListMemcacheServers(); std::list serverList; - for (auto const& server : request->servers()) { - auto cmp = [server](const MemcacheServer& ms) { - return ms == server; - }; + for (auto const &server : request->servers()) { + auto cmp = [server](const MemcacheServer &ms) { return ms == server; }; if (std::find_if(serverRegisted.begin(), serverRegisted.end(), cmp) != serverRegisted.end()) { LOG(ERROR) << "Regist MemcacheCluster failed! 
Server[" @@ -1388,11 +1386,11 @@ void TopologyManager::RegistMemcacheCluster( } void TopologyManager::ListMemcacheCluster( - ListMemcacheClusterResponse* response) { + ListMemcacheClusterResponse *response) { std::list clusterList = topology_->ListMemcacheClusters(); if (!clusterList.empty()) { response->set_statuscode(TopoStatusCode::TOPO_OK); - for (auto& cluster : clusterList) { + for (auto &cluster : clusterList) { (*response->add_memcacheclusters()) = std::move(cluster); } } else { @@ -1402,8 +1400,8 @@ void TopologyManager::ListMemcacheCluster( } void TopologyManager::AllocOrGetMemcacheCluster( - const AllocOrGetMemcacheClusterRequest* request, - AllocOrGetMemcacheClusterResponse* response) { + const AllocOrGetMemcacheClusterRequest *request, + AllocOrGetMemcacheClusterResponse *response) { auto statusCode = topology_->AllocOrGetMemcacheCluster( request->fsid(), response->mutable_cluster()); response->set_statuscode(statusCode); diff --git a/curvefs/src/mds/topology/topology_metric.cpp b/curvefs/src/mds/topology/topology_metric.cpp index 592d8d51b0..042c2896bd 100644 --- a/curvefs/src/mds/topology/topology_metric.cpp +++ b/curvefs/src/mds/topology/topology_metric.cpp @@ -40,8 +40,11 @@ std::map gFsMetrics; void TopologyMetricService::UpdateTopologyMetrics() { // process metaserver - std::vector metaservers = topo_->GetMetaServerInCluster( - [](const MetaServer &ms) { return true; }); + std::vector metaservers = + topo_->GetMetaServerInCluster([](const MetaServer &ms) { + (void)ms; + return true; + }); for (auto msId : metaservers) { auto it = gMetaServerMetrics.find(msId); @@ -103,10 +106,10 @@ void TopologyMetricService::UpdateTopologyMetrics() { auto fileType2InodeNum = pit->GetFileType2InodeNum(); auto itFsId2FileType2InodeNum = fsId2FileType2InodeNum.find(fsId); if (itFsId2FileType2InodeNum == fsId2FileType2InodeNum.end()) { - fsId2FileType2InodeNum.emplace( - fsId, std::move(fileType2InodeNum)); + fsId2FileType2InodeNum.emplace(fsId, + std::move(fileType2InodeNum)); } else { - for (auto const& fileType2Inode : fileType2InodeNum) { + for (auto const &fileType2Inode : fileType2InodeNum) { auto itFileType2InodeNum = itFsId2FileType2InodeNum->second.find( fileType2Inode.first); @@ -202,7 +205,7 @@ void TopologyMetricService::UpdateTopologyMetrics() { } // set fsId2FileType2InodeNum metric - for (auto const& fsId2FileType2InodeNumPair : fsId2FileType2InodeNum) { + for (auto const &fsId2FileType2InodeNumPair : fsId2FileType2InodeNum) { auto it = gFsMetrics.find(fsId2FileType2InodeNumPair.first); if (it == gFsMetrics.end()) { FsMetricPtr cptr(new FsMetric(fsId2FileType2InodeNumPair.first)); @@ -211,7 +214,7 @@ void TopologyMetricService::UpdateTopologyMetrics() { .first; } // set according to fstype - for (auto const& fileType2InodeNumPair : + for (auto const &fileType2InodeNumPair : fsId2FileType2InodeNumPair.second) { auto it2 = it->second->fileType2InodeNum_.find( fileType2InodeNumPair.first); // find file type diff --git a/curvefs/src/mds/topology/topology_service.cpp b/curvefs/src/mds/topology/topology_service.cpp index 6205b440e4..590253afa7 100644 --- a/curvefs/src/mds/topology/topology_service.cpp +++ b/curvefs/src/mds/topology/topology_service.cpp @@ -755,6 +755,8 @@ void TopologyServiceImpl::StatMetadataUsage( const ::curvefs::mds::topology::StatMetadataUsageRequest* request, ::curvefs::mds::topology::StatMetadataUsageResponse* response, ::google::protobuf::Closure* done) { + (void)controller; + (void)request; brpc::ClosureGuard guard(done); LOG(INFO) << "start to state 
metadata usage."; topologyManager_->GetMetaServersSpace(response->mutable_metadatausages()); diff --git a/curvefs/src/mds/topology/topology_storge_etcd.cpp b/curvefs/src/mds/topology/topology_storge_etcd.cpp index 2b3708bb36..b59eb23a09 100644 --- a/curvefs/src/mds/topology/topology_storge_etcd.cpp +++ b/curvefs/src/mds/topology/topology_storge_etcd.cpp @@ -490,8 +490,8 @@ bool TopologyStorageEtcd::UpdatePartitions( OpType::OpPut, const_cast(keys[i].data()), const_cast(values[i].data()), - keys[i].size(), - values[i].size() + static_cast(keys[i].size()), + static_cast(values[i].size()) }; ops.emplace_back(op); } diff --git a/curvefs/src/metaserver/copyset/apply_queue.cpp b/curvefs/src/metaserver/copyset/apply_queue.cpp deleted file mode 100644 index 0615d05419..0000000000 --- a/curvefs/src/metaserver/copyset/apply_queue.cpp +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright (c) 2021 NetEase Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Project: curve - * Date: Thu Sep 2 14:49:04 CST 2021 - * Author: wuhanqing - */ - -#include "curvefs/src/metaserver/copyset/apply_queue.h" - -#include - -#include "absl/memory/memory.h" -#include "absl/strings/str_cat.h" -#include "curvefs/src/common/threading.h" -#include "curvefs/src/metaserver/copyset/copyset_node.h" - -namespace curvefs { -namespace metaserver { -namespace copyset { - -using ::curvefs::common::SetThreadName; - -void ApplyQueue::StartWorkers() { - for (uint32_t i = 0; i < option_.workerCount; ++i) { - std::string name = [this, i]() -> std::string { - if (option_.copysetNode == nullptr) { - return "apply"; - } - - return absl::StrCat("apply", ":", option_.copysetNode->GetPoolId(), - "_", option_.copysetNode->GetCopysetId(), ":", - i); - }(); - auto taskThread = - absl::make_unique(option_.queueDepth, std::move(name)); - taskThread->Start(); - workers_.emplace_back(std::move(taskThread)); - } -} - -bool ApplyQueue::Start(const ApplyQueueOption& option) { - if (running_) { - return true; - } - - if (option.queueDepth < 1 || option.workerCount < 1) { - LOG(ERROR) << "ApplyQueue start failed, invalid argument, queue depth: " - << option.queueDepth - << ", worker count: " << option.workerCount; - return false; - } - - option_ = option; - - StartWorkers(); - running_.store(true); - return true; -} - -void ApplyQueue::Flush() { - if (!running_.load(std::memory_order_relaxed)) { - return; - } - - curve::common::CountDownEvent event(option_.workerCount); - - auto flush = [&event]() { event.Signal(); }; - - for (auto& worker : workers_) { - worker->tasks.Push(flush); - } - - event.Wait(); -} - -void ApplyQueue::Stop() { - if (!running_.exchange(false)) { - return; - } - - LOG(INFO) << "Going to stop apply queue"; - - for (auto& worker : workers_) { - worker->Stop(); - } - - workers_.clear(); - LOG(INFO) << "Apply queue stopped"; -} - -void ApplyQueue::TaskWorker::Start() { - if (running.exchange(true)) { - return; - } - - worker = std::thread(&TaskWorker::Work, this); -} - -void ApplyQueue::TaskWorker::Stop() { - if 
(!running.exchange(false)) { - return; - } - - auto wakeup = []() {}; - tasks.Push(wakeup); - - worker.join(); -} - -void ApplyQueue::TaskWorker::Work() { - SetThreadName(workerName_.c_str()); - - while (running.load(std::memory_order_relaxed)) { - tasks.Pop()(); - } -} - -} // namespace copyset -} // namespace metaserver -} // namespace curvefs diff --git a/curvefs/src/metaserver/copyset/apply_queue.h b/curvefs/src/metaserver/copyset/apply_queue.h deleted file mode 100644 index a3dafd9526..0000000000 --- a/curvefs/src/metaserver/copyset/apply_queue.h +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright (c) 2021 NetEase Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Project: curve - * Date: Thu Sep 2 14:49:04 CST 2021 - * Author: wuhanqing - */ - -#ifndef CURVEFS_SRC_METASERVER_COPYSET_APPLY_QUEUE_H_ -#define CURVEFS_SRC_METASERVER_COPYSET_APPLY_QUEUE_H_ - -#include -#include -#include -#include -#include -#include -#include - -#include "include/curve_compiler_specific.h" -#include "src/common/concurrent/count_down_event.h" -#include "src/common/concurrent/task_queue.h" - -namespace curvefs { -namespace metaserver { -namespace copyset { - -class CopysetNode; - -struct ApplyQueueOption { - uint32_t workerCount = 1; - uint32_t queueDepth = 1; - CopysetNode* copysetNode = nullptr; -}; - -class CURVE_CACHELINE_ALIGNMENT ApplyQueue { - public: - ApplyQueue() : option_(), running_(false), workers_() {} - - bool Start(const ApplyQueueOption& option); - - template - void Push(uint64_t hash, Func&& f, Args&&... args) { - workers_[hash % option_.workerCount]->tasks.Push( - std::forward(f), std::forward(args)...); - } - - void Flush(); - - void Stop(); - - private: - void StartWorkers(); - - struct TaskWorker { - TaskWorker(size_t cap, std::string workerName) - : running(false), - worker(), - tasks(cap), - workerName_(std::move(workerName)) {} - - void Start(); - - void Stop(); - - void Work(); - - std::atomic running; - std::thread worker; - curve::common::TaskQueue tasks; - std::string workerName_; - }; - - private: - ApplyQueueOption option_; - std::atomic running_; - std::vector> workers_; -}; - -} // namespace copyset -} // namespace metaserver -} // namespace curvefs - -#endif // CURVEFS_SRC_METASERVER_COPYSET_APPLY_QUEUE_H_ diff --git a/curvefs/src/metaserver/copyset/concurrent_apply_queue.cpp b/curvefs/src/metaserver/copyset/concurrent_apply_queue.cpp new file mode 100644 index 0000000000..a058163439 --- /dev/null +++ b/curvefs/src/metaserver/copyset/concurrent_apply_queue.cpp @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
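// [Editor's note] The apply_queue.{h,cpp} files deleted above exposed a single
// thread pool with Push(hash, task). The concurrent_apply_queue.{h,cpp} files
// added below split operators into read and write pools, so Push also takes
// the operator type. A minimal usage sketch under that assumption (it mirrors
// how copyset_node.cpp calls the queue later in this patch, but is not itself
// patch code):
#include "curvefs/src/metaserver/copyset/concurrent_apply_queue.h"

void ApplyQueueUsageSketch() {
    using namespace curvefs::metaserver::copyset;
    ApplyQueue queue;
    // write threads / write queue depth, read threads / read queue depth
    ApplyOption opt(3, 1, 1, 1);
    if (!queue.Init(opt)) {
        return;
    }
    // GetInode is classified as a read by Schedule(); the key keeps one
    // partition's operators on the same queue so they stay ordered.
    queue.Push(42, OperatorType::GetInode, [] { /* apply the operator */ });
    queue.Flush();     // drain the write pool (done on leader start)
    queue.FlushAll();  // drain both pools (used by FlushApplyQueue in tests)
    queue.Stop();
}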
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: curve + * File Created: 20230521 + * Author: Xinlong-Chen + */ + + +#include "curvefs/src/metaserver/copyset/concurrent_apply_queue.h" + +#include + +namespace curvefs { +namespace metaserver { +namespace copyset { +bool ApplyQueue::Init(const ApplyOption &opt) { + if (start_) { + LOG(WARNING) << "concurrent module already start!"; + return true; + } + + if (false == CheckOptAndInit(opt)) { + return false; + } + + start_ = true; + cond_.Reset(opt.rconcurrentsize + opt.wconcurrentsize); + InitThreadPool(ThreadPoolType::READ, rconcurrentsize_, rqueuedepth_); + InitThreadPool(ThreadPoolType::WRITE, wconcurrentsize_, wqueuedepth_); + + if (!cond_.WaitFor(5000)) { + LOG(ERROR) << "init concurrent module's threads fail"; + start_ = false; + } + + LOG(INFO) << "Init concurrent module's threads success"; + return start_; +} + +bool ApplyQueue::CheckOptAndInit(const ApplyOption &opt) { + if (opt.rconcurrentsize <= 0 || opt.wconcurrentsize <= 0 || + opt.rqueuedepth <= 0 || opt.wqueuedepth <= 0) { + LOG(INFO) << "init concurrent module fail, params must >=0" + << ", rconcurrentsize=" << opt.rconcurrentsize + << ", wconcurrentsize=" << opt.wconcurrentsize + << ", rqueuedepth=" << opt.rqueuedepth + << ", wconcurrentsize=" << opt.wqueuedepth; + return false; + } + + wconcurrentsize_ = opt.wconcurrentsize; + wqueuedepth_ = opt.wqueuedepth; + rconcurrentsize_ = opt.rconcurrentsize; + rqueuedepth_ = opt.rqueuedepth; + + return true; +} + +void ApplyQueue::InitThreadPool( + ThreadPoolType type, int concurrent, int depth) { + for (int i = 0; i < concurrent; i++) { + auto asyncth = new (std::nothrow) TaskThread(depth); + CHECK(asyncth != nullptr) << "allocate failed!"; + + switch (type) { + case ThreadPoolType::READ: + rapplyMap_.insert(std::make_pair(i, asyncth)); + break; + + case ThreadPoolType::WRITE: + wapplyMap_.insert(std::make_pair(i, asyncth)); + break; + } + } + + for (int i = 0; i < concurrent; i++) { + switch (type) { + case ThreadPoolType::READ: + rapplyMap_[i]->th = + std::thread(&ApplyQueue::Run, this, type, i); + break; + + case ThreadPoolType::WRITE: + wapplyMap_[i]->th = + std::thread(&ApplyQueue::Run, this, type, i); + break; + } + } +} + +void ApplyQueue::Run(ThreadPoolType type, int index) { + cond_.Signal(); + while (start_) { + switch (type) { + case ThreadPoolType::READ: + rapplyMap_[index]->tq.Pop()(); + break; + + case ThreadPoolType::WRITE: + wapplyMap_[index]->tq.Pop()(); + break; + } + } +} + +void ApplyQueue::Stop() { + if (!start_.exchange(false)) { + return; + } + + LOG(INFO) << "stop ApplyQueue..."; + auto wakeup = []() {}; + for (auto iter : rapplyMap_) { + iter.second->tq.Push(wakeup); + iter.second->th.join(); + delete iter.second; + } + rapplyMap_.clear(); + + for (auto iter : wapplyMap_) { + iter.second->tq.Push(wakeup); + iter.second->th.join(); + delete iter.second; + } + wapplyMap_.clear(); + + LOG(INFO) << "stop ApplyQueue ok."; +} + +void ApplyQueue::Flush() { + if (!start_.load(std::memory_order_relaxed)) { + return; + } + + CountDownEvent event(wconcurrentsize_); + auto flushtask = [&event]() { + event.Signal(); + }; + + for 
(int i = 0; i < wconcurrentsize_; i++) { + wapplyMap_[i]->tq.Push(flushtask); + } + + event.Wait(); +} + +void ApplyQueue::FlushAll() { + if (!start_.load(std::memory_order_relaxed)) { + return; + } + + CountDownEvent event(wconcurrentsize_ + rconcurrentsize_); + auto flushtask = [&event]() { + event.Signal(); + }; + + for (int i = 0; i < wconcurrentsize_; i++) { + wapplyMap_[i]->tq.Push(flushtask); + } + + for (int i = 0; i < rconcurrentsize_; i++) { + rapplyMap_[i]->tq.Push(flushtask); + } + + event.Wait(); +} + +ThreadPoolType ApplyQueue::Schedule(OperatorType optype) { + switch (optype) { + case OperatorType::GetDentry: + case OperatorType::ListDentry: + case OperatorType::GetInode: + case OperatorType::BatchGetInodeAttr: + case OperatorType::BatchGetXAttr: + case OperatorType::GetVolumeExtent: + return ThreadPoolType::READ; + default: + return ThreadPoolType::WRITE; + } +} +} // namespace copyset +} // namespace metaserver +} // namespace curvefs diff --git a/curvefs/src/metaserver/copyset/concurrent_apply_queue.h b/curvefs/src/metaserver/copyset/concurrent_apply_queue.h new file mode 100644 index 0000000000..785d893406 --- /dev/null +++ b/curvefs/src/metaserver/copyset/concurrent_apply_queue.h @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: curve + * File Created: 20230521 + * Author: Xinlong-Chen + */ + +#ifndef CURVEFS_SRC_METASERVER_COPYSET_CONCURRENT_APPLY_QUEUE_H_ +#define CURVEFS_SRC_METASERVER_COPYSET_CONCURRENT_APPLY_QUEUE_H_ + +#include +#include +#include + +#include +#include +#include +#include + +#include "include/curve_compiler_specific.h" +#include "src/common/concurrent/count_down_event.h" +#include "src/common/concurrent/task_queue.h" +#include "curvefs/src/metaserver/copyset/operator_type.h" + +namespace curvefs { +namespace metaserver { +namespace copyset { + +using curve::common::CountDownEvent; +using curve::common::GenericTaskQueue; + +struct ApplyOption { + int wconcurrentsize = 3; + int wqueuedepth = 1; + int rconcurrentsize = 1; + int rqueuedepth = 1; + ApplyOption(int wsize, int wdepth, int rsize, int rdepth) : + wconcurrentsize(wsize), + wqueuedepth(wdepth), + rconcurrentsize(rsize), + rqueuedepth(rdepth) {} + ApplyOption() {} +}; + +enum class ThreadPoolType {READ, WRITE}; + +/* +TODO: this moudle is same as curvebs's ConcurrentApplyModule, + only Schedule function is different. + we can make ApplyQueue to a base class, + and define Schedule with virtual function, + derive class override Schedule. 
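  Editor's sketch of the refactor proposed above (illustrative only; the class
  names are hypothetical and nothing below is part of this patch):

      class ConcurrentApplyQueueBase {
       public:
          virtual ~ConcurrentApplyQueueBase() = default;
          bool Init(const ApplyOption &opt);
          template <typename F, typename... Args>
          bool Push(uint64_t key, OperatorType optype, F &&f, Args &&... args);
          void Flush();
          void Stop();
       protected:
          // The only per-system difference: how an operator is classified.
          virtual ThreadPoolType Schedule(OperatorType optype) const = 0;
      };

      class MetaserverApplyQueue : public ConcurrentApplyQueueBase {
       protected:
          ThreadPoolType Schedule(OperatorType optype) const override {
              switch (optype) {
                  case OperatorType::GetInode:
                  case OperatorType::GetDentry:
                      return ThreadPoolType::READ;
                  default:
                      return ThreadPoolType::WRITE;
              }
          }
      };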
+*/
+
+class CURVE_CACHELINE_ALIGNMENT ApplyQueue {
+ public:
+    ApplyQueue(): start_(false),
+        rconcurrentsize_(0),
+        rqueuedepth_(0),
+        wconcurrentsize_(0),
+        wqueuedepth_(0),
+        cond_(0) {}
+
+    /**
+     * Init: initialize ApplyQueue
+     * @param[in] wconcurrentsize: num of write threads
+     * @param[in] wqueuedepth: depth of write queue in every thread
+     * @param[in] rconcurrentsize: num of read threads
+     * @param[in] rqueuedepth: depth of read queue in every thread
+     */
+    bool Init(const ApplyOption &opt);
+
+    /**
+     * Push: apply task will be pushed to ApplyQueue
+     * @param[in] key: used to hash task to specified queue
+     * @param[in] optype: operation type defined in proto
+     * @param[in] f: task
+     * @param[in] args: params to execute the task
+     */
+    template
+    bool Push(uint64_t key, OperatorType optype, F&& f, Args&&... args) {
+        switch (Schedule(optype)) {
+        case ThreadPoolType::READ:
+            rapplyMap_[Hash(key, rconcurrentsize_)]->tq.Push(
+                std::forward(f), std::forward(args)...);
+            break;
+        case ThreadPoolType::WRITE:
+            wapplyMap_[Hash(key, wconcurrentsize_)]->tq.Push(
+                std::forward(f), std::forward(args)...);
+            break;
+        }
+
+        return true;
+    }
+
+    /**
+     * Flush: finish all tasks in write threads
+     */
+    void Flush();
+    void FlushAll();
+
+    void Stop();
+
+ private:
+    bool CheckOptAndInit(const ApplyOption &option);
+
+    void Run(ThreadPoolType type, int index);
+
+    static ThreadPoolType Schedule(OperatorType optype);
+
+    void InitThreadPool(ThreadPoolType type, int concurrent, int depth);
+
+    static int Hash(uint64_t key, int concurrent) {
+        return key % concurrent;
+    }
+
+ private:
+    struct TaskThread {
+        std::thread th;
+        GenericTaskQueue tq;
+        explicit TaskThread(size_t capacity) : tq(capacity) {}
+    };
+
+    std::atomic start_;
+    int rconcurrentsize_;
+    int rqueuedepth_;
+    int wconcurrentsize_;
+    int wqueuedepth_;
+    CountDownEvent cond_;
+    CURVE_CACHELINE_ALIGNMENT std::unordered_map wapplyMap_;
+    CURVE_CACHELINE_ALIGNMENT std::unordered_map rapplyMap_;
+};
+}  // namespace copyset
+}  // namespace metaserver
+}  // namespace curvefs
+
+#endif  // CURVEFS_SRC_METASERVER_COPYSET_CONCURRENT_APPLY_QUEUE_H_
diff --git a/curvefs/src/metaserver/copyset/config.h
index ec60615462..b1fef4077b 100644
--- a/curvefs/src/metaserver/copyset/config.h
+++ b/curvefs/src/metaserver/copyset/config.h
@@ -31,7 +31,7 @@
 #include
 #include
-#include "curvefs/src/metaserver/copyset/apply_queue.h"
+#include "curvefs/src/metaserver/copyset/concurrent_apply_queue.h"
 #include "curvefs/src/metaserver/copyset/trash.h"
 #include "curvefs/src/metaserver/storage/config.h"
 #include "src/fs/local_filesystem.h"
@@ -74,7 +74,7 @@ struct CopysetNodeOptions {
     uint32_t checkLoadMarginIntervalMs;
     // apply queue options
-    ApplyQueueOption applyQueueOption;
+    ApplyOption applyQueueOption;
     // filesystem adaptor
     curve::fs::LocalFileSystem* localFileSystem;
diff --git a/curvefs/src/metaserver/copyset/copyset_node.cpp
index 48848577de..71f081c450 100644
--- a/curvefs/src/metaserver/copyset/copyset_node.cpp
+++ b/curvefs/src/metaserver/copyset/copyset_node.cpp
@@ -120,9 +120,8 @@ bool CopysetNode::Init(const CopysetNodeOptions& options) {
     // init apply queue
     applyQueue_ = absl::make_unique();
-    options_.applyQueueOption.copysetNode = this;
-    if (!applyQueue_->Start(options_.applyQueueOption)) {
-        LOG(ERROR) << "Start apply queue failed";
+    if (!applyQueue_->Init(options_.applyQueueOption)) {
+        LOG(ERROR) << "init concurrent apply queue failed";
         return false;
     }
@@
-258,6 +257,7 @@ void CopysetNode::on_apply(braft::Iterator& iter) { iter.index(), doneGuard.release(), TimeUtility::GetTimeofDayUs()); applyQueue_->Push(metaClosure->GetOperator()->HashCode(), + metaClosure->GetOperator()->GetOperatorType(), std::move(task)); timer.stop(); g_concurrent_apply_wait_latency << timer.u_elapsed(); @@ -268,10 +268,11 @@ void CopysetNode::on_apply(braft::Iterator& iter) { butil::Timer timer; timer.start(); auto hashcode = metaOperator->HashCode(); + auto type = metaOperator->GetOperatorType(); auto task = std::bind(&MetaOperator::OnApplyFromLog, metaOperator.release(), TimeUtility::GetTimeofDayUs()); - applyQueue_->Push(hashcode, std::move(task)); + applyQueue_->Push(hashcode, type, std::move(task)); timer.stop(); g_concurrent_apply_from_log_wait_latency << timer.u_elapsed(); } @@ -426,6 +427,9 @@ int CopysetNode::on_snapshot_load(braft::SnapshotReader* reader) { } void CopysetNode::on_leader_start(int64_t term) { + LOG(INFO) << "Copyset: " << name_ << ", peer id: " << peerId_.to_string() + << " going to flush apply queue, term is " << term; + applyQueue_->Flush(); leaderTerm_.store(term, std::memory_order_release); LOG(INFO) << "Copyset: " << name_ << ", peer id: " << peerId_.to_string() diff --git a/curvefs/src/metaserver/copyset/copyset_node.h b/curvefs/src/metaserver/copyset/copyset_node.h index 9cf8b9f75a..7ab1f46dd6 100644 --- a/curvefs/src/metaserver/copyset/copyset_node.h +++ b/curvefs/src/metaserver/copyset/copyset_node.h @@ -31,7 +31,7 @@ #include #include "curvefs/src/metaserver/common/types.h" -#include "curvefs/src/metaserver/copyset/apply_queue.h" +#include "curvefs/src/metaserver/copyset/concurrent_apply_queue.h" #include "curvefs/src/metaserver/copyset/conf_epoch_file.h" #include "curvefs/src/metaserver/copyset/config.h" #include "curvefs/src/metaserver/copyset/copyset_conf_change.h" @@ -123,7 +123,7 @@ class CopysetNode : public braft::StateMachine { #ifdef UNIT_TEST void SetMetaStore(MetaStore* metastore) { metaStore_.reset(metastore); } - void FlushApplyQueue() { applyQueue_->Flush(); } + void FlushApplyQueue() { applyQueue_->FlushAll(); } void SetRaftNode(RaftNode* raftNode) { raftNode_.reset(raftNode); } #endif // UNIT_TEST diff --git a/curvefs/src/metaserver/copyset/meta_operator.cpp b/curvefs/src/metaserver/copyset/meta_operator.cpp index ef88f4a601..06671f7ca5 100644 --- a/curvefs/src/metaserver/copyset/meta_operator.cpp +++ b/curvefs/src/metaserver/copyset/meta_operator.cpp @@ -37,8 +37,8 @@ #include "curvefs/src/metaserver/streaming_utils.h" #include "src/common/timeutility.h" -static bvar::LatencyRecorder g_concurrent_fast_apply_wait_latency( - "concurrent_fast_apply_wait"); +static bvar::LatencyRecorder + g_concurrent_fast_apply_wait_latency("concurrent_fast_apply_wait"); namespace curvefs { @@ -77,9 +77,7 @@ void MetaOperator::Propose() { } } -void MetaOperator::RedirectRequest() { - Redirect(); -} +void MetaOperator::RedirectRequest() { Redirect(); } bool MetaOperator::ProposeTask() { timerPropose.start(); @@ -109,73 +107,73 @@ void MetaOperator::FastApplyTask() { auto task = std::bind(&MetaOperator::OnApply, this, node_->GetAppliedIndex(), new MetaOperatorClosure(this), TimeUtility::GetTimeofDayUs()); - node_->GetApplyQueue()->Push(HashCode(), std::move(task)); + node_->GetApplyQueue()->Push(HashCode(), + GetOperatorType(), std::move(task)); timer.stop(); g_concurrent_fast_apply_wait_latency << timer.u_elapsed(); } bool GetInodeOperator::CanBypassPropose() const { - auto* req = static_cast(request_); + auto *req = 
static_cast(request_); return req->has_appliedindex() && node_->GetAppliedIndex() >= req->appliedindex(); } bool ListDentryOperator::CanBypassPropose() const { - auto* req = static_cast(request_); + auto *req = static_cast(request_); return req->has_appliedindex() && node_->GetAppliedIndex() >= req->appliedindex(); } bool BatchGetInodeAttrOperator::CanBypassPropose() const { - auto* req = static_cast(request_); + auto *req = static_cast(request_); return req->has_appliedindex() && node_->GetAppliedIndex() >= req->appliedindex(); } bool BatchGetXAttrOperator::CanBypassPropose() const { - auto* req = static_cast(request_); + auto *req = static_cast(request_); return req->has_appliedindex() && node_->GetAppliedIndex() >= req->appliedindex(); } bool GetDentryOperator::CanBypassPropose() const { - auto* req = static_cast(request_); + auto *req = static_cast(request_); return req->has_appliedindex() && node_->GetAppliedIndex() >= req->appliedindex(); } bool GetVolumeExtentOperator::CanBypassPropose() const { - const auto* req = static_cast(request_); + const auto *req = static_cast(request_); return req->has_appliedindex() && node_->GetAppliedIndex() >= req->appliedindex(); } -#define OPERATOR_ON_APPLY(TYPE) \ - void TYPE##Operator::OnApply(int64_t index, \ - google::protobuf::Closure* done, \ - uint64_t startTimeUs) { \ - brpc::ClosureGuard doneGuard(done); \ - uint64_t timeUs = TimeUtility::GetTimeofDayUs(); \ - node_->GetMetric()->WaitInQueueLatency( \ - OperatorType::TYPE, timeUs - startTimeUs); \ - auto status = node_->GetMetaStore()->TYPE( \ - static_cast(request_), \ - static_cast(response_)); \ - uint64_t executeTime = TimeUtility::GetTimeofDayUs() - timeUs; \ - node_->GetMetric()->ExecuteLatency( \ - OperatorType::TYPE, executeTime); \ - if (status == MetaStatusCode::OK) { \ - node_->UpdateAppliedIndex(index); \ - static_cast(response_)->set_appliedindex( \ - std::max(index, node_->GetAppliedIndex())); \ - node_->GetMetric()->OnOperatorComplete( \ - OperatorType::TYPE, \ - TimeUtility::GetTimeofDayUs() - startTimeUs, true); \ - } else { \ - node_->GetMetric()->OnOperatorComplete( \ - OperatorType::TYPE, \ - TimeUtility::GetTimeofDayUs() - startTimeUs, false); \ - } \ +#define OPERATOR_ON_APPLY(TYPE) \ + void TYPE##Operator::OnApply(int64_t index, \ + google::protobuf::Closure *done, \ + uint64_t startTimeUs) { \ + brpc::ClosureGuard doneGuard(done); \ + uint64_t timeUs = TimeUtility::GetTimeofDayUs(); \ + node_->GetMetric()->WaitInQueueLatency(OperatorType::TYPE, \ + timeUs - startTimeUs); \ + auto status = node_->GetMetaStore()->TYPE( \ + static_cast(request_), \ + static_cast(response_)); \ + uint64_t executeTime = TimeUtility::GetTimeofDayUs() - timeUs; \ + node_->GetMetric()->ExecuteLatency(OperatorType::TYPE, executeTime); \ + if (status == MetaStatusCode::OK) { \ + node_->UpdateAppliedIndex(index); \ + static_cast(response_)->set_appliedindex( \ + std::max(index, node_->GetAppliedIndex())); \ + node_->GetMetric()->OnOperatorComplete( \ + OperatorType::TYPE, \ + TimeUtility::GetTimeofDayUs() - startTimeUs, true); \ + } else { \ + node_->GetMetric()->OnOperatorComplete( \ + OperatorType::TYPE, \ + TimeUtility::GetTimeofDayUs() - startTimeUs, false); \ + } \ } OPERATOR_ON_APPLY(GetDentry); @@ -193,7 +191,7 @@ OPERATOR_ON_APPLY(CreateManageInode); OPERATOR_ON_APPLY(CreatePartition); OPERATOR_ON_APPLY(DeletePartition); OPERATOR_ON_APPLY(PrepareRenameTx); -OPERATOR_ON_APPLY(UpdateVolumeExtent);; +OPERATOR_ON_APPLY(UpdateVolumeExtent); #undef OPERATOR_ON_APPLY @@ -201,11 +199,11 @@ 
OPERATOR_ON_APPLY(UpdateVolumeExtent);; // so we redefine OnApply() and OnApplyFromLog() instead of using macro. // It may not be an elegant implementation, can you provide a better idea? void GetOrModifyS3ChunkInfoOperator::OnApply(int64_t index, - google::protobuf::Closure* done, + google::protobuf::Closure *done, uint64_t startTimeUs) { MetaStatusCode rc; - auto request = static_cast(request_); - auto response = static_cast(response_); + auto request = static_cast(request_); + auto response = static_cast(response_); auto metastore = node_->GetMetaStore(); std::shared_ptr connection; std::shared_ptr iterator; @@ -228,9 +226,8 @@ void GetOrModifyS3ChunkInfoOperator::OnApply(int64_t index, TimeUtility::GetTimeofDayUs() - startTimeUs, false); } - brpc::Controller* cntl = static_cast(cntl_); - if (rc != MetaStatusCode::OK || - !request->returns3chunkinfomap() || + brpc::Controller *cntl = static_cast(cntl_); + if (rc != MetaStatusCode::OK || !request->returns3chunkinfomap() || !request->supportstreaming()) { return; } @@ -251,12 +248,12 @@ void GetOrModifyS3ChunkInfoOperator::OnApply(int64_t index, } void GetVolumeExtentOperator::OnApply(int64_t index, - google::protobuf::Closure* done, + google::protobuf::Closure *done, uint64_t startTimeUs) { brpc::ClosureGuard doneGuard(done); - const auto* request = static_cast(request_); - auto* response = static_cast(response_); - auto* metaStore = node_->GetMetaStore(); + const auto *request = static_cast(request_); + auto *response = static_cast(response_); + auto *metaStore = node_->GetMetaStore(); auto st = metaStore->GetVolumeExtent(request, response); node_->GetMetric()->OnOperatorComplete( @@ -278,7 +275,7 @@ void GetVolumeExtentOperator::OnApply(int64_t index, response->clear_slices(); // accept client's streaming request - auto* cntl = static_cast(cntl_); + auto *cntl = static_cast(cntl_); auto streamingServer = metaStore->GetStreamServer(); auto connection = streamingServer->Accept(cntl); if (connection == nullptr) { @@ -298,15 +295,15 @@ void GetVolumeExtentOperator::OnApply(int64_t index, } } -#define OPERATOR_ON_APPLY_FROM_LOG(TYPE) \ - void TYPE##Operator::OnApplyFromLog(uint64_t startTimeUs) { \ - std::unique_ptr selfGuard(this); \ - TYPE##Response response; \ - auto status = node_->GetMetaStore()->TYPE( \ - static_cast(request_), &response); \ - node_->GetMetric()->OnOperatorCompleteFromLog( \ - OperatorType::TYPE, TimeUtility::GetTimeofDayUs() - startTimeUs, \ - status == MetaStatusCode::OK); \ +#define OPERATOR_ON_APPLY_FROM_LOG(TYPE) \ + void TYPE##Operator::OnApplyFromLog(uint64_t startTimeUs) { \ + std::unique_ptr selfGuard(this); \ + TYPE##Response response; \ + auto status = node_->GetMetaStore()->TYPE( \ + static_cast(request_), &response); \ + node_->GetMetric()->OnOperatorCompleteFromLog( \ + OperatorType::TYPE, TimeUtility::GetTimeofDayUs() - startTimeUs, \ + status == MetaStatusCode::OK); \ } OPERATOR_ON_APPLY_FROM_LOG(CreateDentry); @@ -328,7 +325,7 @@ void GetOrModifyS3ChunkInfoOperator::OnApplyFromLog(uint64_t startTimeUs) { GetOrModifyS3ChunkInfoRequest request; GetOrModifyS3ChunkInfoResponse response; std::shared_ptr iterator; - request = *static_cast(request_); + request = *static_cast(request_); request.set_returns3chunkinfomap(false); auto status = node_->GetMetaStore()->GetOrModifyS3ChunkInfo( &request, &response, &iterator); @@ -338,9 +335,10 @@ void GetOrModifyS3ChunkInfoOperator::OnApplyFromLog(uint64_t startTimeUs) { status == MetaStatusCode::OK); } -#define READONLY_OPERATOR_ON_APPLY_FROM_LOG(TYPE) \ - void 
TYPE##Operator::OnApplyFromLog(uint64_t startTimeUs) { \ - std::unique_ptr selfGuard(this); \ +#define READONLY_OPERATOR_ON_APPLY_FROM_LOG(TYPE) \ + void TYPE##Operator::OnApplyFromLog(uint64_t startTimeUs) { \ + (void)startTimeUs; \ + std::unique_ptr selfGuard(this); \ } // below operator are readonly, so on apply from log do nothing @@ -353,10 +351,10 @@ READONLY_OPERATOR_ON_APPLY_FROM_LOG(GetVolumeExtent); #undef READONLY_OPERATOR_ON_APPLY_FROM_LOG -#define OPERATOR_REDIRECT(TYPE) \ - void TYPE##Operator::Redirect() { \ - static_cast(response_)->set_statuscode( \ - MetaStatusCode::REDIRECTED); \ +#define OPERATOR_REDIRECT(TYPE) \ + void TYPE##Operator::Redirect() { \ + static_cast(response_)->set_statuscode( \ + MetaStatusCode::REDIRECTED); \ } OPERATOR_REDIRECT(GetDentry); @@ -380,9 +378,9 @@ OPERATOR_REDIRECT(UpdateVolumeExtent); #undef OPERATOR_REDIRECT -#define OPERATOR_ON_FAILED(TYPE) \ - void TYPE##Operator::OnFailed(MetaStatusCode code) { \ - static_cast(response_)->set_statuscode(code); \ +#define OPERATOR_ON_FAILED(TYPE) \ + void TYPE##Operator::OnFailed(MetaStatusCode code) { \ + static_cast(response_)->set_statuscode(code); \ } OPERATOR_ON_FAILED(GetDentry); @@ -406,9 +404,9 @@ OPERATOR_ON_FAILED(UpdateVolumeExtent); #undef OPERATOR_ON_FAILED -#define OPERATOR_HASH_CODE(TYPE) \ - uint64_t TYPE##Operator::HashCode() const { \ - return static_cast(request_)->partitionid(); \ +#define OPERATOR_HASH_CODE(TYPE) \ + uint64_t TYPE##Operator::HashCode() const { \ + return static_cast(request_)->partitionid(); \ } OPERATOR_HASH_CODE(GetDentry); @@ -431,20 +429,20 @@ OPERATOR_HASH_CODE(UpdateVolumeExtent); #undef OPERATOR_HASH_CODE -#define PARTITION_OPERATOR_HASH_CODE(TYPE) \ - uint64_t TYPE##Operator::HashCode() const { \ - return static_cast(request_) \ - ->partition() \ - .partitionid(); \ +#define PARTITION_OPERATOR_HASH_CODE(TYPE) \ + uint64_t TYPE##Operator::HashCode() const { \ + return static_cast(request_) \ + ->partition() \ + .partitionid(); \ } PARTITION_OPERATOR_HASH_CODE(CreatePartition); #undef PARTITION_OPERATOR_HASH_CODE -#define OPERATOR_TYPE(TYPE) \ - OperatorType TYPE##Operator::GetOperatorType() const { \ - return OperatorType::TYPE; \ +#define OPERATOR_TYPE(TYPE) \ + OperatorType TYPE##Operator::GetOperatorType() const { \ + return OperatorType::TYPE; \ } OPERATOR_TYPE(GetDentry); diff --git a/curvefs/src/metaserver/dentry_storage.cpp b/curvefs/src/metaserver/dentry_storage.cpp index dae9d7fd7d..5f4cfb2255 100644 --- a/curvefs/src/metaserver/dentry_storage.cpp +++ b/curvefs/src/metaserver/dentry_storage.cpp @@ -77,7 +77,7 @@ void DentryVector::Insert(const Dentry& dentry) { } void DentryVector::Delete(const Dentry& dentry) { - for (size_t i = 0; i < vec_->dentrys_size(); i++) { + for (int i = 0; i < vec_->dentrys_size(); i++) { if (vec_->dentrys(i) == dentry) { vec_->mutable_dentrys()->DeleteSubrange(i, 1); nPendingDel_ += 1; diff --git a/curvefs/src/metaserver/metaserver.cpp b/curvefs/src/metaserver/metaserver.cpp index 08fc1b717b..b129eba9c9 100644 --- a/curvefs/src/metaserver/metaserver.cpp +++ b/curvefs/src/metaserver/metaserver.cpp @@ -68,7 +68,6 @@ using ::curve::fs::FileSystemType; using ::curve::fs::LocalFsFactory; using ::curve::fs::LocalFileSystemOption; -using ::curvefs::metaserver::copyset::ApplyQueueOption; using ::curvefs::client::rpcclient::MetaServerClientImpl; using ::curvefs::client::rpcclient::ChannelManager; using ::curvefs::client::rpcclient::MetaCache; @@ -653,13 +652,18 @@ void Metaserver::InitCopysetNodeOptions() { 
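// [Editor's note] Metaserver::InitCopysetNodeOptions() in the hunk that
// follows now requires four apply-queue keys instead of the old
// applyqueue.worker_count / applyqueue.queue_depth pair, and LOG_IF(FATAL, ...)
// aborts startup if any of them is missing. The metaserver configuration file
// therefore needs entries along these lines (values shown are just the
// ApplyOption defaults from this patch, not tuning advice):
//
//   applyqueue.write_worker_count=3
//   applyqueue.write_queue_depth=1
//   applyqueue.read_worker_count=1
//   applyqueue.read_queue_depth=1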
"copyset.check_loadmargin_interval_ms", ©setNodeOptions_.checkLoadMarginIntervalMs)); - LOG_IF(FATAL, !conf_->GetUInt32Value( - "applyqueue.worker_count", - ©setNodeOptions_.applyQueueOption.workerCount)); - LOG_IF(FATAL, !conf_->GetUInt32Value( - "applyqueue.queue_depth", - ©setNodeOptions_.applyQueueOption.queueDepth)); - + LOG_IF(FATAL, !conf_->GetIntValue( + "applyqueue.write_worker_count", + ©setNodeOptions_.applyQueueOption.wconcurrentsize)); + LOG_IF(FATAL, !conf_->GetIntValue( + "applyqueue.write_queue_depth", + ©setNodeOptions_.applyQueueOption.wqueuedepth)); + LOG_IF(FATAL, !conf_->GetIntValue( + "applyqueue.read_worker_count", + ©setNodeOptions_.applyQueueOption.rconcurrentsize)); + LOG_IF(FATAL, !conf_->GetIntValue( + "applyqueue.read_queue_depth", + ©setNodeOptions_.applyQueueOption.rqueuedepth)); LOG_IF(FATAL, !conf_->GetStringValue("copyset.trash.uri", ©setNodeOptions_.trashOptions.trashUri)); diff --git a/curvefs/src/metaserver/metaserver.h b/curvefs/src/metaserver/metaserver.h index 70fd9c83ff..1c448f1e81 100644 --- a/curvefs/src/metaserver/metaserver.h +++ b/curvefs/src/metaserver/metaserver.h @@ -28,7 +28,6 @@ #include #include -#include "curvefs/src/metaserver/copyset/apply_queue.h" #include "curvefs/src/metaserver/copyset/config.h" #include "curvefs/src/metaserver/copyset/copyset_node_manager.h" #include "curvefs/src/metaserver/copyset/raft_cli_service2.h" @@ -52,7 +51,6 @@ namespace metaserver { using ::curve::common::Configuration; using ::curvefs::metaserver::storage::StorageOptions; -using ::curvefs::metaserver::copyset::ApplyQueue; using ::curvefs::metaserver::copyset::CopysetNodeManager; using ::curvefs::metaserver::copyset::CopysetNodeOptions; using ::curvefs::metaserver::copyset::CopysetServiceImpl; diff --git a/curvefs/src/metaserver/metastore.cpp b/curvefs/src/metaserver/metastore.cpp index fdb8ff9131..381afd96eb 100644 --- a/curvefs/src/metaserver/metastore.cpp +++ b/curvefs/src/metaserver/metastore.cpp @@ -23,6 +23,7 @@ #include #include +#include #include #include // NOLINT @@ -63,12 +64,13 @@ using ::curvefs::metaserver::storage::RocksDBStorage; using ::curvefs::metaserver::storage::StorageOptions; namespace { -const char* const kMetaDataFilename = "metadata"; +const char *const kMetaDataFilename = "metadata"; bvar::LatencyRecorder g_storage_checkpoint_latency("storage_checkpoint"); } // namespace -std::unique_ptr MetaStoreImpl::Create( - copyset::CopysetNode* node, const StorageOptions& storageOptions) { +std::unique_ptr +MetaStoreImpl::Create(copyset::CopysetNode *node, + const StorageOptions &storageOptions) { auto store = absl::WrapUnique(new MetaStoreImpl(node, storageOptions)); auto succ = store->InitStorage(); if (succ) { @@ -79,13 +81,12 @@ std::unique_ptr MetaStoreImpl::Create( return nullptr; } -MetaStoreImpl::MetaStoreImpl(copyset::CopysetNode* node, - const StorageOptions& storageOptions) - : copysetNode_(node), - streamServer_(std::make_shared()), +MetaStoreImpl::MetaStoreImpl(copyset::CopysetNode *node, + const StorageOptions &storageOptions) + : copysetNode_(node), streamServer_(std::make_shared()), storageOptions_(storageOptions) {} -bool MetaStoreImpl::Load(const std::string& pathname) { +bool MetaStoreImpl::Load(const std::string &pathname) { // Load from raft snap file to memory WriteLockGuard writeLockGuard(rwLock_); MetaStoreFStream fstream(&partitionMap_, kvStorage_, @@ -113,12 +114,12 @@ bool MetaStoreImpl::Load(const std::string& pathname) { std::shared_ptr recycleCleaner = std::make_shared(GetPartition(partitionId)); 
RecycleManager::GetInstance().Add(partitionId, recycleCleaner, - copysetNode_); + copysetNode_); } } auto startCompacts = [this]() { - for (auto& part : partitionMap_) { + for (auto &part : partitionMap_) { part.second->StartS3Compact(); } }; @@ -139,9 +140,9 @@ bool MetaStoreImpl::Load(const std::string& pathname) { return true; } -void MetaStoreImpl::SaveBackground(const std::string& path, - DumpFileClosure* child, - OnSnapshotSaveDoneClosure* done) { +void MetaStoreImpl::SaveBackground(const std::string &path, + DumpFileClosure *child, + OnSnapshotSaveDoneClosure *done) { LOG(INFO) << "Save metadata to file background."; MetaStoreFStream fstream(&partitionMap_, kvStorage_, copysetNode_->GetPoolId(), @@ -157,8 +158,8 @@ void MetaStoreImpl::SaveBackground(const std::string& path, done->Run(); } -bool MetaStoreImpl::Save(const std::string& dir, - OnSnapshotSaveDoneClosure* done) { +bool MetaStoreImpl::Save(const std::string &dir, + OnSnapshotSaveDoneClosure *done) { brpc::ClosureGuard doneGuard(done); WriteLockGuard writeLockGuard(rwLock_); @@ -188,10 +189,10 @@ bool MetaStoreImpl::Save(const std::string& dir, // add files to snapshot writer // file is a relative path under the given directory - auto* writer = done->GetSnapshotWriter(); + auto *writer = done->GetSnapshotWriter(); writer->add_file(kMetaDataFilename); - for (const auto& f : files) { + for (const auto &f : files) { writer->add_file(f); } @@ -236,11 +237,12 @@ bool MetaStoreImpl::Destroy() { return true; } -MetaStatusCode MetaStoreImpl::CreatePartition( - const CreatePartitionRequest* request, CreatePartitionResponse* response) { +MetaStatusCode +MetaStoreImpl::CreatePartition(const CreatePartitionRequest *request, + CreatePartitionResponse *response) { WriteLockGuard writeLockGuard(rwLock_); MetaStatusCode status; - const auto& partition = request->partition(); + const auto &partition = request->partition(); auto it = partitionMap_.find(partition.partitionid()); if (it != partitionMap_.end()) { // keep idempotence @@ -255,8 +257,9 @@ MetaStatusCode MetaStoreImpl::CreatePartition( return MetaStatusCode::OK; } -MetaStatusCode MetaStoreImpl::DeletePartition( - const DeletePartitionRequest* request, DeletePartitionResponse* response) { +MetaStatusCode +MetaStoreImpl::DeletePartition(const DeletePartitionRequest *request, + DeletePartitionResponse *response) { WriteLockGuard writeLockGuard(rwLock_); uint32_t partitionId = request->partitionid(); auto it = partitionMap_.find(partitionId); @@ -301,13 +304,13 @@ MetaStatusCode MetaStoreImpl::DeletePartition( } bool MetaStoreImpl::GetPartitionInfoList( - std::list* partitionInfoList) { + std::list *partitionInfoList) { // when metastore is loading, it will hold the rwLock_ for a long time. 
// and heartbeat will stuck when try to GetPartitionInfoList if use // ReadLockGuard to get the rwLock_ int ret = rwLock_.TryRDLock(); if (ret == 0) { - for (const auto& it : partitionMap_) { + for (const auto &it : partitionMap_) { PartitionInfo partitionInfo = it.second->GetPartitionInfo(); partitionInfoList->push_back(std::move(partitionInfo)); } @@ -325,8 +328,8 @@ std::shared_ptr MetaStoreImpl::GetStreamServer() { } // dentry -MetaStatusCode MetaStoreImpl::CreateDentry(const CreateDentryRequest* request, - CreateDentryResponse* response) { +MetaStatusCode MetaStoreImpl::CreateDentry(const CreateDentryRequest *request, + CreateDentryResponse *response) { ReadLockGuard readLockGuard(rwLock_); std::shared_ptr partition = GetPartition(request->partitionid()); if (partition == nullptr) { @@ -339,11 +342,11 @@ MetaStatusCode MetaStoreImpl::CreateDentry(const CreateDentryRequest* request, return status; } -MetaStatusCode MetaStoreImpl::GetDentry(const GetDentryRequest* request, - GetDentryResponse* response) { +MetaStatusCode MetaStoreImpl::GetDentry(const GetDentryRequest *request, + GetDentryResponse *response) { uint32_t fsId = request->fsid(); uint64_t parentInodeId = request->parentinodeid(); - const auto& name = request->name(); + const auto &name = request->name(); auto txId = request->txid(); ReadLockGuard readLockGuard(rwLock_); std::shared_ptr partition = GetPartition(request->partitionid()); @@ -368,8 +371,8 @@ MetaStatusCode MetaStoreImpl::GetDentry(const GetDentryRequest* request, return rc; } -MetaStatusCode MetaStoreImpl::DeleteDentry(const DeleteDentryRequest* request, - DeleteDentryResponse* response) { +MetaStatusCode MetaStoreImpl::DeleteDentry(const DeleteDentryRequest *request, + DeleteDentryResponse *response) { uint32_t fsId = request->fsid(); uint64_t parentInodeId = request->parentinodeid(); std::string name = request->name(); @@ -395,8 +398,8 @@ MetaStatusCode MetaStoreImpl::DeleteDentry(const DeleteDentryRequest* request, return rc; } -MetaStatusCode MetaStoreImpl::ListDentry(const ListDentryRequest* request, - ListDentryResponse* response) { +MetaStatusCode MetaStoreImpl::ListDentry(const ListDentryRequest *request, + ListDentryResponse *response) { uint32_t fsId = request->fsid(); uint64_t parentInodeId = request->dirinodeid(); auto txId = request->txid(); @@ -432,8 +435,9 @@ MetaStatusCode MetaStoreImpl::ListDentry(const ListDentryRequest* request, return rc; } -MetaStatusCode MetaStoreImpl::PrepareRenameTx( - const PrepareRenameTxRequest* request, PrepareRenameTxResponse* response) { +MetaStatusCode +MetaStoreImpl::PrepareRenameTx(const PrepareRenameTxRequest *request, + PrepareRenameTxResponse *response) { ReadLockGuard readLockGuard(rwLock_); MetaStatusCode rc; auto partitionId = request->partitionid(); @@ -451,8 +455,8 @@ MetaStatusCode MetaStoreImpl::PrepareRenameTx( } // inode -MetaStatusCode MetaStoreImpl::CreateInode(const CreateInodeRequest* request, - CreateInodeResponse* response) { +MetaStatusCode MetaStoreImpl::CreateInode(const CreateInodeRequest *request, + CreateInodeResponse *response) { InodeParam param; param.fsId = request->fsid(); param.length = request->length(); @@ -464,7 +468,8 @@ MetaStatusCode MetaStoreImpl::CreateInode(const CreateInodeRequest* request, param.rdev = request->rdev(); if (request->has_create()) { param.timestamp = absl::make_optional( - {request->create().sec(), request->create().nsec()}); + timespec{static_cast(request->create().sec()), + request->create().nsec()}); } param.symlink = ""; @@ -497,8 +502,9 @@ 
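// [Editor's note] The CreateInode/CreateRootInode hunks nearby replace the
// braced-init-list passed to absl::make_optional with an explicitly named
// timespec, casting the protobuf sec() field to the timespec member type so
// the narrowing conversion is spelled out instead of happening inside braces.
// A small sketch of the same pattern (field types are assumptions, not taken
// from the proto definition):
#include <ctime>
#include <cstdint>
#include "absl/types/optional.h"

absl::optional<timespec> MakeTimestamp(uint64_t sec, uint32_t nsec) {
    // timespec{sec, nsec} would be a narrowing conversion inside braces;
    // naming the struct and casting makes the intent explicit.
    return absl::make_optional(
        timespec{static_cast<time_t>(sec), static_cast<long>(nsec)});
}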
MetaStatusCode MetaStoreImpl::CreateInode(const CreateInodeRequest* request, return status; } -MetaStatusCode MetaStoreImpl::CreateRootInode( - const CreateRootInodeRequest* request, CreateRootInodeResponse* response) { +MetaStatusCode +MetaStoreImpl::CreateRootInode(const CreateRootInodeRequest *request, + CreateRootInodeResponse *response) { InodeParam param; param.fsId = request->fsid(); param.uid = request->uid(); @@ -510,7 +516,8 @@ MetaStatusCode MetaStoreImpl::CreateRootInode( param.parent = 0; if (request->has_create()) { param.timestamp = absl::make_optional( - {request->create().sec(), request->create().nsec()}); + timespec{static_cast(request->create().sec()), + request->create().nsec()}); } ReadLockGuard readLockGuard(rwLock_); @@ -532,9 +539,9 @@ MetaStatusCode MetaStoreImpl::CreateRootInode( return status; } -MetaStatusCode MetaStoreImpl::CreateManageInode( - const CreateManageInodeRequest* request, - CreateManageInodeResponse* response) { +MetaStatusCode +MetaStoreImpl::CreateManageInode(const CreateManageInodeRequest *request, + CreateManageInodeResponse *response) { InodeParam param; param.fsId = request->fsid(); param.uid = request->uid(); @@ -560,9 +567,8 @@ MetaStatusCode MetaStoreImpl::CreateManageInode( return status; } - MetaStatusCode status = - partition->CreateManageInode(param, request->managetype(), - response->mutable_inode()); + MetaStatusCode status = partition->CreateManageInode( + param, request->managetype(), response->mutable_inode()); response->set_statuscode(status); if (status != MetaStatusCode::OK) { LOG(ERROR) << "CreateManageInode fail, fsId = " << param.fsId @@ -585,8 +591,8 @@ MetaStatusCode MetaStoreImpl::CreateManageInode( return MetaStatusCode::OK; } -MetaStatusCode MetaStoreImpl::GetInode(const GetInodeRequest* request, - GetInodeResponse* response) { +MetaStatusCode MetaStoreImpl::GetInode(const GetInodeRequest *request, + GetInodeResponse *response) { uint32_t fsId = request->fsid(); uint64_t inodeId = request->inodeid(); @@ -598,7 +604,7 @@ MetaStatusCode MetaStoreImpl::GetInode(const GetInodeRequest* request, return status; } - Inode* inode = response->mutable_inode(); + Inode *inode = response->mutable_inode(); MetaStatusCode rc = partition->GetInode(fsId, inodeId, inode); // NOTE: the following two cases we should padding inode's s3chunkinfo: // (1): for RPC requests which unsupport streaming @@ -623,9 +629,9 @@ MetaStatusCode MetaStoreImpl::GetInode(const GetInodeRequest* request, return rc; } -MetaStatusCode MetaStoreImpl::BatchGetInodeAttr( - const BatchGetInodeAttrRequest* request, - BatchGetInodeAttrResponse* response) { +MetaStatusCode +MetaStoreImpl::BatchGetInodeAttr(const BatchGetInodeAttrRequest *request, + BatchGetInodeAttrResponse *response) { ReadLockGuard readLockGuard(rwLock_); std::shared_ptr partition = GetPartition(request->partitionid()); if (partition == nullptr) { @@ -649,8 +655,8 @@ MetaStatusCode MetaStoreImpl::BatchGetInodeAttr( return status; } -MetaStatusCode MetaStoreImpl::BatchGetXAttr(const BatchGetXAttrRequest* request, - BatchGetXAttrResponse* response) { +MetaStatusCode MetaStoreImpl::BatchGetXAttr(const BatchGetXAttrRequest *request, + BatchGetXAttrResponse *response) { ReadLockGuard readLockGuard(rwLock_); std::shared_ptr partition = GetPartition(request->partitionid()); if (partition == nullptr) { @@ -674,8 +680,8 @@ MetaStatusCode MetaStoreImpl::BatchGetXAttr(const BatchGetXAttrRequest* request, return status; } -MetaStatusCode MetaStoreImpl::DeleteInode(const DeleteInodeRequest* request, - 
DeleteInodeResponse* response) { +MetaStatusCode MetaStoreImpl::DeleteInode(const DeleteInodeRequest *request, + DeleteInodeResponse *response) { uint32_t fsId = request->fsid(); uint64_t inodeId = request->inodeid(); @@ -692,8 +698,8 @@ MetaStatusCode MetaStoreImpl::DeleteInode(const DeleteInodeRequest* request, return status; } -MetaStatusCode MetaStoreImpl::UpdateInode(const UpdateInodeRequest* request, - UpdateInodeResponse* response) { +MetaStatusCode MetaStoreImpl::UpdateInode(const UpdateInodeRequest *request, + UpdateInodeResponse *response) { ReadLockGuard readLockGuard(rwLock_); VLOG(9) << "UpdateInode inode " << request->inodeid(); std::shared_ptr partition = GetPartition(request->partitionid()); @@ -709,9 +715,9 @@ MetaStatusCode MetaStoreImpl::UpdateInode(const UpdateInodeRequest* request, } MetaStatusCode MetaStoreImpl::GetOrModifyS3ChunkInfo( - const GetOrModifyS3ChunkInfoRequest* request, - GetOrModifyS3ChunkInfoResponse* response, - std::shared_ptr* iterator) { + const GetOrModifyS3ChunkInfoRequest *request, + GetOrModifyS3ChunkInfoResponse *response, + std::shared_ptr *iterator) { MetaStatusCode rc; ReadLockGuard readLockGuard(rwLock_); auto partition = GetPartition(request->partitionid()); @@ -736,9 +742,9 @@ MetaStatusCode MetaStoreImpl::GetOrModifyS3ChunkInfo( return rc; } -void MetaStoreImpl::PrepareStreamBuffer(butil::IOBuf* buffer, +void MetaStoreImpl::PrepareStreamBuffer(butil::IOBuf *buffer, uint64_t chunkIndex, - const std::string& value) { + const std::string &value) { buffer->clear(); buffer->append(std::to_string(chunkIndex)); buffer->append(":"); @@ -782,8 +788,9 @@ std::shared_ptr MetaStoreImpl::GetPartition(uint32_t partitionId) { return nullptr; } -MetaStatusCode MetaStoreImpl::GetVolumeExtent( - const GetVolumeExtentRequest* request, GetVolumeExtentResponse* response) { +MetaStatusCode +MetaStoreImpl::GetVolumeExtent(const GetVolumeExtentRequest *request, + GetVolumeExtentResponse *response) { ReadLockGuard guard(rwLock_); auto partition = GetPartition(request->partitionid()); if (!partition) { @@ -804,9 +811,9 @@ MetaStatusCode MetaStoreImpl::GetVolumeExtent( return st; } -MetaStatusCode MetaStoreImpl::UpdateVolumeExtent( - const UpdateVolumeExtentRequest* request, - UpdateVolumeExtentResponse* response) { +MetaStatusCode +MetaStoreImpl::UpdateVolumeExtent(const UpdateVolumeExtentRequest *request, + UpdateVolumeExtentResponse *response) { ReadLockGuard guard(rwLock_); auto partition = GetPartition(request->partitionid()); if (!partition) { diff --git a/curvefs/src/metaserver/metastore_fstream.cpp b/curvefs/src/metaserver/metastore_fstream.cpp index 7744eaae4c..edf0cb8c32 100644 --- a/curvefs/src/metaserver/metastore_fstream.cpp +++ b/curvefs/src/metaserver/metastore_fstream.cpp @@ -37,34 +37,32 @@ namespace curvefs { namespace metaserver { using ::curvefs::common::PartitionInfo; -using ::curvefs::metaserver::Inode; using ::curvefs::metaserver::Dentry; +using ::curvefs::metaserver::Inode; +using ::curvefs::metaserver::storage::ContainerIterator; using ::curvefs::metaserver::storage::ENTRY_TYPE; -using ::curvefs::metaserver::storage::SaveToFile; -using ::curvefs::metaserver::storage::LoadFromFile; using ::curvefs::metaserver::storage::IteratorWrapper; -using ::curvefs::metaserver::storage::ContainerIterator; +using ::curvefs::metaserver::storage::LoadFromFile; +using ::curvefs::metaserver::storage::SaveToFile; using ContainerType = std::unordered_map; using STORAGE_TYPE = ::curvefs::metaserver::storage::KVStorage::STORAGE_TYPE; -using ChildrenType = 
::curvefs::metaserver::storage::MergeIterator::ChildrenType; // NOLINT +using ChildrenType = + ::curvefs::metaserver::storage::MergeIterator::ChildrenType; // NOLINT using DumpFileClosure = ::curvefs::metaserver::storage::DumpFileClosure; using Key4S3ChunkInfoList = ::curvefs::metaserver::storage::Key4S3ChunkInfoList; using ::curvefs::metaserver::storage::Key4VolumeExtentSlice; -MetaStoreFStream::MetaStoreFStream(PartitionMap* partitionMap, +MetaStoreFStream::MetaStoreFStream(PartitionMap *partitionMap, std::shared_ptr kvStorage, - PoolId poolId, - CopysetId copysetId) - : partitionMap_(partitionMap), - kvStorage_(std::move(kvStorage)), - conv_(std::make_shared()), - poolId_(poolId), + PoolId poolId, CopysetId copysetId) + : partitionMap_(partitionMap), kvStorage_(std::move(kvStorage)), + conv_(std::make_shared()), poolId_(poolId), copysetId_(copysetId) {} -std::shared_ptr MetaStoreFStream::GetPartition( - uint32_t partitionId) { +std::shared_ptr +MetaStoreFStream::GetPartition(uint32_t partitionId) { auto iter = partitionMap_->find(partitionId); if (iter != partitionMap_->end()) { return iter->second; @@ -73,8 +71,9 @@ std::shared_ptr MetaStoreFStream::GetPartition( } bool MetaStoreFStream::LoadPartition(uint32_t partitionId, - const std::string& key, - const std::string& value) { + const std::string &key, + const std::string &value) { + (void)key; PartitionInfo partitionInfo; if (!conv_->ParseFromString(value, &partitionInfo)) { LOG(ERROR) << "Decode PartitionInfo failed"; @@ -97,9 +96,9 @@ bool MetaStoreFStream::LoadPartition(uint32_t partitionId, return true; } -bool MetaStoreFStream::LoadInode(uint32_t partitionId, - const std::string& key, - const std::string& value) { +bool MetaStoreFStream::LoadInode(uint32_t partitionId, const std::string &key, + const std::string &value) { + (void)key; auto partition = GetPartition(partitionId); if (nullptr == partition) { LOG(ERROR) << "Partition not found, partitionId = " << partitionId; @@ -121,10 +120,10 @@ bool MetaStoreFStream::LoadInode(uint32_t partitionId, return true; } -bool MetaStoreFStream::LoadDentry(uint8_t version, - uint32_t partitionId, - const std::string& key, - const std::string& value) { +bool MetaStoreFStream::LoadDentry(uint8_t version, uint32_t partitionId, + const std::string &key, + const std::string &value) { + (void)key; auto partition = GetPartition(partitionId); if (nullptr == partition) { LOG(ERROR) << "Partition not found, partitionId = " << partitionId; @@ -154,8 +153,9 @@ bool MetaStoreFStream::LoadDentry(uint8_t version, } bool MetaStoreFStream::LoadPendingTx(uint32_t partitionId, - const std::string& key, - const std::string& value) { + const std::string &key, + const std::string &value) { + (void)key; auto partition = GetPartition(partitionId); if (nullptr == partition) { LOG(ERROR) << "Partition not found, partitionId = " << partitionId; @@ -176,8 +176,8 @@ bool MetaStoreFStream::LoadPendingTx(uint32_t partitionId, } bool MetaStoreFStream::LoadInodeS3ChunkInfoList(uint32_t partitionId, - const std::string& key, - const std::string& value) { + const std::string &key, + const std::string &value) { auto partition = GetPartition(partitionId); if (nullptr == partition) { LOG(ERROR) << "Partition not found, partitionId = " << partitionId; @@ -209,8 +209,8 @@ bool MetaStoreFStream::LoadInodeS3ChunkInfoList(uint32_t partitionId, } bool MetaStoreFStream::LoadVolumeExtentList(uint32_t partitionId, - const std::string& key, - const std::string& value) { + const std::string &key, + const std::string &value) { auto 
partition = GetPartition(partitionId); if (!partition) { LOG(ERROR) << "Partition not found, partitionId: " << partitionId; @@ -244,7 +244,7 @@ bool MetaStoreFStream::LoadVolumeExtentList(uint32_t partitionId, std::shared_ptr MetaStoreFStream::NewPartitionIterator() { std::string value; auto container = std::make_shared(); - for (const auto& item : *partitionMap_) { + for (const auto &item : *partitionMap_) { auto partitionId = item.first; auto partition = item.second; auto partitionInfo = partition->GetPartitionInfo(); @@ -256,36 +256,36 @@ std::shared_ptr MetaStoreFStream::NewPartitionIterator() { container->emplace(std::to_string(partitionId), value); } - auto iterator = std::make_shared>( - container); - return std::make_shared( - ENTRY_TYPE::PARTITION, 0, iterator); + auto iterator = + std::make_shared>(container); + return std::make_shared(ENTRY_TYPE::PARTITION, 0, + iterator); } -std::shared_ptr MetaStoreFStream::NewInodeIterator( - std::shared_ptr partition) { +std::shared_ptr +MetaStoreFStream::NewInodeIterator(std::shared_ptr partition) { auto partitionId = partition->GetPartitionId(); auto iterator = partition->GetAllInode(); if (iterator->Status() != 0) { return nullptr; } - return std::make_shared( - ENTRY_TYPE::INODE, partitionId, iterator); + return std::make_shared(ENTRY_TYPE::INODE, partitionId, + iterator); } -std::shared_ptr MetaStoreFStream::NewDentryIterator( - std::shared_ptr partition) { +std::shared_ptr +MetaStoreFStream::NewDentryIterator(std::shared_ptr partition) { auto partitionId = partition->GetPartitionId(); auto iterator = partition->GetAllDentry(); if (iterator->Status() != 0) { return nullptr; } - return std::make_shared( - ENTRY_TYPE::DENTRY, partitionId, iterator); + return std::make_shared(ENTRY_TYPE::DENTRY, partitionId, + iterator); } -std::shared_ptr MetaStoreFStream::NewPendingTxIterator( - std::shared_ptr partition) { +std::shared_ptr +MetaStoreFStream::NewPendingTxIterator(std::shared_ptr partition) { std::string value; PrepareRenameTxRequest pendingTx; auto container = std::make_shared(); @@ -297,10 +297,10 @@ std::shared_ptr MetaStoreFStream::NewPendingTxIterator( } auto partitionId = partition->GetPartitionId(); - auto iterator = std::make_shared>( - container); - return std::make_shared( - ENTRY_TYPE::PENDING_TX, partitionId, iterator); + auto iterator = + std::make_shared>(container); + return std::make_shared(ENTRY_TYPE::PENDING_TX, + partitionId, iterator); } std::shared_ptr MetaStoreFStream::NewInodeS3ChunkInfoListIterator( @@ -310,12 +310,12 @@ std::shared_ptr MetaStoreFStream::NewInodeS3ChunkInfoListIterator( if (iterator->Status() != 0) { return nullptr; } - return std::make_shared( - ENTRY_TYPE::S3_CHUNK_INFO_LIST, partitionId, iterator); + return std::make_shared(ENTRY_TYPE::S3_CHUNK_INFO_LIST, + partitionId, iterator); } -std::shared_ptr MetaStoreFStream::NewVolumeExtentListIterator( - Partition* partition) { +std::shared_ptr +MetaStoreFStream::NewVolumeExtentListIterator(Partition *partition) { auto partitionId = partition->GetPartitionId(); auto iterator = partition->GetAllVolumeExtentList(); if (iterator->Status() != 0) { @@ -326,7 +326,7 @@ std::shared_ptr MetaStoreFStream::NewVolumeExtentListIterator( partitionId, std::move(iterator)); } -bool MetaStoreFStream::Load(const std::string& pathname, uint8_t* version) { +bool MetaStoreFStream::Load(const std::string &pathname, uint8_t *version) { uint64_t totalPartition = 0; uint64_t totalInode = 0; uint64_t totalDentry = 0; @@ -334,32 +334,30 @@ bool MetaStoreFStream::Load(const 
std::string& pathname, uint8_t* version) { uint64_t totalVolumeExtent = 0; uint64_t totalPendingTx = 0; - auto callback = [&](uint8_t version, - ENTRY_TYPE entryType, - uint32_t partitionId, - const std::string& key, - const std::string& value) -> bool { + auto callback = [&](uint8_t version, ENTRY_TYPE entryType, + uint32_t partitionId, const std::string &key, + const std::string &value) -> bool { switch (entryType) { - case ENTRY_TYPE::PARTITION: - ++totalPartition; - return LoadPartition(partitionId, key, value); - case ENTRY_TYPE::INODE: - ++totalInode; - return LoadInode(partitionId, key, value); - case ENTRY_TYPE::DENTRY: - ++totalDentry; - return LoadDentry(version, partitionId, key, value); - case ENTRY_TYPE::PENDING_TX: - ++totalPendingTx; - return LoadPendingTx(partitionId, key, value); - case ENTRY_TYPE::S3_CHUNK_INFO_LIST: - ++totalS3ChunkInfoList; - return LoadInodeS3ChunkInfoList(partitionId, key, value); - case ENTRY_TYPE::VOLUME_EXTENT: - ++totalVolumeExtent; - return LoadVolumeExtentList(partitionId, key, value); - case ENTRY_TYPE::UNKNOWN: - break; + case ENTRY_TYPE::PARTITION: + ++totalPartition; + return LoadPartition(partitionId, key, value); + case ENTRY_TYPE::INODE: + ++totalInode; + return LoadInode(partitionId, key, value); + case ENTRY_TYPE::DENTRY: + ++totalDentry; + return LoadDentry(version, partitionId, key, value); + case ENTRY_TYPE::PENDING_TX: + ++totalPendingTx; + return LoadPendingTx(partitionId, key, value); + case ENTRY_TYPE::S3_CHUNK_INFO_LIST: + ++totalS3ChunkInfoList; + return LoadInodeS3ChunkInfoList(partitionId, key, value); + case ENTRY_TYPE::VOLUME_EXTENT: + ++totalVolumeExtent; + return LoadVolumeExtentList(partitionId, key, value); + case ENTRY_TYPE::UNKNOWN: + break; } LOG(ERROR) << "Load failed, unknown entry type"; @@ -388,16 +386,15 @@ bool MetaStoreFStream::Load(const std::string& pathname, uint8_t* version) { return ret; } -bool MetaStoreFStream::Save(const std::string& path, - DumpFileClosure* done) { +bool MetaStoreFStream::Save(const std::string &path, DumpFileClosure *done) { ChildrenType children; children.push_back(NewPartitionIterator()); - for (const auto& item : *partitionMap_) { + for (const auto &item : *partitionMap_) { children.push_back(NewPendingTxIterator(item.second)); } - for (const auto& child : children) { + for (const auto &child : children) { if (nullptr == child) { if (done != nullptr) { done->Runned(); diff --git a/curvefs/src/metaserver/partition_cleaner.cpp b/curvefs/src/metaserver/partition_cleaner.cpp index 1d3d830f62..99960ca169 100644 --- a/curvefs/src/metaserver/partition_cleaner.cpp +++ b/curvefs/src/metaserver/partition_cleaner.cpp @@ -113,6 +113,7 @@ MetaStatusCode PartitionCleaner::CleanDataAndDeleteInode(const Inode& inode) { s3Adaptor_->GetS3ClientAdaptorOption(&clientAdaptorOption); clientAdaptorOption.blockSize = s3Info.blocksize(); clientAdaptorOption.chunkSize = s3Info.chunksize(); + clientAdaptorOption.objectPrefix = s3Info.objectprefix(); s3Adaptor_->Reinit(clientAdaptorOption, s3Info.ak(), s3Info.sk(), s3Info.endpoint(), s3Info.bucketname()); int retVal = s3Adaptor_->Delete(inode); diff --git a/curvefs/src/metaserver/recycle_cleaner.cpp b/curvefs/src/metaserver/recycle_cleaner.cpp index 95341206f5..bc087a704a 100644 --- a/curvefs/src/metaserver/recycle_cleaner.cpp +++ b/curvefs/src/metaserver/recycle_cleaner.cpp @@ -63,7 +63,7 @@ bool RecycleCleaner::IsDirTimeOut(const std::string& dir) { struct tm tmDir; memset(&tmDir, 0, sizeof(tmDir)); - char* c = strptime(dir.c_str(), "%Y-%m-%d-%H", &tmDir); 
+ (void)strptime(dir.c_str(), "%Y-%m-%d-%H", &tmDir); time_t dirTime = mktime(&tmDir); if (dirTime <= 0) { diff --git a/curvefs/src/metaserver/s3/metaserver_s3_adaptor.cpp b/curvefs/src/metaserver/s3/metaserver_s3_adaptor.cpp index 3ff34fb261..e671ec8ec5 100644 --- a/curvefs/src/metaserver/s3/metaserver_s3_adaptor.cpp +++ b/curvefs/src/metaserver/s3/metaserver_s3_adaptor.cpp @@ -33,6 +33,7 @@ void S3ClientAdaptorImpl::Init(const S3ClientAdaptorOption &option, chunkSize_ = option.chunkSize; batchSize_ = option.batchSize; enableDeleteObjects_ = option.enableDeleteObjects; + objectPrefix_ = option.objectPrefix; client_ = client; } @@ -43,6 +44,7 @@ void S3ClientAdaptorImpl::Reinit(const S3ClientAdaptorOption& option, chunkSize_ = option.chunkSize; batchSize_ = option.batchSize; enableDeleteObjects_ = option.enableDeleteObjects; + objectPrefix_ = option.objectPrefix; client_->Reinit(ak, sk, endpoint, bucketName); } @@ -98,7 +100,7 @@ int S3ClientAdaptorImpl::DeleteChunk(uint64_t fsId, uint64_t inodeId, while (length > blockSize_ * count - blockPos || count == 0) { // divide chunks to blocks, and delete these blocks std::string objectName = curvefs::common::s3util::GenObjName( - chunkId, blockIndex, compaction, fsId, inodeId); + chunkId, blockIndex, compaction, fsId, inodeId, objectPrefix_); int delStat = client_->Delete(objectName); if (delStat < 0) { // fail @@ -197,7 +199,7 @@ void S3ClientAdaptorImpl::GenObjNameListForChunkInfo( for (int i = 0; i < count; i++) { // divide chunks to blocks, and delete these blocks std::string objectName = curvefs::common::s3util::GenObjName( - chunkId, blockIndex, compaction, fsId, inodeId); + chunkId, blockIndex, compaction, fsId, inodeId, objectPrefix_); objList->push_back(objectName); ++blockIndex; @@ -211,6 +213,7 @@ void S3ClientAdaptorImpl::GetS3ClientAdaptorOption( option->chunkSize = chunkSize_; option->batchSize = batchSize_; option->enableDeleteObjects = enableDeleteObjects_; + option->objectPrefix = objectPrefix_; } } // namespace metaserver diff --git a/curvefs/src/metaserver/s3/metaserver_s3_adaptor.h b/curvefs/src/metaserver/s3/metaserver_s3_adaptor.h index d3f31d86b1..2e97fc372e 100644 --- a/curvefs/src/metaserver/s3/metaserver_s3_adaptor.h +++ b/curvefs/src/metaserver/s3/metaserver_s3_adaptor.h @@ -35,6 +35,7 @@ struct S3ClientAdaptorOption { uint64_t blockSize; uint64_t chunkSize; uint64_t batchSize; + uint32_t objectPrefix; bool enableDeleteObjects; }; @@ -148,6 +149,7 @@ class S3ClientAdaptorImpl : public S3ClientAdaptor { uint64_t blockSize_; uint64_t chunkSize_; uint64_t batchSize_; + uint32_t objectPrefix_; bool enableDeleteObjects_; }; } // namespace metaserver diff --git a/curvefs/src/metaserver/s3compact_inode.cpp b/curvefs/src/metaserver/s3compact_inode.cpp index 4d7dcedd0b..feb195883a 100644 --- a/curvefs/src/metaserver/s3compact_inode.cpp +++ b/curvefs/src/metaserver/s3compact_inode.cpp @@ -195,7 +195,8 @@ void CompactInodeJob::GenS3ReadRequests( beginRoundDown + index * blockSize <= curr->end; index++) { // read the block obj std::string objName = curvefs::common::s3util::GenObjName( - curr->chunkid, index, curr->compaction, ctx.fsId, ctx.inodeId); + curr->chunkid, index, curr->compaction, ctx.fsId, + ctx.inodeId, ctx.objectPrefix); uint64_t s3objBegin = std::max(curr->chunkoff, beginRoundDown + index * blockSize); uint64_t s3objEnd = @@ -349,7 +350,7 @@ int CompactInodeJob::WriteFullChunk( index * blockSize + offRoundDown < newOff + chunkLen; index += 1) { std::string objName = curvefs::common::s3util::GenObjName( 
newChunkInfo.newChunkId, index, newChunkInfo.newCompaction, - ctx.fsId, ctx.inodeId); + ctx.fsId, ctx.inodeId, ctx.objectPrefix); const Aws::String aws_key(objName.c_str(), objName.size()); int ret; uint64_t s3objBegin = @@ -407,7 +408,8 @@ bool CompactInodeJob::CompactPrecheck(const struct S3CompactTask& task, S3Adapter* CompactInodeJob::SetupS3Adapter(uint64_t fsId, uint64_t* s3adapterIndex, uint64_t* blockSize, - uint64_t* chunkSize) { + uint64_t* chunkSize, + uint32_t* objectPrefix) { auto pairResult = opts_->s3adapterManager->GetS3Adapter(); *s3adapterIndex = pairResult.first; auto s3adapter = pairResult.second; @@ -421,6 +423,7 @@ S3Adapter* CompactInodeJob::SetupS3Adapter(uint64_t fsId, if (status == 0) { *blockSize = s3info.blocksize(); *chunkSize = s3info.chunksize(); + *objectPrefix = s3info.objectprefix(); if (s3adapter->GetS3Ak() != s3info.ak() || s3adapter->GetS3Sk() != s3info.sk() || s3adapter->GetS3Endpoint() != s3info.endpoint()) { @@ -528,7 +531,7 @@ void CompactInodeJob::DeleteObjsOfS3ChunkInfoList( offRoundDown + index * ctx.blockSize < off + len; index++) { std::string objName = curvefs::common::s3util::GenObjName( chunkinfo.chunkid(), index, chunkinfo.compaction(), ctx.fsId, - ctx.inodeId); + ctx.inodeId, ctx.objectPrefix); VLOG(6) << "s3compact: delete " << objName; const Aws::String aws_key(objName.c_str(), objName.size()); int r = ctx.s3adapter->DeleteObject( @@ -555,8 +558,9 @@ void CompactInodeJob::CompactChunks(const S3CompactTask& task) { uint64_t blockSize; uint64_t chunkSize; uint64_t s3adapterIndex; + uint32_t objectPrefix; S3Adapter* s3adapter = SetupS3Adapter(task.inodeKey.fsId, &s3adapterIndex, - &blockSize, &chunkSize); + &blockSize, &chunkSize, &objectPrefix); if (s3adapter == nullptr) return; // need compact? std::vector needCompact = @@ -570,7 +574,7 @@ void CompactInodeJob::CompactChunks(const S3CompactTask& task) { // 1. 
read full chunk & write new objs, each chunk one by one struct S3CompactCtx compactCtx { task.inodeKey.inodeId, task.inodeKey.fsId, task.pinfo, blockSize, - chunkSize, s3adapterIndex, s3adapter + chunkSize, s3adapterIndex, objectPrefix, s3adapter }; std::unordered_map> objsAddedMap; ::google::protobuf::Map s3ChunkInfoAdd; diff --git a/curvefs/src/metaserver/s3compact_inode.h b/curvefs/src/metaserver/s3compact_inode.h index 288e072107..5b79c6263c 100644 --- a/curvefs/src/metaserver/s3compact_inode.h +++ b/curvefs/src/metaserver/s3compact_inode.h @@ -98,6 +98,7 @@ class CompactInodeJob { uint64_t blockSize; uint64_t chunkSize; uint64_t s3adapterIndex; + uint32_t objectPrefix; S3Adapter* s3adapter; }; @@ -170,7 +171,8 @@ class CompactInodeJob { uint64_t inodeLen, uint64_t chunkSize); bool CompactPrecheck(const struct S3CompactTask& task, Inode* inode); S3Adapter* SetupS3Adapter(uint64_t fsid, uint64_t* s3adapterIndex, - uint64_t* blockSize, uint64_t* chunkSize); + uint64_t* blockSize, uint64_t* chunkSize, + uint32_t* objectPrefix); void DeleteObjs(const std::vector& objsAdded, S3Adapter* s3adapter); std::list BuildValidList( diff --git a/curvefs/src/metaserver/s3compact_manager.cpp b/curvefs/src/metaserver/s3compact_manager.cpp index c41a95d659..4c5bea7648 100644 --- a/curvefs/src/metaserver/s3compact_manager.cpp +++ b/curvefs/src/metaserver/s3compact_manager.cpp @@ -22,6 +22,7 @@ #include "curvefs/src/metaserver/s3compact_manager.h" +#include #include #include @@ -45,7 +46,7 @@ void S3AdapterManager::Init() { std::lock_guard lock(mtx_); if (inited_) return; used_.resize(size_); - for (int i = 0; i < size_; i++) { + for (uint64_t i = 0; i < size_; i++) { s3adapters_.emplace_back(new S3Adapter()); } for (auto& s3adapter : s3adapters_) { diff --git a/curvefs/src/metaserver/storage/dumpfile.cpp b/curvefs/src/metaserver/storage/dumpfile.cpp index 3465c1919e..dd2d03c4f0 100644 --- a/curvefs/src/metaserver/storage/dumpfile.cpp +++ b/curvefs/src/metaserver/storage/dumpfile.cpp @@ -178,7 +178,7 @@ DUMPFILE_ERROR DumpFile::Write(const char* buffer, if (ret < 0) { LOG(ERROR) << "Write file failed, retCode = " << ret; return DUMPFILE_ERROR::WRITE_FAILED; - } else if (ret != length) { + } else if (ret != static_cast(length)) { LOG(ERROR) << "Write file failed, expect write " << length << " bytes, actual write " << ret << " bytes"; return DUMPFILE_ERROR::WRITE_FAILED; @@ -192,7 +192,7 @@ DUMPFILE_ERROR DumpFile::Read(char* buffer, off_t offset, size_t length) { if (ret < 0) { LOG(ERROR) << "Read file failed, retCode = " << ret; return DUMPFILE_ERROR::READ_FAILED; - } else if (ret != length) { + } else if (ret != static_cast(length)) { LOG(ERROR) << "Read file failed, expect read " << length << " bytes, actual read " << ret << " bytes"; return DUMPFILE_ERROR::READ_FAILED; @@ -375,6 +375,7 @@ DUMPFILE_ERROR DumpFile::WaitSaveDone(pid_t childpid) { } void DumpFile::SignalHandler(int signo, siginfo_t* siginfo, void* ucontext) { + (void)ucontext; auto pid = (siginfo && siginfo->si_pid) ? 
siginfo->si_pid : -1; LOG(INFO) << "Signal " << signo << " received from " << pid; _exit(2); @@ -398,7 +399,6 @@ DUMPFILE_ERROR DumpFile::InitSignals() { DUMPFILE_ERROR DumpFile::CloseSockets() { std::vector names; - pid_t pid = getpid(); if (fs_->List("/proc/self/fd", &names) != 0) { return DUMPFILE_ERROR::LIST_FAILED; } diff --git a/curvefs/src/metaserver/storage/iterator.h b/curvefs/src/metaserver/storage/iterator.h index 0c591cf2e6..cd6e7393fe 100644 --- a/curvefs/src/metaserver/storage/iterator.h +++ b/curvefs/src/metaserver/storage/iterator.h @@ -50,11 +50,11 @@ class Iterator { virtual std::string Value() = 0; - virtual const ValueType* RawValue() const { return nullptr; } + virtual const ValueType *RawValue() const { return nullptr; } virtual int Status() = 0; - virtual bool ParseFromValue(ValueType* value) { return true; } + virtual bool ParseFromValue(ValueType * /*value*/) { return true; } virtual void DisablePrefixChecking() {} }; @@ -64,24 +64,21 @@ class MergeIterator : public Iterator { using ChildrenType = std::vector>; public: - explicit MergeIterator(const ChildrenType& children) - : current_(nullptr), children_(children) { - } + explicit MergeIterator(const ChildrenType &children) + : children_(children), current_(nullptr) {} uint64_t Size() override { uint64_t size = 0; - for (const auto& child : children_) { + for (const auto &child : children_) { size += child->Size(); } return size; } - bool Valid() override { - return (current_ != nullptr) && (Status() == 0); - } + bool Valid() override { return (current_ != nullptr) && (Status() == 0); } void SeekToFirst() override { - for (const auto& child : children_) { + for (const auto &child : children_) { child->SeekToFirst(); } FindCurrent(); @@ -92,16 +89,12 @@ class MergeIterator : public Iterator { FindCurrent(); } - std::string Key() override { - return current_->Key(); - } + std::string Key() override { return current_->Key(); } - std::string Value() override { - return current_->Value(); - } + std::string Value() override { return current_->Value(); } int Status() override { - for (const auto& child : children_) { + for (const auto &child : children_) { if (child->Status() != 0) { return child->Status(); } @@ -112,7 +105,7 @@ class MergeIterator : public Iterator { private: void FindCurrent() { current_ = nullptr; - for (const auto& child : children_) { + for (const auto &child : children_) { if (child->Valid()) { current_ = child; break; @@ -125,39 +118,24 @@ class MergeIterator : public Iterator { std::shared_ptr current_; }; -template -class ContainerIterator : public Iterator { +template class ContainerIterator : public Iterator { public: explicit ContainerIterator(std::shared_ptr container) : container_(container) {} - uint64_t Size() override { - return container_->size(); - } + uint64_t Size() override { return container_->size(); } - bool Valid() override { - return iter_ != container_->end(); - } + bool Valid() override { return iter_ != container_->end(); } - void SeekToFirst() override { - iter_ = container_->begin(); - } + void SeekToFirst() override { iter_ = container_->begin(); } - void Next() override { - iter_++; - } + void Next() override { iter_++; } - std::string Key() override { - return iter_->first; - } + std::string Key() override { return iter_->first; } - std::string Value() override { - return iter_->second; - } + std::string Value() override { return iter_->second; } - int Status() override { - return 0; - } + int Status() override { return 0; } protected: const std::shared_ptr container_; 
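The iterator.h hunk above only reformats MergeIterator and ContainerIterator; behavior is unchanged. For readers skimming the diff, here is a minimal, self-contained sketch of the container-backed iterator pattern with the same Size/Valid/SeekToFirst/Next/Key/Value/Status surface. The class name SimpleContainerIterator is mine, and the sketch deliberately omits the Iterator base class and the RawValue/ParseFromValue hooks of the real curvefs type.

    #include <cstdint>
    #include <iostream>
    #include <memory>
    #include <string>
    #include <unordered_map>

    // Simplified stand-in for the container-backed iterator shown above: it
    // walks a shared key/value container and exposes the same small surface.
    template <typename ContainerType>
    class SimpleContainerIterator {
     public:
        explicit SimpleContainerIterator(std::shared_ptr<ContainerType> container)
            : container_(container), iter_(container_->end()) {}

        uint64_t Size() const { return container_->size(); }
        bool Valid() const { return iter_ != container_->end(); }
        void SeekToFirst() { iter_ = container_->begin(); }
        void Next() { ++iter_; }
        std::string Key() const { return iter_->first; }
        std::string Value() const { return iter_->second; }
        int Status() const { return 0; }  // in-memory iteration cannot fail

     private:
        std::shared_ptr<ContainerType> container_;
        typename ContainerType::const_iterator iter_;
    };

    int main() {
        using Container = std::unordered_map<std::string, std::string>;
        auto container = std::make_shared<Container>(
            Container{{"1", "partition-1"}, {"2", "partition-2"}});

        SimpleContainerIterator<Container> it(container);
        for (it.SeekToFirst(); it.Valid(); it.Next()) {
            std::cout << it.Key() << " => " << it.Value() << "\n";
        }
        return it.Status();
    }
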
diff --git a/curvefs/src/metaserver/storage/memory_storage.cpp b/curvefs/src/metaserver/storage/memory_storage.cpp index 5979d8f977..feef149e1f 100644 --- a/curvefs/src/metaserver/storage/memory_storage.cpp +++ b/curvefs/src/metaserver/storage/memory_storage.cpp @@ -301,11 +301,14 @@ StorageOptions MemoryStorage::GetStorageOptions() const { bool MemoryStorage::Checkpoint(const std::string& dir, std::vector* files) { + (void)dir; + (void)files; LOG(WARNING) << "Not supported"; return false; } bool MemoryStorage::Recover(const std::string& dir) { + (void)dir; LOG(WARNING) << "Not supported"; return false; } diff --git a/curvefs/src/metaserver/storage/memory_storage.h b/curvefs/src/metaserver/storage/memory_storage.h index f5c18a6474..127d51da80 100644 --- a/curvefs/src/metaserver/storage/memory_storage.h +++ b/curvefs/src/metaserver/storage/memory_storage.h @@ -149,10 +149,10 @@ class MemoryStorageIterator : public Iterator { public: MemoryStorageIterator(std::shared_ptr container, const std::string& prefix) - : container_(container), - prefix_(prefix), + : prefix_(prefix), + status_(0), prefixChecking_(true), - status_(0) {} + container_(container) {} // NOTE: now we can't caclute the size for range operate uint64_t Size() override { diff --git a/curvefs/src/metaserver/storage/rocksdb_event_listener.cpp b/curvefs/src/metaserver/storage/rocksdb_event_listener.cpp index 61d5b80442..dbbae10828 100644 --- a/curvefs/src/metaserver/storage/rocksdb_event_listener.cpp +++ b/curvefs/src/metaserver/storage/rocksdb_event_listener.cpp @@ -44,6 +44,7 @@ MetricEventListener::MetricEventListener() void MetricEventListener::OnFlushBegin(rocksdb::DB* db, const rocksdb::FlushJobInfo& /*info*/) { + (void)db; flushing_ << 1; rocksdbFlushStart = butil::cpuwide_time_us(); } @@ -51,6 +52,7 @@ void MetricEventListener::OnFlushBegin(rocksdb::DB* db, void MetricEventListener::OnFlushCompleted( rocksdb::DB* db, const rocksdb::FlushJobInfo& info) { + (void)db; flushing_ << -1; flushLatency_ << (butil::cpuwide_time_us() - rocksdbFlushStart); flushedBytes_ << info.table_properties.data_size; @@ -64,6 +66,7 @@ void MetricEventListener::OnMemTableSealed( void MetricEventListener::OnCompactionBegin( rocksdb::DB* db, const rocksdb::CompactionJobInfo& /*info*/) { + (void)db; compacting_ << 1; rocksdbCompactionStart = butil::cpuwide_time_us(); } @@ -71,6 +74,7 @@ void MetricEventListener::OnCompactionBegin( void MetricEventListener::OnCompactionCompleted( rocksdb::DB* db, const rocksdb::CompactionJobInfo& /*info*/) { + (void)db; compacting_ << -1; compactionLatency_ << (butil::cpuwide_time_us() - rocksdbCompactionStart); } diff --git a/curvefs/src/metaserver/storage/rocksdb_storage.h b/curvefs/src/metaserver/storage/rocksdb_storage.h index 5ab36cc5a1..e0023dd8e2 100644 --- a/curvefs/src/metaserver/storage/rocksdb_storage.h +++ b/curvefs/src/metaserver/storage/rocksdb_storage.h @@ -374,13 +374,13 @@ class RocksDBStorageIterator : public Iterator { } private: + RocksDBStorage* storage_; std::string prefix_; uint64_t size_; int status_; - bool ordered_; bool prefixChecking_; + bool ordered_; std::unique_ptr iter_; - RocksDBStorage* storage_; rocksdb::ReadOptions readOptions_; }; diff --git a/curvefs/src/metaserver/storage/storage_fstream.h b/curvefs/src/metaserver/storage/storage_fstream.h index 1b2d3551b5..c4f24aa28d 100644 --- a/curvefs/src/metaserver/storage/storage_fstream.h +++ b/curvefs/src/metaserver/storage/storage_fstream.h @@ -37,8 +37,8 @@ namespace curvefs { namespace metaserver { namespace storage { -using 
::curve::common::StringToUl; using ::curve::common::SplitString; +using ::curve::common::StringToUl; using ::curvefs::common::PartitionInfo; enum class ENTRY_TYPE { @@ -63,7 +63,7 @@ static const std::vector pairs{ }; static std::string Type2Str(ENTRY_TYPE t) { - for (const auto& pair : pairs) { + for (const auto &pair : pairs) { if (pair.first == t) { return pair.second; } @@ -71,8 +71,8 @@ static std::string Type2Str(ENTRY_TYPE t) { return ""; } -static ENTRY_TYPE Str2Type(const std::string& s) { - for (const auto& pair : pairs) { +static ENTRY_TYPE Str2Type(const std::string &s) { + for (const auto &pair : pairs) { if (pair.second == s) { return pair.first; } @@ -80,13 +80,12 @@ static ENTRY_TYPE Str2Type(const std::string& s) { return ENTRY_TYPE::UNKNOWN; } -static std::string InternalKey(ENTRY_TYPE t, - uint32_t partitionId, - const std::string& ukey) { +static std::string InternalKey(ENTRY_TYPE t, uint32_t partitionId, + const std::string &ukey) { return absl::StrCat(Type2Str(t), partitionId, ":", ukey); } -static std::pair UserKey(const std::string& ikey) { +static std::pair UserKey(const std::string &ikey) { std::string prefix, ukey; std::vector items; SplitString(ikey, ":", &items); @@ -99,14 +98,14 @@ static std::pair UserKey(const std::string& ikey) { return std::make_pair(prefix, ukey); } -static std::pair Extract(const std::string& prefix) { +static std::pair Extract(const std::string &prefix) { if (prefix.size() == 0) { return std::make_pair(ENTRY_TYPE::UNKNOWN, 0); } std::vector items{ prefix.substr(0, 1), // eg: i - prefix.substr(1), // eg: 100 + prefix.substr(1), // eg: 100 }; ENTRY_TYPE entryType = Str2Type(items[0]); @@ -117,10 +116,9 @@ static std::pair Extract(const std::string& prefix) { return std::make_pair(entryType, partitionId); } -inline bool SaveToFile(const std::string& pathname, - std::shared_ptr iterator, - bool background, - DumpFileClosure* done = nullptr) { +inline bool SaveToFile(const std::string &pathname, + std::shared_ptr iterator, bool background, + DumpFileClosure *done = nullptr) { auto dumpfile = DumpFile(pathname); if (dumpfile.Open() != DUMPFILE_ERROR::OK) { LOG(ERROR) << "Open dumpfile failed"; @@ -144,15 +142,12 @@ inline bool SaveToFile(const std::string& pathname, return (rc == DUMPFILE_ERROR::OK) && (iterator->Status() == 0); } -template -inline bool InvokeCallback(uint8_t version, - ENTRY_TYPE entryType, - uint32_t partitionId, - const std::string& key, - const std::string& value, - Callback&& callback) { - bool succ = std::forward(callback)( - version, entryType, partitionId, key, value); +template +inline bool InvokeCallback(uint8_t version, ENTRY_TYPE entryType, + uint32_t partitionId, const std::string &key, + const std::string &value, Callback &&callback) { + bool succ = std::forward(callback)(version, entryType, + partitionId, key, value); if (!succ) { LOG(ERROR) << "Invoke callback for entry failed."; return false; @@ -160,16 +155,16 @@ inline bool InvokeCallback(uint8_t version, return true; } -#define CASE_TYPE_CALLBACK(TYPE) \ - case ENTRY_TYPE::TYPE: \ - if (!InvokeCallback(version, entryType, partitionId, \ - key, value, callback)) { \ - return false; \ - } \ +#define CASE_TYPE_CALLBACK(TYPE) \ + case ENTRY_TYPE::TYPE: \ + if (!InvokeCallback(version, entryType, partitionId, key, value, \ + callback)) { \ + return false; \ + } \ break template -inline bool LoadFromFile(const std::string& pathname, uint8_t* version, +inline bool LoadFromFile(const std::string &pathname, uint8_t *version, Callback callback) { auto dumpfile = 
DumpFile(pathname); if (dumpfile.Open() != DUMPFILE_ERROR::OK) { @@ -179,8 +174,8 @@ inline bool LoadFromFile(const std::string& pathname, uint8_t* version, auto iter = dumpfile.Load(); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { - auto ikey = iter->Key(); // internal key - auto ukey = UserKey(ikey); // + auto ikey = iter->Key(); // internal key + auto ukey = UserKey(ikey); // auto pair = Extract(ukey.first); // prefix ENTRY_TYPE entryType = pair.first; @@ -195,9 +190,9 @@ inline bool LoadFromFile(const std::string& pathname, uint8_t* version, CASE_TYPE_CALLBACK(PENDING_TX); CASE_TYPE_CALLBACK(S3_CHUNK_INFO_LIST); CASE_TYPE_CALLBACK(VOLUME_EXTENT); - default: - LOG(ERROR) << "Unknown entry type, key = " << key; - return false; + default: + LOG(ERROR) << "Unknown entry type, key = " << key; + return false; } } @@ -209,45 +204,29 @@ inline bool LoadFromFile(const std::string& pathname, uint8_t* version, // contain entry type and partition id. class IteratorWrapper : public Iterator { public: - IteratorWrapper(ENTRY_TYPE entryType, - uint32_t partitionId, + IteratorWrapper(ENTRY_TYPE entryType, uint32_t partitionId, std::shared_ptr iterator) - : entryType_(entryType), - partitionId_(partitionId), + : entryType_(entryType), partitionId_(partitionId), iterator_(std::move(iterator)) {} - uint64_t Size() override { - return iterator_->Size(); - } + uint64_t Size() override { return iterator_->Size(); } - bool Valid() override { - return iterator_->Valid(); - } + bool Valid() override { return iterator_->Valid(); } - void SeekToFirst() override { - iterator_->SeekToFirst(); - } + void SeekToFirst() override { iterator_->SeekToFirst(); } - void Next() override { - iterator_->Next(); - } + void Next() override { iterator_->Next(); } std::string Key() override { auto key = iterator_->Key(); return InternalKey(entryType_, partitionId_, key); } - std::string Value() override { - return iterator_->Value(); - } + std::string Value() override { return iterator_->Value(); } - bool ParseFromValue(ValueType* value) override { - return true; - } + bool ParseFromValue(ValueType * /*value*/) override { return true; } - int Status() override { - return iterator_->Status(); - } + int Status() override { return iterator_->Status(); } protected: ENTRY_TYPE entryType_; diff --git a/curvefs/src/metaserver/transaction.cpp b/curvefs/src/metaserver/transaction.cpp index 22c637a91f..fb3659f96d 100644 --- a/curvefs/src/metaserver/transaction.cpp +++ b/curvefs/src/metaserver/transaction.cpp @@ -81,7 +81,7 @@ inline bool RenameTx::operator==(const RenameTx& rhs) { std::ostream& operator<<(std::ostream& os, const RenameTx& renameTx) { auto dentrys = renameTx.dentrys_; os << "txId = " << renameTx.txId_; - for (int i = 0; i < dentrys.size(); i++) { + for (size_t i = 0; i < dentrys.size(); i++) { os << ", dentry[" << i << "] = (" << dentrys[i].ShortDebugString() << ")"; } diff --git a/curvefs/src/metaserver/trash.cpp b/curvefs/src/metaserver/trash.cpp index 56aba00a3d..dbd01908e7 100644 --- a/curvefs/src/metaserver/trash.cpp +++ b/curvefs/src/metaserver/trash.cpp @@ -205,6 +205,7 @@ MetaStatusCode TrashImpl::DeleteInodeAndData(const TrashItem &item) { s3Adaptor_->GetS3ClientAdaptorOption(&clientAdaptorOption); clientAdaptorOption.blockSize = s3Info.blocksize(); clientAdaptorOption.chunkSize = s3Info.chunksize(); + clientAdaptorOption.objectPrefix = s3Info.objectprefix(); s3Adaptor_->Reinit(clientAdaptorOption, s3Info.ak(), s3Info.sk(), s3Info.endpoint(), s3Info.bucketname()); int retVal = s3Adaptor_->Delete(inode); 
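Both partition_cleaner.cpp (earlier) and trash.cpp (above) now copy the new objectprefix field from the filesystem's S3Info into the adaptor option before Reinit, so that deletions build object names with the same prefix the client used when writing. A hedged sketch of that wiring follows; the struct names are stand-ins (the real S3Info is a protobuf message and the real option lives in metaserver_s3_adaptor.h).

    #include <cstdint>
    #include <iostream>

    // Stand-ins modelling only the fields touched by this diff.
    struct S3InfoFixture {
        uint64_t blocksize = 1048576;
        uint64_t chunksize = 4194304;
        uint32_t objectprefix = 0;   // new field threaded through by this change
    };

    struct AdaptorOption {
        uint64_t blockSize = 0;
        uint64_t chunkSize = 0;
        uint32_t objectPrefix = 0;   // mirrors the new member added to the option
    };

    // Mirrors the repeated pattern: refresh the per-fs sizing fields *and* the
    // object prefix before reinitializing the S3 adaptor.
    AdaptorOption BuildOption(const S3InfoFixture &s3Info, AdaptorOption option) {
        option.blockSize = s3Info.blocksize;
        option.chunkSize = s3Info.chunksize;
        option.objectPrefix = s3Info.objectprefix;
        return option;
    }

    int main() {
        S3InfoFixture info;
        info.objectprefix = 1;
        AdaptorOption opt = BuildOption(info, AdaptorOption{});
        std::cout << "objectPrefix = " << opt.objectPrefix << "\n";
        return opt.objectPrefix == 1 ? 0 : 1;
    }
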
diff --git a/curvefs/src/tools/create/curvefs_create_fs.cpp b/curvefs/src/tools/create/curvefs_create_fs.cpp index 42e47fab8f..d8fd0641fd 100644 --- a/curvefs/src/tools/create/curvefs_create_fs.cpp +++ b/curvefs/src/tools/create/curvefs_create_fs.cpp @@ -57,6 +57,7 @@ DECLARE_string(s3_endpoint); DECLARE_string(s3_bucket_name); DECLARE_uint64(s3_blocksize); DECLARE_uint64(s3_chunksize); +DECLARE_uint32(s3_objectPrefix); DECLARE_uint32(rpcTimeoutMs); DECLARE_uint32(rpcRetryTimes); DECLARE_bool(enableSumInDir); @@ -98,6 +99,7 @@ void CreateFsTool::PrintHelp() { << " -s3_bucket_name=" << FLAGS_s3_bucket_name << " -s3_blocksize=" << FLAGS_s3_blocksize << " -s3_chunksize=" << FLAGS_s3_chunksize + << " -s3_objectPrefix=" << FLAGS_s3_objectPrefix << "]\n[-fsType=hybrid -volumeBlockGroupSize=" << FLAGS_volumeBlockGroupSize << " -volumeBlockSize=" << FLAGS_volumeBlockSize @@ -109,7 +111,9 @@ void CreateFsTool::PrintHelp() { << " -s3_endpoint=" << FLAGS_s3_endpoint << " -s3_bucket_name=" << FLAGS_s3_bucket_name << " -s3_blocksize=" << FLAGS_s3_blocksize - << " -s3_chunksize=" << FLAGS_s3_chunksize << "]" << std::endl; + << " -s3_chunksize=" << FLAGS_s3_chunksize + << " -s3_objectPrefix=" << FLAGS_s3_objectPrefix + << "]" << std::endl; } void CreateFsTool::AddUpdateFlags() { @@ -129,6 +133,7 @@ void CreateFsTool::AddUpdateFlags() { AddUpdateFlagsFunc(curvefs::tools::SetS3_bucket_name); AddUpdateFlagsFunc(curvefs::tools::SetS3_blocksize); AddUpdateFlagsFunc(curvefs::tools::SetS3_chunksize); + AddUpdateFlagsFunc(curvefs::tools::SetS3_objectPrefix); AddUpdateFlagsFunc(curvefs::tools::SetRpcTimeoutMs); AddUpdateFlagsFunc(curvefs::tools::SetRpcRetryTimes); AddUpdateFlagsFunc(curvefs::tools::SetEnableSumInDir); @@ -173,6 +178,7 @@ int CreateFsTool::Init() { s3->set_bucketname(FLAGS_s3_bucket_name); s3->set_blocksize(FLAGS_s3_blocksize); s3->set_chunksize(FLAGS_s3_chunksize); + s3->set_objectprefix(FLAGS_s3_objectPrefix); request.mutable_fsdetail()->set_allocated_s3info(s3); return 0; }; diff --git a/curvefs/src/tools/curvefs_tool_define.cpp b/curvefs/src/tools/curvefs_tool_define.cpp index a67fa7de2b..221eaee4e3 100644 --- a/curvefs/src/tools/curvefs_tool_define.cpp +++ b/curvefs/src/tools/curvefs_tool_define.cpp @@ -74,6 +74,7 @@ DEFINE_string(s3_endpoint, "endpoint", "s3 endpoint"); DEFINE_string(s3_bucket_name, "bucketname", "s3 bucket name"); DEFINE_uint64(s3_blocksize, 1048576, "s3 block size"); DEFINE_uint64(s3_chunksize, 4194304, "s3 chunk size"); +DEFINE_uint32(s3_objectPrefix, 0, "object prefix"); DEFINE_bool(enableSumInDir, false, "statistic info in xattr"); DEFINE_uint64(capacity, (uint64_t)100 * 1024 * 1024 * 1024, "capacity of fs, default 100G"); @@ -249,6 +250,12 @@ std::function std::placeholders::_2, "s3_chunksize", "s3.chunksize", &FLAGS_s3_chunksize); +std::function + SetS3_objectPrefix = + std::bind(&SetDiffFlagInfo, std::placeholders::_1, + std::placeholders::_2, "s3_objectPrefix", "s3.objectPrefix", + &FLAGS_s3_objectPrefix); + std::function SetEnableSumInDir = std::bind(&SetFlagInfo, std::placeholders::_1, std::placeholders::_2, "enableSumInDir", diff --git a/curvefs/src/tools/curvefs_tool_define.h b/curvefs/src/tools/curvefs_tool_define.h index bfed146a03..819588a539 100644 --- a/curvefs/src/tools/curvefs_tool_define.h +++ b/curvefs/src/tools/curvefs_tool_define.h @@ -271,6 +271,9 @@ extern std::function SetS3_chunksize; +extern std::function + SetS3_objectPrefix; extern std::function SetEnableSumInDir; diff --git a/curvefs/src/tools/list/curvefs_copysetinfo_list.cpp 
b/curvefs/src/tools/list/curvefs_copysetinfo_list.cpp index d8e04c5f63..3ddc1d5a62 100644 --- a/curvefs/src/tools/list/curvefs_copysetinfo_list.cpp +++ b/curvefs/src/tools/list/curvefs_copysetinfo_list.cpp @@ -49,11 +49,10 @@ int CopysetInfoListTool::Init() { return 0; } -void CopysetInfoListTool::AddUpdateFlags() { - AddUpdateFlagsFunc(SetMdsAddr); -} +void CopysetInfoListTool::AddUpdateFlags() { AddUpdateFlagsFunc(SetMdsAddr); } -bool CopysetInfoListTool::AfterSendRequestToHost(const std::string& host) { +bool CopysetInfoListTool::AfterSendRequestToHost(const std::string &host) { + (void)host; bool ret = true; if (controller_->Failed()) { errorOutput_ << "get all copysetInfo from [ " << FLAGS_mdsAddr @@ -62,11 +61,12 @@ bool CopysetInfoListTool::AfterSendRequestToHost(const std::string& host) { << std::endl; ret = false; } else if (show_) { - for (auto const& i : response_->copysetvalues()) { + for (auto const &i : response_->copysetvalues()) { std::cout << "copyset[" << copyset::GetCopysetKey(i.copysetinfo().poolid(), i.copysetinfo().copysetid()) - << "]:" << std::endl << i.DebugString() << std::endl; + << "]:" << std::endl + << i.DebugString() << std::endl; } std::cout << std::endl; } diff --git a/curvefs/src/tools/query/curvefs_copyset_query.cpp b/curvefs/src/tools/query/curvefs_copyset_query.cpp index 2c2947e298..ab7c308643 100644 --- a/curvefs/src/tools/query/curvefs_copyset_query.cpp +++ b/curvefs/src/tools/query/curvefs_copyset_query.cpp @@ -126,7 +126,7 @@ bool CopysetQueryTool::AfterSendRequestToHost(const std::string& host) { for (auto const& j : key2Status_[i]) { std::cout << j.ShortDebugString() << std::endl; } - if (key2Status_[i].size() != + if (static_cast(key2Status_[i].size()) != key2Info_[i][0].copysetinfo().peers().size()) { std::cerr << "copysetStatus not match the number of " "copysetInfo's peers!" 
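Several hunks in this change (dumpfile.cpp earlier, copyset_query.cpp above, block_device_client.cpp below) exist only to silence -Wsign-compare by casting one side before comparing a signed return value against an unsigned length. A minimal illustration of the pattern, assuming a POSIX-style write that reports bytes written as ssize_t; FakeWrite is a toy stand-in, not a curve API.

    #include <cstddef>
    #include <cstdio>
    #include <sys/types.h>  // ssize_t

    // Toy stand-in for a write-like call: -1 on error, otherwise bytes handled.
    static ssize_t FakeWrite(const char * /*buffer*/, size_t length) {
        return static_cast<ssize_t>(length);  // pretend the full buffer was written
    }

    // The pattern used throughout this diff: handle the error case first, then
    // cast the (now known non-negative) result before comparing it with the
    // unsigned length, so -Wsign-compare stays quiet without masking errors.
    static bool WriteAll(const char *buffer, size_t length) {
        ssize_t ret = FakeWrite(buffer, length);
        if (ret < 0) {
            std::fprintf(stderr, "write failed, retCode = %zd\n", ret);
            return false;
        }
        if (static_cast<size_t>(ret) != length) {
            std::fprintf(stderr, "short write: expected %zu, got %zd\n", length, ret);
            return false;
        }
        return true;
    }

    int main() {
        const char data[] = "curvefs";
        return WriteAll(data, sizeof(data)) ? 0 : 1;
    }
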
diff --git a/curvefs/src/volume/block_device_aio.cpp b/curvefs/src/volume/block_device_aio.cpp index 922d0424af..8ad673beaf 100644 --- a/curvefs/src/volume/block_device_aio.cpp +++ b/curvefs/src/volume/block_device_aio.cpp @@ -41,29 +41,29 @@ using ::curve::common::is_aligned; namespace { -const char* ToString(LIBCURVE_OP op) { +const char *ToString(LIBCURVE_OP op) { switch (op) { - case LIBCURVE_OP_READ: - return "Read"; - case LIBCURVE_OP_WRITE: - return "Write"; - case LIBCURVE_OP_DISCARD: - return "Discard"; - default: - return "Unknown"; + case LIBCURVE_OP_READ: + return "Read"; + case LIBCURVE_OP_WRITE: + return "Write"; + case LIBCURVE_OP_DISCARD: + return "Discard"; + default: + return "Unknown"; } } -std::ostream& operator<<(std::ostream& os, CurveAioContext* aio) { +std::ostream &operator<<(std::ostream &os, CurveAioContext *aio) { os << "[off: " << aio->offset << ", len: " << aio->length << ", ret: " << aio->ret << ", type: " << ToString(aio->op) << "]"; return os; } -void AioReadCallBack(CurveAioContext* aio) { - AioRead* read = reinterpret_cast(reinterpret_cast(aio) - - offsetof(AioRead, aio)); +void AioReadCallBack(CurveAioContext *aio) { + AioRead *read = reinterpret_cast(reinterpret_cast(aio) - + offsetof(AioRead, aio)); { std::lock_guard lock(read->mtx); @@ -72,9 +72,9 @@ void AioReadCallBack(CurveAioContext* aio) { read->cond.notify_one(); } -void AioWriteCallBack(CurveAioContext* aio) { - AioWrite* write = reinterpret_cast(reinterpret_cast(aio) - - offsetof(AioWrite, aio)); +void AioWriteCallBack(CurveAioContext *aio) { + AioWrite *write = reinterpret_cast( + reinterpret_cast(aio) - offsetof(AioWrite, aio)); { std::lock_guard lock(write->mtx); @@ -83,19 +83,16 @@ void AioWriteCallBack(CurveAioContext* aio) { write->cond.notify_one(); } -void AioWritePaddingReadCallBack(CurveAioContext* aio) { - AioWrite::PaddingRead* padding = reinterpret_cast( - reinterpret_cast(aio) - offsetof(AioWrite::PaddingRead, aio)); +void AioWritePaddingReadCallBack(CurveAioContext *aio) { + AioWrite::PaddingRead *padding = reinterpret_cast( + reinterpret_cast(aio) - offsetof(AioWrite::PaddingRead, aio)); padding->base->OnPaddingReadComplete(aio); } } // namespace -AioRead::AioRead(off_t offset, - size_t length, - char* data, - FileClient* dev, +AioRead::AioRead(off_t offset, size_t length, char *data, FileClient *dev, int fd) : aio(), offset(offset), length(length), data(data), dev(dev), fd(fd) {} @@ -158,11 +155,8 @@ ssize_t AioRead::Wait() { return length; } -AioWrite::AioWrite(off_t offset, - size_t length, - const char* data, - FileClient* dev, - int fd) +AioWrite::AioWrite(off_t offset, size_t length, const char *data, + FileClient *dev, int fd) : offset(offset), length(length), data(data), dev(dev), fd(fd) {} void AioWrite::Issue() { @@ -173,7 +167,7 @@ void AioWrite::Issue() { aio.cb = AioWriteCallBack; aio.offset = offset; aio.length = length; - aio.buf = const_cast(data); + aio.buf = const_cast(data); int ret = dev->AioWrite(fd, &aio); if (ret < 0) { @@ -207,7 +201,8 @@ void AioWrite::Issue() { ++idx; } - if (offset + length > lastPaddingEnd && offset + length != alignedEnd) { + if (static_cast(offset + length) > lastPaddingEnd && + offset + length != alignedEnd) { off_t start = alignedEnd - IO_ALIGNED_BLOCK_SIZE; if (paddingStart && start == lastPaddingEnd) { aux->paddingReads[idx - 1].length += IO_ALIGNED_BLOCK_SIZE; @@ -226,7 +221,7 @@ void AioWrite::Issue() { aux->npadding.store(idx, std::memory_order_release); for (int i = 0; i < idx; ++i) { - auto& pad = aux->paddingReads[i]; + 
auto &pad = aux->paddingReads[i]; pad.aio.ret = -1; pad.aio.op = LIBCURVE_OP_READ; @@ -255,7 +250,7 @@ ssize_t AioWrite::Wait() { return length; } -void AioWrite::OnPaddingReadComplete(CurveAioContext* read) { +void AioWrite::OnPaddingReadComplete(CurveAioContext *read) { if (static_cast(read->ret) != static_cast(read->length)) { LOG(ERROR) << "AioRead error: " << read; aux->error.store(true, std::memory_order_release); diff --git a/curvefs/src/volume/block_device_client.cpp b/curvefs/src/volume/block_device_client.cpp index 001bf48aab..206282e3f4 100644 --- a/curvefs/src/volume/block_device_client.cpp +++ b/curvefs/src/volume/block_device_client.cpp @@ -22,6 +22,7 @@ #include "curvefs/src/volume/block_device_client.h" +#include #include #include @@ -52,10 +53,10 @@ BlockDeviceClientImpl::BlockDeviceClientImpl() : fd_(-1), fileClient_(std::make_shared()) {} BlockDeviceClientImpl::BlockDeviceClientImpl( - const std::shared_ptr& fileClient) + const std::shared_ptr &fileClient) : fd_(-1), fileClient_(fileClient) {} -bool BlockDeviceClientImpl::Init(const BlockDeviceClientOptions& options) { +bool BlockDeviceClientImpl::Init(const BlockDeviceClientOptions &options) { auto ret = fileClient_->Init(options.configPath); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "Init file client error: " << ret; @@ -65,12 +66,10 @@ bool BlockDeviceClientImpl::Init(const BlockDeviceClientOptions& options) { return true; } -void BlockDeviceClientImpl::UnInit() { - fileClient_->UnInit(); -} +void BlockDeviceClientImpl::UnInit() { fileClient_->UnInit(); } -bool BlockDeviceClientImpl::Open(const std::string& filename, - const std::string& owner) { +bool BlockDeviceClientImpl::Open(const std::string &filename, + const std::string &owner) { UserInfo userInfo(owner); curve::client::OpenFlags flags; auto retCode = fileClient_->Open(filename, userInfo, flags); @@ -101,9 +100,9 @@ bool BlockDeviceClientImpl::Close() { return true; } -bool BlockDeviceClientImpl::Stat(const std::string& filename, - const std::string& owner, - BlockDeviceStat* statInfo) { +bool BlockDeviceClientImpl::Stat(const std::string &filename, + const std::string &owner, + BlockDeviceStat *statInfo) { FileStatInfo fileStatInfo; UserInfo userInfo(owner); auto retCode = fileClient_->StatFile(filename, userInfo, &fileStatInfo); @@ -122,7 +121,7 @@ bool BlockDeviceClientImpl::Stat(const std::string& filename, return true; } -ssize_t BlockDeviceClientImpl::Read(char* buf, off_t offset, size_t length) { +ssize_t BlockDeviceClientImpl::Read(char *buf, off_t offset, size_t length) { VLOG(9) << "read request, offset: " << offset << ", length: " << length; LatencyUpdater updater(&g_read_latency); @@ -138,7 +137,7 @@ ssize_t BlockDeviceClientImpl::Read(char* buf, off_t offset, size_t length) { return request.Wait(); } -ssize_t BlockDeviceClientImpl::Readv(const std::vector& iov) { +ssize_t BlockDeviceClientImpl::Readv(const std::vector &iov) { if (iov.size() == 1) { VLOG(9) << "read block offset: " << iov[0].offset << ", length: " << iov[0].length; @@ -148,7 +147,7 @@ ssize_t BlockDeviceClientImpl::Readv(const std::vector& iov) { std::vector> requests; requests.reserve(iov.size()); - for (const auto& io : iov) { + for (const auto &io : iov) { requests.push_back(absl::make_unique( io.offset, io.length, io.data, fileClient_.get(), fd_)); @@ -157,7 +156,7 @@ ssize_t BlockDeviceClientImpl::Readv(const std::vector& iov) { bool error = false; ssize_t total = 0; - for (const auto& r : requests) { + for (const auto &r : requests) { auto nr = r->Wait(); if (nr < 0) { 
error = true; @@ -171,8 +170,7 @@ ssize_t BlockDeviceClientImpl::Readv(const std::vector& iov) { return error ? -1 : total; } -ssize_t BlockDeviceClientImpl::Write(const char* buf, - off_t offset, +ssize_t BlockDeviceClientImpl::Write(const char *buf, off_t offset, size_t length) { VLOG(9) << "write request, offset: " << offset << ", length: " << length; @@ -189,7 +187,7 @@ ssize_t BlockDeviceClientImpl::Write(const char* buf, return request.Wait(); } -ssize_t BlockDeviceClientImpl::Writev(const std::vector& iov) { +ssize_t BlockDeviceClientImpl::Writev(const std::vector &iov) { if (iov.size() == 1) { return Write(iov[0].data, iov[0].offset, iov[0].length); } @@ -197,7 +195,7 @@ ssize_t BlockDeviceClientImpl::Writev(const std::vector& iov) { std::vector> requests; requests.reserve(iov.size()); - for (const auto& io : iov) { + for (const auto &io : iov) { requests.push_back(absl::make_unique( io.offset, io.length, io.data, fileClient_.get(), fd_)); @@ -206,7 +204,7 @@ ssize_t BlockDeviceClientImpl::Writev(const std::vector& iov) { bool error = false; ssize_t total = 0; - for (const auto& r : requests) { + for (const auto &r : requests) { auto nr = r->Wait(); if (nr < 0) { error = true; @@ -220,12 +218,11 @@ ssize_t BlockDeviceClientImpl::Writev(const std::vector& iov) { return error ? -1 : total; } -bool BlockDeviceClientImpl::WritePadding(char* writeBuffer, - off_t writeStart, +bool BlockDeviceClientImpl::WritePadding(char *writeBuffer, off_t writeStart, off_t writeEnd, off_t offset, // actual offset size_t length) { // actual length - std::vector> readvec; // Align reads + std::vector> readvec; // Align reads off_t readEnd = 0; // Padding leading @@ -235,7 +232,8 @@ bool BlockDeviceClientImpl::WritePadding(char* writeBuffer, } // Padding trailing - if (offset + length > readEnd && offset + length != writeEnd) { + if (static_cast(offset + length) > readEnd && + static_cast(offset + length) != writeEnd) { off_t readStart = writeEnd - IO_ALIGNED_BLOCK_SIZE; if (readvec.size() == 1 && readStart == readEnd) { readvec[0].second = IO_ALIGNED_BLOCK_SIZE * 2; @@ -244,10 +242,10 @@ bool BlockDeviceClientImpl::WritePadding(char* writeBuffer, } } - for (const auto& item : readvec) { + for (const auto &item : readvec) { auto retCode = AlignRead(writeBuffer + item.first - writeStart, item.first, item.second); - if (retCode != item.second) { + if (retCode != static_cast(item.second)) { return false; } } @@ -255,14 +253,13 @@ bool BlockDeviceClientImpl::WritePadding(char* writeBuffer, return true; } -ssize_t BlockDeviceClientImpl::AlignRead(char* buf, - off_t offset, +ssize_t BlockDeviceClientImpl::AlignRead(char *buf, off_t offset, size_t length) { auto ret = fileClient_->Read(fd_, buf, offset, length); if (ret < 0) { LOG(ERROR) << "Read file failed, retCode = " << ret; return -1; - } else if (ret != length) { + } else if (static_cast(ret) != length) { LOG(ERROR) << "Read file failed, expect read " << length << " bytes, actual read " << ret << " bytes"; return -1; @@ -271,14 +268,13 @@ ssize_t BlockDeviceClientImpl::AlignRead(char* buf, return length; } -ssize_t BlockDeviceClientImpl::AlignWrite(const char* buf, - off_t offset, +ssize_t BlockDeviceClientImpl::AlignWrite(const char *buf, off_t offset, size_t length) { auto ret = fileClient_->Write(fd_, buf, offset, length); if (ret < 0) { LOG(ERROR) << "Write file failed, retCode = " << ret; return -1; - } else if (ret != length) { + } else if (static_cast(ret) != length) { LOG(ERROR) << "Write file failed, expect write " << length << " bytes, actual 
write " << ret << " bytes"; return -1; @@ -288,15 +284,14 @@ ssize_t BlockDeviceClientImpl::AlignWrite(const char* buf, } bool BlockDeviceClientImpl::ConvertFileStatus(int fileStatus, - BlockDeviceStatus* bdStatus) { - static const std::map fileStatusMap { - { 0, BlockDeviceStatus::CREATED }, - { 1, BlockDeviceStatus::DELETING }, - { 2, BlockDeviceStatus::CLONING }, - { 3, BlockDeviceStatus::CLONE_META_INSTALLED }, - { 4, BlockDeviceStatus::CLONED }, - { 5, BlockDeviceStatus::BEING_CLONED } - }; + BlockDeviceStatus *bdStatus) { + static const std::map fileStatusMap{ + {0, BlockDeviceStatus::CREATED}, + {1, BlockDeviceStatus::DELETING}, + {2, BlockDeviceStatus::CLONING}, + {3, BlockDeviceStatus::CLONE_META_INSTALLED}, + {4, BlockDeviceStatus::CLONED}, + {5, BlockDeviceStatus::BEING_CLONED}}; auto iter = fileStatusMap.find(fileStatus); if (iter == fileStatusMap.end()) { diff --git a/curvefs/test/client/chunk_cache_manager_test.cpp b/curvefs/test/client/chunk_cache_manager_test.cpp index 5fe4b5e3bf..4f87ba1781 100644 --- a/curvefs/test/client/chunk_cache_manager_test.cpp +++ b/curvefs/test/client/chunk_cache_manager_test.cpp @@ -47,17 +47,20 @@ class ChunkCacheManagerTest : public testing::Test { S3ClientAdaptorOption option; option.blockSize = 1 * 1024 * 1024; option.chunkSize = 4 * 1024 * 1024; + option.baseSleepUs = 500; + option.objectPrefix = 0; option.pageSize = 64 * 1024; option.intervalSec = 5000; option.flushIntervalSec = 5000; option.readCacheMaxByte = 104857600; option.writeCacheMaxByte = 10485760000; + option.readCacheThreads = 5; option.chunkFlushThreads = 5; option.diskCacheOpt.diskCacheType = (DiskCacheType)0; s3ClientAdaptor_ = new S3ClientAdaptorImpl(); auto fsCacheManager_ = std::make_shared( s3ClientAdaptor_, option.readCacheMaxByte, option.writeCacheMaxByte, - nullptr); + option.readCacheThreads, nullptr); s3ClientAdaptor_->Init(option, nullptr, nullptr, nullptr, fsCacheManager_, nullptr, nullptr); chunkCacheManager_ = std::make_shared( @@ -76,7 +79,6 @@ class ChunkCacheManagerTest : public testing::Test { TEST_F(ChunkCacheManagerTest, test_write_new_data) { uint64_t offset = 0; uint64_t len = 1024; - int length = len; char *buf = new char[len]; chunkCacheManager_->WriteNewDataCache(s3ClientAdaptor_, offset, len, buf); diff --git a/curvefs/test/client/client_s3_adaptor_Integration.cpp b/curvefs/test/client/client_s3_adaptor_Integration.cpp index 7ac01856ea..fc9e30dd97 100644 --- a/curvefs/test/client/client_s3_adaptor_Integration.cpp +++ b/curvefs/test/client/client_s3_adaptor_Integration.cpp @@ -131,21 +131,24 @@ class ClientS3IntegrationTest : public testing::Test { S3ClientAdaptorOption option; option.blockSize = 1 * 1024 * 1024; option.chunkSize = 4 * 1024 * 1024; + option.baseSleepUs = 500; option.pageSize = 64 * 1024; option.intervalSec = 5000; option.flushIntervalSec = 5000; option.readCacheMaxByte = 104857600; option.writeCacheMaxByte = 10485760000; + option.readCacheThreads = 5; option.diskCacheOpt.diskCacheType = (DiskCacheType)0; option.chunkFlushThreads = 5; + option.objectPrefix = 0; std::shared_ptr mockInodeManager( &mockInodeManager_); std::shared_ptr mockMdsClient(&mockMdsClient_); std::shared_ptr mockS3Client(&mockS3Client_); s3ClientAdaptor_ = new S3ClientAdaptorImpl(); auto fsCacheManager = std::make_shared( - s3ClientAdaptor_, option.readCacheMaxByte, - option.writeCacheMaxByte, kvClientManager_); + s3ClientAdaptor_, option.readCacheMaxByte, option.writeCacheMaxByte, + option.readCacheThreads, kvClientManager_); s3ClientAdaptor_->Init(option, 
mockS3Client, mockInodeManager, mockMdsClient, fsCacheManager, nullptr, kvClientManager_); diff --git a/curvefs/test/client/client_s3_adaptor_test.cpp b/curvefs/test/client/client_s3_adaptor_test.cpp index 82a3cfffeb..7430a14856 100644 --- a/curvefs/test/client/client_s3_adaptor_test.cpp +++ b/curvefs/test/client/client_s3_adaptor_test.cpp @@ -64,13 +64,15 @@ class ClientS3AdaptorTest : public testing::Test { S3ClientAdaptorOption option; option.blockSize = 1 * 1024 * 1024; option.chunkSize = 4 * 1024 * 1024; + option.baseSleepUs = 500; option.pageSize = 64 * 1024; option.intervalSec = 5000; option.flushIntervalSec = 5000; option.readCacheMaxByte = 104857600; option.writeCacheMaxByte = 10485760000; - option.fuseMaxSize = 131072; + option.readCacheThreads = 5; option.chunkFlushThreads = 5; + option.objectPrefix = 0; option.diskCacheOpt.diskCacheType = (DiskCacheType)0; kvClientManager_ = nullptr; s3ClientAdaptor_->Init(option, mockS3Client_, mockInodeManager_, @@ -122,7 +124,7 @@ TEST_F(ClientS3AdaptorTest, write_success) { uint64_t inodeId = 1; uint64_t offset = 0; uint64_t length = 1024; - char buf[length] = {0}; + char *buf = new char[length]; memset(buf, 'a', length); auto fileCache = std::make_shared(); EXPECT_CALL(*mockFsCacheManager_, FindOrCreateFileCacheManager(_, _)) @@ -134,32 +136,35 @@ TEST_F(ClientS3AdaptorTest, write_success) { EXPECT_CALL(*mockFsCacheManager_, MemCacheRatio()).WillOnce(Return(10)); EXPECT_CALL(*fileCache, Write(_, _, _)).WillOnce(Return(length)); ASSERT_EQ(length, s3ClientAdaptor_->Write(inodeId, offset, length, buf)); + delete[] buf; } TEST_F(ClientS3AdaptorTest, read_success) { uint64_t inodeId = 1; uint64_t offset = 0; uint64_t length = 1024; - char buf[length] = {0}; + char *buf = new char[length]; memset(buf, 'a', length); auto fileCache = std::make_shared(); EXPECT_CALL(*mockFsCacheManager_, FindOrCreateFileCacheManager(_, _)) .WillOnce(Return(fileCache)); EXPECT_CALL(*fileCache, Read(_, _, _, _)).WillOnce(Return(length)); ASSERT_EQ(length, s3ClientAdaptor_->Read(inodeId, offset, length, buf)); + delete[] buf; } TEST_F(ClientS3AdaptorTest, read_fail) { uint64_t inodeId = 1; uint64_t offset = 0; uint64_t length = 1024; - char buf[length] = {0}; + char *buf = new char[length]; memset(buf, 'a', length); auto fileCache = std::make_shared(); EXPECT_CALL(*mockFsCacheManager_, FindOrCreateFileCacheManager(_, _)) .WillOnce(Return(fileCache)); EXPECT_CALL(*fileCache, Read(_, _, _, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, s3ClientAdaptor_->Read(inodeId, offset, length, buf)); + delete[] buf; } TEST_F(ClientS3AdaptorTest, truncate_small) { diff --git a/curvefs/test/client/client_s3_test.cpp b/curvefs/test/client/client_s3_test.cpp index 4acbf89037..fc59fef990 100644 --- a/curvefs/test/client/client_s3_test.cpp +++ b/curvefs/test/client/client_s3_test.cpp @@ -111,7 +111,6 @@ TEST_F(ClientS3Test, uploadync) { TEST_F(ClientS3Test, downloadAsync) { const std::string obj("test"); - uint64_t offset = 0; uint64_t len = 1024; char* buf = new char[len]; diff --git a/curvefs/test/client/common/test_s3util.cpp b/curvefs/test/client/common/test_s3util.cpp index 47536e99b9..973673c8d9 100644 --- a/curvefs/test/client/common/test_s3util.cpp +++ b/curvefs/test/client/common/test_s3util.cpp @@ -29,23 +29,50 @@ namespace common { TEST(ValidNameOfInodeTest, test) { LOG(INFO) << "inode = 1, name = 1_16777216_2_0_0"; ASSERT_FALSE( - curvefs::common::s3util::ValidNameOfInode("1", "1_16777216_2_0_0")); + curvefs::common::s3util::ValidNameOfInode("1", "1_16777216_2_0_0", 0)); LOG(INFO) 
<< "inode = 16777216, name = 1_16777216_2_0_0"; - ASSERT_TRUE(curvefs::common::s3util::ValidNameOfInode("16777216", - "1_16777216_2_0_0")); + ASSERT_TRUE( + curvefs::common::s3util::ValidNameOfInode("16777216", + "1_16777216_2_0_0", 0)); LOG(INFO) << "inode = 1, name = 1_16777216_2_0_0"; ASSERT_FALSE( - curvefs::common::s3util::ValidNameOfInode("1", "1_16777216_2_0_0")); + curvefs::common::s3util::ValidNameOfInode("1", "1_16777216_2_0_0", 0)); LOG(INFO) << "inode = 16777216, name = 1_1_1_16777216_0"; ASSERT_FALSE(curvefs::common::s3util::ValidNameOfInode("16777216", - "1_1_1_16777216_0")); + "1_1_1_16777216_0", 0)); LOG(INFO) << "inode = 16777216, name = 1_1_1_16777216"; ASSERT_FALSE(curvefs::common::s3util::ValidNameOfInode("16777216", - "1_1_1_16777216")); + "1_1_1_16777216", 0)); +} + +TEST(ValidNameOfInodeTest1, test) { + LOG(INFO) << "inode = 1, name = 1_16777216_2_0_0"; + ASSERT_FALSE( + curvefs::common::s3util::ValidNameOfInode("1", + "1/16/16777/1_16777216_2_0_0", 1)); + + LOG(INFO) << "inode = 16777216, name = 1_16777216_2_0_0"; + ASSERT_TRUE( + curvefs::common::s3util::ValidNameOfInode("16777216", + "1/16/16777/1_16777216_2_0_0", 1)); + + LOG(INFO) << "inode = 1, name = 1_16777216_2_0_0"; + ASSERT_FALSE( + curvefs::common::s3util::ValidNameOfInode("1", + "1/16/16777/1_16777216_2_0_0", 1)); + + LOG(INFO) << "inode = 16777216, name = 1_1_1_16777216_0"; + ASSERT_FALSE( + curvefs::common::s3util::ValidNameOfInode("16777216", + "1/16/16777/1_1_1_16777216_0", 1)); + + LOG(INFO) << "inode = 16777216, name = 1_1_1_16777216"; + ASSERT_FALSE(curvefs::common::s3util::ValidNameOfInode("16777216", + "1/16/16777/1_1_1_16777216", 1)); } } // namespace common diff --git a/curvefs/test/client/data_cache_test.cpp b/curvefs/test/client/data_cache_test.cpp index 107335b28c..5127b4170e 100644 --- a/curvefs/test/client/data_cache_test.cpp +++ b/curvefs/test/client/data_cache_test.cpp @@ -45,16 +45,19 @@ class DataCacheTest : public testing::Test { S3ClientAdaptorOption option; option.blockSize = 1 * 1024 * 1024; option.chunkSize = 4 * 1024 * 1024; + option.baseSleepUs = 500; + option.objectPrefix = 0; option.pageSize = 64 * 1024; option.intervalSec = 5000; option.flushIntervalSec = 5000; option.readCacheMaxByte = 104857600; + option.readCacheThreads = 5; option.diskCacheOpt.diskCacheType = (DiskCacheType)0; option.chunkFlushThreads = 5; s3ClientAdaptor_ = new S3ClientAdaptorImpl(); auto fsCacheManager = std::make_shared( - s3ClientAdaptor_, option.readCacheMaxByte, - option.writeCacheMaxByte, nullptr); + s3ClientAdaptor_, option.readCacheMaxByte, option.writeCacheMaxByte, + option.readCacheThreads, nullptr); s3ClientAdaptor_->Init(option, nullptr, nullptr, nullptr, fsCacheManager, nullptr, nullptr); mockChunkCacheManager_ = std::make_shared(); diff --git a/curvefs/test/client/file_cache_manager_test.cpp b/curvefs/test/client/file_cache_manager_test.cpp index 7d2e8be887..cff432124b 100644 --- a/curvefs/test/client/file_cache_manager_test.cpp +++ b/curvefs/test/client/file_cache_manager_test.cpp @@ -28,6 +28,7 @@ #include "curvefs/test/client/mock_client_s3_cache_manager.h" #include "curvefs/test/client/mock_inode_cache_manager.h" #include "curvefs/test/client/mock_client_s3.h" +#include "src/common/concurrent/task_thread_pool.h" namespace curvefs { namespace client { @@ -47,6 +48,7 @@ using ::testing::Return; using ::testing::SetArgPointee; using ::testing::SetArgReferee; using ::testing::WithArg; +using curve::common::TaskThreadPool; // extern KVClientManager *g_kvClientManager; @@ -60,24 +62,29 @@ 
class FileCacheManagerTest : public testing::Test { S3ClientAdaptorOption option; option.blockSize = 1 * 1024 * 1024; option.chunkSize = 4 * 1024 * 1024; + option.baseSleepUs = 500; + option.objectPrefix = 0; option.pageSize = 64 * 1024; option.intervalSec = 5000; option.flushIntervalSec = 5000; option.readCacheMaxByte = 104857600; option.writeCacheMaxByte = 10485760000; + option.readCacheThreads = 5; option.diskCacheOpt.diskCacheType = (DiskCacheType)0; option.chunkFlushThreads = 5; s3ClientAdaptor_ = new S3ClientAdaptorImpl(); - auto fsCacheManager_ = std::make_shared( + auto fsCacheManager = std::make_shared( s3ClientAdaptor_, option.readCacheMaxByte, option.writeCacheMaxByte, - nullptr); + option.readCacheThreads, nullptr); mockInodeManager_ = std::make_shared(); mockS3Client_ = std::make_shared(); s3ClientAdaptor_->Init(option, mockS3Client_, mockInodeManager_, - nullptr, fsCacheManager_, nullptr, nullptr); + nullptr, fsCacheManager, nullptr, nullptr); s3ClientAdaptor_->SetFsId(fsId); + + threadPool_->Start(option.readCacheThreads); fileCacheManager_ = std::make_shared( - fsId, inodeId, s3ClientAdaptor_, nullptr); + fsId, inodeId, s3ClientAdaptor_, nullptr, threadPool_); mockChunkCacheManager_ = std::make_shared(); curvefs::client::common::FLAGS_enableCto = false; kvClientManager_ = nullptr; @@ -96,6 +103,8 @@ class FileCacheManagerTest : public testing::Test { std::shared_ptr mockInodeManager_; std::shared_ptr mockS3Client_; std::shared_ptr kvClientManager_; + std::shared_ptr> threadPool_ = + std::make_shared>(); }; TEST_F(FileCacheManagerTest, test_FindOrCreateChunkCacheManager) { @@ -140,7 +149,7 @@ TEST_F(FileCacheManagerTest, test_flush_fail) { TEST_F(FileCacheManagerTest, test_new_write) { uint64_t offset = 0; - uint64_t len = 5 * 1024 * 1024; + const uint64_t len = 5 * 1024 * 1024; char buf[len] = {0}; memset(buf, 'a', len); @@ -157,9 +166,10 @@ TEST_F(FileCacheManagerTest, test_new_write) { TEST_F(FileCacheManagerTest, test_old_write) { uint64_t offset = 0; - uint64_t len = 1024; + const uint64_t len = 1024; char buf[len] = {0}; + memset(buf, 0, len); auto dataCache = std::make_shared( s3ClientAdaptor_, nullptr, offset, 0, nullptr, nullptr); EXPECT_CALL(*dataCache, Write(_, _, _, _)).WillOnce(Return()); @@ -174,9 +184,10 @@ TEST_F(FileCacheManagerTest, test_old_write) { TEST_F(FileCacheManagerTest, test_read_cache) { uint64_t inodeId = 1; uint64_t offset = 0; - uint64_t len = 5 * 1024 * 1024; + const uint64_t len = 5 * 1024 * 1024; char buf[len] = {0}; ReadRequest request; + memset(buf, 0, len); std::vector requests; std::vector emptyRequests; requests.emplace_back(request); @@ -195,9 +206,10 @@ TEST_F(FileCacheManagerTest, test_read_cache) { TEST_F(FileCacheManagerTest, test_read_getinode_fail) { uint64_t inodeId = 1; uint64_t offset = 0; - uint64_t len = 1024; + const uint64_t len = 1024; char buf[len] = {0}; + memset(buf, 0, len); ReadRequest request; std::vector requests; request.index = 0; @@ -216,21 +228,15 @@ TEST_F(FileCacheManagerTest, test_read_getinode_fail) { } TEST_F(FileCacheManagerTest, test_read_s3) { - uint64_t inodeId = 1; - uint64_t offset = 0; - uint64_t len = 1024; - int length = len; - char *buf = new char[len]; - char *tmpbuf = new char[len]; + const uint64_t inodeId = 1; + const uint64_t offset = 0; + const uint64_t len = 1024; - memset(tmpbuf, 'a', len); - ReadRequest request; - std::vector requests; - request.index = 0; - request.chunkPos = offset; - request.len = len; - request.bufOffset = 0; - requests.emplace_back(request); + std::vector buf(len); 
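+    // buf is the destination handed to Read(); tmpBuf is the source data used
+    // by the mocked Download() call below. Holding both in std::vector means
+    // they are released automatically even if an ASSERT aborts the test early,
+    // replacing the manual new[]/delete[] pairs this test used before.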
+ std::vector tmpBuf(len, 'a'); + + ReadRequest req{.index = 0, .chunkPos = offset, .len = len, .bufOffset = 0}; + std::vector requests{req}; EXPECT_CALL(*mockChunkCacheManager_, ReadByWriteCache(_, _, _, _, _)) .WillOnce(DoAll(SetArgPointee<4>(requests), Return())) .WillOnce(DoAll(SetArgPointee<4>(requests), Return())); @@ -242,9 +248,9 @@ TEST_F(FileCacheManagerTest, test_read_s3) { fileCacheManager_->SetChunkCacheManagerForTest(0, mockChunkCacheManager_); Inode inode; inode.set_length(len); - auto s3ChunkInfoMap = inode.mutable_s3chunkinfomap(); - S3ChunkInfoList *s3ChunkInfoList = new S3ChunkInfoList(); - S3ChunkInfo *s3ChunkInfo = s3ChunkInfoList->add_s3chunks(); + auto *s3ChunkInfoMap = inode.mutable_s3chunkinfomap(); + auto *s3ChunkInfoList = new S3ChunkInfoList(); + auto *s3ChunkInfo = s3ChunkInfoList->add_s3chunks(); s3ChunkInfo->set_chunkid(25); s3ChunkInfo->set_compaction(0); s3ChunkInfo->set_offset(offset); @@ -259,14 +265,11 @@ TEST_F(FileCacheManagerTest, test_read_s3) { .WillOnce( DoAll(SetArgReferee<1>(inodeWrapper), Return(CURVEFS_ERROR::OK))); EXPECT_CALL(*mockS3Client_, Download(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<1>(*tmpbuf), Return(len))) + .WillOnce(DoAll(SetArgPointee<1>(*tmpBuf.data()), Return(len))) .WillOnce(Return(-1)); - ASSERT_EQ(len, fileCacheManager_->Read(inodeId, offset, len, buf)); - ASSERT_EQ(-1, fileCacheManager_->Read(inodeId, offset, len, buf)); - - delete buf; - delete tmpbuf; + ASSERT_EQ(len, fileCacheManager_->Read(inodeId, offset, len, buf.data())); + ASSERT_EQ(-1, fileCacheManager_->Read(inodeId, offset, len, buf.data())); } } // namespace client diff --git a/curvefs/test/client/filesystem/BUILD b/curvefs/test/client/filesystem/BUILD new file mode 100644 index 0000000000..b1975ae4be --- /dev/null +++ b/curvefs/test/client/filesystem/BUILD @@ -0,0 +1,34 @@ +# +# Copyright (c) 2023 NetEase Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +load("//:copts.bzl", "CURVE_TEST_COPTS") + +cc_test( + name = "curvefs_client_filesystem_test", + srcs = glob([ + "*.cpp", + "*.h", + "helper/*.h", + "helper/*.cpp", + ]), + copts = CURVE_TEST_COPTS, + deps = [ + "//curvefs/src/client:fuse_client_lib", + "//curvefs/test/client:mock", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) diff --git a/curvefs/test/client/filesystem/attrwatcher_test.cpp b/curvefs/test/client/filesystem/attrwatcher_test.cpp new file mode 100644 index 0000000000..0e43f89ac0 --- /dev/null +++ b/curvefs/test/client/filesystem/attrwatcher_test.cpp @@ -0,0 +1,78 @@ + +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-03-29 + * Author: Jingli Chen (Wine93) + */ + +#include "curvefs/src/client/filesystem/attr_watcher.h" +#include "curvefs/test/client/filesystem/helper/helper.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +class AttrWatcherTest : public ::testing::Test { + protected: + void SetUp() override {} + + void TearDown() override {} +}; + +TEST_F(AttrWatcherTest, RememberMtime) { + auto option = AttrWatcherOption(); + auto attrWatcher = std::make_shared(option, nullptr, nullptr); + + // remeber mtime + InodeAttr attr = MkAttr(100, AttrOption().mtime(123, 456)); + attrWatcher->RemeberMtime(attr); + + // get mtime + TimeSpec time; + bool yes = attrWatcher->GetMtime(100, &time); + ASSERT_TRUE(yes); + ASSERT_EQ(time, TimeSpec(123, 456)); +} + +TEST_F(AttrWatcherTest, EvitAttr) { + auto option = AttrWatcherOption{lruSize: 1}; + auto attrWatcher = std::make_shared(option, nullptr, nullptr); + + // remeber mtime + for (const Ino& ino : std::vector{100, 200}) { + InodeAttr attr = MkAttr(ino, AttrOption().mtime(123, 456)); + attrWatcher->RemeberMtime(attr); + } + + // get mtime + TimeSpec time; + bool yes = attrWatcher->GetMtime(100, &time); + ASSERT_FALSE(yes); + + yes = attrWatcher->GetMtime(200, &time); + ASSERT_TRUE(yes); + ASSERT_EQ(time, TimeSpec(123, 456)); +} + +TEST_F(AttrWatcherTest, UpdateDirEntryAttr) { +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/test/client/filesystem/defer_sync_test.cpp b/curvefs/test/client/filesystem/defer_sync_test.cpp new file mode 100644 index 0000000000..0aabdac1df --- /dev/null +++ b/curvefs/test/client/filesystem/defer_sync_test.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Project: Curve + * Created Date: 2023-03-29 + * Author: Jingli Chen (Wine93) + */ + +#include "curvefs/src/client/filesystem/defer_sync.h" +#include "curvefs/test/client/filesystem/helper/helper.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +class DeferSyncTest : public ::testing::Test { + protected: + void SetUp() override { + metaClient_ = std::make_shared(); + } + + void TearDown() override { + metaClient_ = nullptr; + } + + protected: + std::shared_ptr metaClient_; +}; + +TEST_F(DeferSyncTest, Basic) { + auto builder = DeferSyncBuilder(); + auto deferSync = builder.SetOption([&](DeferSyncOption* option){ + option->delay = 3; + }).Build(); + deferSync->Start(); + + auto inode = MkInode(100, InodeOption().metaClient(metaClient_)); + // EXPECT_CALL_INDOE_SYNC_TIMES(*metaClient_, 100 /* ino */, 1 /* times */); + inode->SetLength(100); // make inode ditry to trigger sync + deferSync->Push(inode); + deferSync->Stop(); +} + +TEST_F(DeferSyncTest, Dirty) { + auto builder = DeferSyncBuilder(); + auto deferSync = builder.SetOption([&](DeferSyncOption* option){ + option->delay = 3; + }).Build(); + deferSync->Start(); + + auto inode = MkInode(100, InodeOption().metaClient(metaClient_)); + EXPECT_CALL_INDOE_SYNC_TIMES(*metaClient_, 100 /* ino */, 0 /* times */); + deferSync->Push(inode); + deferSync->Stop(); +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/test/client/filesystem/dir_cache_test.cpp b/curvefs/test/client/filesystem/dir_cache_test.cpp new file mode 100644 index 0000000000..b83dbac740 --- /dev/null +++ b/curvefs/test/client/filesystem/dir_cache_test.cpp @@ -0,0 +1,218 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Project: Curve + * Created Date: 2023-03-29 + * Author: Jingli Chen (Wine93) + */ + +#include + +#include "curvefs/src/client/filesystem/utils.h" +#include "curvefs/test/client/filesystem/helper/helper.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +class DirEntryListTest : public ::testing::Test { + protected: + void SetUp() override {} + + void TearDown() override {} +}; + +class DirCacheTest : public ::testing::Test { + protected: + void SetUp() override {} + + void TearDown() override {} +}; + +TEST_F(DirEntryListTest, Size) { + DirEntryList entries; + ASSERT_EQ(entries.Size(), 0); + + entries.Add(MkDirEntry(100, "f1")); + ASSERT_EQ(entries.Size(), 1); + + entries.Clear(); + ASSERT_EQ(entries.Size(), 0); +} + +TEST_F(DirEntryListTest, Iterate) { + DirEntryList entries; + + std::vector> items{ + { 100, "f1" }, + { 200, "f2" }, + { 300, "f3" }, + }; + + for (const auto& item : items) { + entries.Add(MkDirEntry(item.first, item.second)); + } + ASSERT_EQ(entries.Size(), 3); + + std::vector> out; + entries.Iterate([&](DirEntry* dirEntry){ + out.push_back({dirEntry->ino, dirEntry->name}); + }); + ASSERT_EQ(items, out); +} + +TEST_F(DirEntryListTest, Get) { + DirEntryList entries; + InodeAttr attr = MkAttr(100, AttrOption().length(1024)); + entries.Add(MkDirEntry(100, "f1", attr)); + + // CASE 1: directory entry exit + { + DirEntry dirEntry; + bool yes = entries.Get(100, &dirEntry); + ASSERT_TRUE(yes); + ASSERT_EQ(dirEntry.attr.inodeid(), 100); + ASSERT_EQ(dirEntry.attr.length(), 1024); + } + + // CASE 2: directory entry not exit + { + DirEntry dirEntry; + bool yes = entries.Get(200, &dirEntry); + ASSERT_FALSE(yes); + } +} + +TEST_F(DirEntryListTest, UpdateAttr) { + DirEntryList entries; + + InodeAttr attr = MkAttr(100, AttrOption().length(1024)); + DirEntry dirEntry(100, "f1", attr); + entries.Add(dirEntry); + + // CASE 1: update attribute success + { + InodeAttr attr = MkAttr(100, AttrOption().length(2048)); + bool yes = entries.UpdateAttr(100, attr); + ASSERT_TRUE(yes); + + yes = entries.Get(100, &dirEntry); + ASSERT_TRUE(yes); + ASSERT_EQ(dirEntry.attr.inodeid(), 100); + ASSERT_EQ(dirEntry.attr.length(), 2048); + } + + // CASE 2: (update attribute failed) / (ino not found) + { + bool yes = entries.UpdateAttr(200, attr); + ASSERT_FALSE(yes); + } +} + +TEST_F(DirEntryListTest, UpdateLength) { + DirEntryList entries; + + DirEntry dirEntry = MkDirEntry(100, "f1", MkAttr(100, AttrOption() + .length(1024) + .mtime(100, 101) + .ctime(200, 201))); + entries.Add(dirEntry); + + // CASE 1: ino not found + { + InodeAttr open = MkAttr(200); + bool yes = entries.UpdateLength(200, open); + ASSERT_FALSE(yes); + } + + // CASE 2: update length and mtime + { + InodeAttr open = MkAttr(100, AttrOption() + .length(2048) + .mtime(100, 101) + .ctime(200, 202)); + bool yes = entries.UpdateLength(100, open); + ASSERT_TRUE(yes); + + DirEntry dirEntry; + yes = entries.Get(100, &dirEntry); + ASSERT_TRUE(yes); + ASSERT_EQ(dirEntry.attr.inodeid(), 100); + ASSERT_EQ(dirEntry.attr.length(), 2048); + // ASSERT_EQ(AttrMtime(dirEntry.attr), TimeSpec()); + // ASSERT_EQ(AttrCtime(dirEntry.attr), TimeSpec()); + } + + // CASE 3: update length, mtime, ctime + { + InodeAttr open = MkAttr(100, AttrOption() + .length(2048) + .mtime(100, 101) + .ctime(200, 202)); + bool yes = entries.UpdateLength(100, open); + ASSERT_TRUE(yes); + + DirEntry dirEntry; + yes = entries.Get(100, &dirEntry); + ASSERT_TRUE(yes); + ASSERT_EQ(dirEntry.attr.length(), 2048); + // ASSERT_EQ(AttrMtime(dirEntry.attr), 
TimeSpec()); + // ASSERT_EQ(AttrCtime(dirEntry.attr), TimeSpec()); + } +} + +TEST_F(DirEntryListTest, Clear) { + DirEntryList entries; + std::vector> items{ + { 100, "f1" }, + { 200, "f2" }, + { 300, "f3" }, + }; + + for (const auto& item : items) { + entries.Add(MkDirEntry(item.first, item.second)); + } + ASSERT_EQ(entries.Size(), 3); + + DirEntry dirEntry; + for (const auto& item : items) { + bool yes = entries.Get(item.first, &dirEntry); + ASSERT_TRUE(yes); + ASSERT_EQ(dirEntry.name, item.second); + } + + entries.Clear(); + ASSERT_EQ(entries.Size(), 0); + for (const auto& item : items) { + bool yes = entries.Get(item.first, &dirEntry); + ASSERT_FALSE(yes); + } +} + +TEST_F(DirEntryListTest, Mtime) { + DirEntryList entries; + TimeSpec time = entries.GetMtime(); + ASSERT_EQ(time, TimeSpec(0, 0)); + + entries.SetMtime(TimeSpec(123, 456)); + time = entries.GetMtime(); + ASSERT_EQ(time, TimeSpec(123, 456)); +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/test/client/filesystem/error_test.cpp b/curvefs/test/client/filesystem/error_test.cpp new file mode 100644 index 0000000000..74347de1ab --- /dev/null +++ b/curvefs/test/client/filesystem/error_test.cpp @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-03-29 + * Author: Jingli Chen (Wine93) + */ + +#include + +#include "curvefs/src/client/filesystem/error.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +class ErrorTest : public ::testing::Test { + protected: + void SetUp() override {} + void TearDown() override {} +}; + +TEST_F(ErrorTest, StrErr) { + ASSERT_EQ(StrErr(CURVEFS_ERROR::OK), "OK"); + ASSERT_EQ(StrErr(CURVEFS_ERROR::INTERNAL), "internal error"); + ASSERT_EQ(StrErr(CURVEFS_ERROR::INVALIDPARAM), "invalid argument"); + ASSERT_EQ(StrErr(CURVEFS_ERROR::STALE), "stale file handler"); + ASSERT_EQ(StrErr(CURVEFS_ERROR::UNKNOWN), "unknown"); +} + +TEST_F(ErrorTest, SysErr) { + ASSERT_EQ(SysErr(CURVEFS_ERROR::OK), 0); + ASSERT_EQ(SysErr(CURVEFS_ERROR::INTERNAL), EIO); + ASSERT_EQ(SysErr(CURVEFS_ERROR::INVALIDPARAM), EINVAL); + ASSERT_EQ(SysErr(CURVEFS_ERROR::STALE), ESTALE); + ASSERT_EQ(SysErr(CURVEFS_ERROR::UNKNOWN), EIO); +} + +TEST_F(ErrorTest, ToFSError) { + ASSERT_EQ(ToFSError(MetaStatusCode::OK), CURVEFS_ERROR::OK); + ASSERT_EQ(ToFSError(MetaStatusCode::NOT_FOUND), CURVEFS_ERROR::NOTEXIST); +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/test/client/filesystem/filesystem_test.cpp b/curvefs/test/client/filesystem/filesystem_test.cpp new file mode 100644 index 0000000000..e8087818fe --- /dev/null +++ b/curvefs/test/client/filesystem/filesystem_test.cpp @@ -0,0 +1,459 @@ + +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-04-03 + * Author: Jingli Chen (Wine93) + */ + +#include + +#include "curvefs/test/client/filesystem/helper/helper.h" +#include "curvefs/src/client/filesystem/filesystem.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +class FileSystemTest : public ::testing::Test { + protected: + void SetUp() override {} + void TearDown() override {} +}; + +TEST_F(FileSystemTest, Attr2Stat) { + InodeAttr attr = MkAttr(100, AttrOption() + .type(FsFileType::TYPE_S3) + .mode(33216) + .nlink(2) + .uid(1000) + .gid(1001) + .length(4096) + .rdev(2048) + .atime(100, 101) + .mtime(200, 201) + .ctime(300, 301)); + + // CASE 1: check stat field one by one + struct stat stat; + auto fs = FileSystemBuilder().Build(); + fs->Attr2Stat(&attr, &stat); + ASSERT_EQ(stat.st_ino, 100); + ASSERT_EQ(stat.st_mode, 33216); + ASSERT_EQ(stat.st_nlink, 2); + ASSERT_EQ(stat.st_uid, 1000); + ASSERT_EQ(stat.st_gid, 1001); + ASSERT_EQ(stat.st_size, 4096); + ASSERT_EQ(stat.st_rdev, 2048); + ASSERT_EQ(stat.st_atim.tv_sec, 100); + ASSERT_EQ(stat.st_atim.tv_nsec, 101); + ASSERT_EQ(stat.st_mtim.tv_sec, 200); + ASSERT_EQ(stat.st_mtim.tv_nsec, 201); + ASSERT_EQ(stat.st_ctim.tv_sec, 300); + ASSERT_EQ(stat.st_ctim.tv_nsec, 301); + ASSERT_EQ(stat.st_blksize, 0x10000u); + ASSERT_EQ(stat.st_blocks, 8); + + // CASE 2: convert all kind of file types + std::vector types = { + FsFileType::TYPE_DIRECTORY, + FsFileType::TYPE_FILE, + FsFileType::TYPE_SYM_LINK, + }; + for (const auto& type : types) { + attr.set_type(type); + fs->Attr2Stat(&attr, &stat); + ASSERT_EQ(stat.st_blocks, 0); + } +} + +TEST_F(FileSystemTest, Entry2Param) { + EntryOut entryOut; + entryOut.attr = MkAttr(100, AttrOption().length(4096)); + entryOut.entryTimeout = 1; + entryOut.attrTimeout = 2; + + fuse_entry_param e; + auto fs = FileSystemBuilder().Build(); + fs->Entry2Param(&entryOut, &e); + ASSERT_EQ(e.ino, 100); + ASSERT_EQ(e.attr.st_size, 4096); + ASSERT_EQ(e.generation, 0); + ASSERT_EQ(e.entry_timeout, 1); + ASSERT_EQ(e.attr_timeout, 2); +} + +TEST_F(FileSystemTest, SetEntryTimeout) { + auto builder = FileSystemBuilder(); + auto fs = builder.SetOption([](FileSystemOption* option){ + option->kernelCacheOption.entryTimeoutSec = 1; + option->kernelCacheOption.attrTimeoutSec = 2; + option->kernelCacheOption.dirEntryTimeoutSec = 3; + option->kernelCacheOption.dirAttrTimeoutSec = 4; + }).Build(); + + // CASE 1: set kernel cache timeout for regular file or symbol link + std::vector types = { + FsFileType::TYPE_S3, + FsFileType::TYPE_FILE, + FsFileType::TYPE_SYM_LINK, + }; + for (const auto& type : types) { + auto attr = MkAttr(100, AttrOption().type(type)); + auto entryOut = EntryOut(attr); + fs->SetEntryTimeout(&entryOut); + ASSERT_EQ(entryOut.entryTimeout, 1); + ASSERT_EQ(entryOut.attrTimeout, 2); + } + + // CASE 2: set kernel cache timeout for directory + auto attr = MkAttr(100, AttrOption().type(FsFileType::TYPE_DIRECTORY)); + auto entryOut = EntryOut(attr); + fs->SetEntryTimeout(&entryOut); + ASSERT_EQ(entryOut.entryTimeout, 3); + ASSERT_EQ(entryOut.attrTimeout, 4); +} + 
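+// SetAttrTimeout mirrors SetEntryTimeout above: regular files and symlinks get
+// kernelCacheOption.attrTimeoutSec, while directories get dirAttrTimeoutSec.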
+TEST_F(FileSystemTest, SetAttrTimeout) { + auto builder = FileSystemBuilder(); + auto fs = builder.SetOption([](FileSystemOption* option){ + option->kernelCacheOption.attrTimeoutSec = 1; + option->kernelCacheOption.dirAttrTimeoutSec = 2; + }).Build(); + + // CASE 1: set kernel attribute cache timeout + // for regular file or symbol link + std::vector types = { + FsFileType::TYPE_S3, + FsFileType::TYPE_FILE, + FsFileType::TYPE_SYM_LINK, + }; + for (const auto& typ : types) { + auto attr = MkAttr(100, AttrOption().type(typ)); + auto attrOut = AttrOut(attr); + fs->SetAttrTimeout(&attrOut); + ASSERT_EQ(attrOut.attrTimeout, 1); + } + + // CASE 2: set kernel attribute cache timeout for directory + auto attr = MkAttr(100, AttrOption().type(FsFileType::TYPE_DIRECTORY)); + auto attrOut = AttrOut(attr); + fs->SetAttrTimeout(&attrOut); + ASSERT_EQ(attrOut.attrTimeout, 2); +} + +TEST_F(FileSystemTest, Reply) { + // TODO(Wine93): make it works +} + +TEST_F(FileSystemTest, Lookup_Basic) { + auto builder = FileSystemBuilder(); + auto fs = builder.Build(); + + EXPECT_CALL_RETURN_GetDentry(*builder.GetDentryManager(), + CURVEFS_ERROR::OK); + EXPECT_CALL_RETURN_GetInodeAttr(*builder.GetInodeManager(), + CURVEFS_ERROR::OK); + + EntryOut entryOut; + auto rc = fs->Lookup(Request(), 1, "f1", &entryOut); + ASSERT_EQ(rc, CURVEFS_ERROR::OK); +} + +TEST_F(FileSystemTest, Lookup_NameTooLong) { + auto builder = FileSystemBuilder(); + auto fs = builder.SetOption([](FileSystemOption* option) { + option->maxNameLength = 255; + }).Build(); + + EntryOut entryOut; + auto rc = fs->Lookup(Request(), 1, std::string(256, 'x'), &entryOut); + ASSERT_EQ(rc, CURVEFS_ERROR::NAMETOOLONG); +} + +TEST_F(FileSystemTest, Lookup_NegativeCache) { + auto builder = FileSystemBuilder(); + auto fs = builder.SetOption([](FileSystemOption* option) { + option->lookupCacheOption.negativeTimeoutSec = 1; + option->lookupCacheOption.lruSize = 100000; + }).Build(); + + EXPECT_CALL_RETURN_GetDentry(*builder.GetDentryManager(), + CURVEFS_ERROR::NOTEXIST); + + EntryOut entryOut; + auto rc = fs->Lookup(Request(), 1, "f1", &entryOut); + ASSERT_EQ(rc, CURVEFS_ERROR::NOTEXIST); + + rc = fs->Lookup(Request(), 1, "f1", &entryOut); + ASSERT_EQ(rc, CURVEFS_ERROR::NOTEXIST); +} + +TEST_F(FileSystemTest, GetAttr_Basic) { + auto builder = FileSystemBuilder(); + auto fs = builder.Build(); + + EXPECT_CALL_INVOKE_GetInodeAttr(*builder.GetInodeManager(), + [&](uint64_t ino, InodeAttr* attr) -> CURVEFS_ERROR { + attr->set_inodeid(ino); + attr->set_length(4096); + attr->set_mtime(123); + attr->set_mtime_ns(456); + return CURVEFS_ERROR::OK; + }); + + AttrOut attrOut; + auto rc = fs->GetAttr(Request(), 100, &attrOut); + ASSERT_EQ(rc, CURVEFS_ERROR::OK); + ASSERT_EQ(attrOut.attr.inodeid(), 100); + ASSERT_EQ(attrOut.attr.length(), 4096); + ASSERT_EQ(attrOut.attr.mtime(), 123); + ASSERT_EQ(attrOut.attr.mtime_ns(), 456); +} + +TEST_F(FileSystemTest, OpenDir_Basic) { + auto builder = FileSystemBuilder(); + auto fs = builder.Build(); + + EXPECT_CALL_RETURN_GetInodeAttr(*builder.GetInodeManager(), + CURVEFS_ERROR::OK); + + auto fi = FileInfo(); + auto rc = fs->OpenDir(Request(), 1, &fi); + ASSERT_EQ(rc, CURVEFS_ERROR::OK); +} + +TEST_F(FileSystemTest, ReadDir_Basic) { + auto builder = FileSystemBuilder(); + auto fs = builder.Build(); + + // mock what opendir() does: + auto handler = fs->NewHandler(); + auto fi = FileInfo(); + fi.fh = handler->fh; + + // CASE 1: readdir success + EXPECT_CALL_RETURN_ListDentry(*builder.GetDentryManager(), + CURVEFS_ERROR::OK); + 
EXPECT_CALL_RETURN_BatchGetInodeAttrAsync(*builder.GetInodeManager(), + CURVEFS_ERROR::OK); + + auto entries = std::make_shared(); + auto rc = fs->ReadDir(Request(), 1, &fi, &entries); + ASSERT_EQ(rc, CURVEFS_ERROR::OK); +} + +TEST_F(FileSystemTest, ReadDir_CheckEntries) { + auto builder = FileSystemBuilder(); + auto fs = builder.Build(); + Ino ino(1); + + // mock what opendir() does: + auto handler = fs->NewHandler(); + auto fi = FileInfo(); + fi.fh = handler->fh; + + auto CHECK_ENTRIES = [&](const std::shared_ptr& entries) { + std::vector out; + entries->Iterate([&](DirEntry* dirEntry){ + out.push_back(*dirEntry); + }); + ASSERT_EQ(out.size(), 3); + + int idx = 0; + for (auto ino = 100; ino <= 102; ino++) { + ASSERT_EQ(out[idx].ino, ino); + ASSERT_EQ(out[idx].name, StrFormat("f%d", ino)); + ASSERT_EQ(out[idx].attr.mtime(), 123); + ASSERT_EQ(out[idx].attr.mtime_ns(), ino); + idx++; + } + }; + + // CASE 1: check entries + { + EXPECT_CALL_INVOKE_ListDentry(*builder.GetDentryManager(), + [&](uint64_t parent, + std::list* dentries, + uint32_t limit, + bool only, + uint32_t nlink) -> CURVEFS_ERROR { + for (auto ino = 100; ino <= 102; ino++) { + dentries->push_back(MkDentry(ino, StrFormat("f%d", ino))); + } + return CURVEFS_ERROR::OK; + }); + + EXPECT_CALL_INVOKE_BatchGetInodeAttrAsync(*builder.GetInodeManager(), + [&](uint64_t parentId, + std::set* inos, + std::map* attrs) -> CURVEFS_ERROR { + for (const auto& ino : *inos) { + auto attr = MkAttr(ino, AttrOption().mtime(123, ino)); + attrs->emplace(ino, attr); + } + return CURVEFS_ERROR::OK; + }); + + auto entries = std::make_shared(); + auto rc = fs->ReadDir(Request(), ino, &fi, &entries); + ASSERT_EQ(rc, CURVEFS_ERROR::OK); + CHECK_ENTRIES(entries); + } + + // CASE 2: check dir cache + { + // readdir from cache + auto entries = std::make_shared(); + auto rc = fs->ReadDir(Request(), ino, &fi, &entries); + ASSERT_EQ(rc, CURVEFS_ERROR::OK); + CHECK_ENTRIES(entries); + } +} + +TEST_F(FileSystemTest, ReleaseDir_Basic) { + auto builder = FileSystemBuilder(); + auto fs = builder.Build(); + + auto fi = FileInfo(); + auto rc = fs->ReleaseDir(Request(), 1, &fi); + ASSERT_EQ(rc, CURVEFS_ERROR::OK); +} + +TEST_F(FileSystemTest, ReleaseDir_CheckHandler) { + auto builder = FileSystemBuilder(); + auto fs = builder.Build(); + + // mock what opendir() does: + auto handler = fs->NewHandler(); + auto fh = handler->fh; + + // CASE 1: find handler success + ASSERT_TRUE(fs->FindHandler(fh) != nullptr); + + // CASE 2: releasedir will release handler + auto fi = FileInfo(); + fi.fh = fh; + auto rc = fs->ReleaseDir(Request(), 1, &fi); + ASSERT_EQ(rc, CURVEFS_ERROR::OK); + ASSERT_TRUE(fs->FindHandler(fh) == nullptr); +} + +TEST_F(FileSystemTest, Open_Basic) { + auto builder = FileSystemBuilder(); + auto fs = builder.Build(); + + // mock what lookup() does: + Ino ino(100); + auto attrWatcher = fs->BorrowMember().attrWatcher; + attrWatcher->RemeberMtime(MkAttr(ino, AttrOption().mtime(123, 456))); + + // CASE 1: open success + { + EXPECT_CALL_INVOKE_GetInode(*builder.GetInodeManager(), + [&](uint64_t ino, + std::shared_ptr& inode) -> CURVEFS_ERROR { + inode = MkInode(ino, InodeOption().mtime(123, 456)); + return CURVEFS_ERROR::OK; + }); + + auto fi = FileInfo(); + auto rc = fs->Open(Request(), ino, &fi); + ASSERT_EQ(rc, CURVEFS_ERROR::OK); + } + + // CASE 2: file already opened + { + auto fi = FileInfo(); + auto rc = fs->Open(Request(), ino, &fi); + ASSERT_EQ(rc, CURVEFS_ERROR::OK); + } +} + +TEST_F(FileSystemTest, Open_Stale) { + auto builder = FileSystemBuilder(); + 
auto fs = builder.Build(); + + // mock lookup() does: + Ino ino(100); + auto attrWatcher = fs->BorrowMember().attrWatcher; + attrWatcher->RemeberMtime(MkAttr(ino, AttrOption().mtime(123, 456))); + + // CASE 1: mtime(123, 456) != mtime(123, 789) + EXPECT_CALL_INVOKE_GetInode(*builder.GetInodeManager(), + [&](uint64_t ino, std::shared_ptr& inode) + -> CURVEFS_ERROR { + inode = MkInode(ino, InodeOption().mtime(123, 789)); + return CURVEFS_ERROR::OK; + }); + + auto fi = FileInfo(); + auto rc = fs->Open(Request(), ino, &fi); + ASSERT_EQ(rc, CURVEFS_ERROR::STALE); +} + +TEST_F(FileSystemTest, Open_StaleForAttrNotFound) { + auto builder = FileSystemBuilder(); + auto fs = builder.Build(); + + EXPECT_CALL_INVOKE_GetInode(*builder.GetInodeManager(), + [&](uint64_t ino, std::shared_ptr& inode) + -> CURVEFS_ERROR { + inode = MkInode(ino, InodeOption().mtime(123, 456)); + return CURVEFS_ERROR::OK; + }); + + Ino ino(100); + auto fi = FileInfo(); + auto rc = fs->Open(Request(), ino, &fi); + ASSERT_EQ(rc, CURVEFS_ERROR::STALE); +} + +TEST_F(FileSystemTest, Release_Basic) { + auto builder = FileSystemBuilder(); + auto fs = builder.Build(); + auto rc = fs->Release(Request(), 100); + ASSERT_EQ(rc, CURVEFS_ERROR::OK); +} + +TEST_F(FileSystemTest, Release_CheckOpenStatus) { + auto builder = FileSystemBuilder(); + auto fs = builder.Build(); + + // mock what open() does: + Ino ino(100); + auto inode = MkInode(100); + auto openfiles = fs->BorrowMember().openFiles; + openfiles->Open(ino, inode); + + // CASE 1: ino(100) is opened + auto out = MkInode(0); + bool yes = openfiles->IsOpened(ino, &out); + ASSERT_TRUE(yes); + ASSERT_EQ(inode->GetInodeId(), ino); + + // CASE 2: release will close open file + auto rc = fs->Release(Request(), 100); + ASSERT_EQ(rc, CURVEFS_ERROR::OK); + yes = openfiles->IsOpened(ino, &out); + ASSERT_FALSE(yes); +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/test/client/filesystem/helper/builder.h b/curvefs/test/client/filesystem/helper/builder.h new file mode 100644 index 0000000000..dc7c196adb --- /dev/null +++ b/curvefs/test/client/filesystem/helper/builder.h @@ -0,0 +1,247 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Project: Curve + * Created Date: 2023-03-29 + * Author: Jingli Chen (Wine93) + */ + +#ifndef CURVEFS_TEST_CLIENT_FILESYSTEM_HELPER_BUILDER_H_ +#define CURVEFS_TEST_CLIENT_FILESYSTEM_HELPER_BUILDER_H_ + +#include +#include + +#include +#include + +#include "curvefs/src/client/filesystem/meta.h" +#include "curvefs/src/client/filesystem/filesystem.h" +#include "curvefs/test/client/mock_metaserver_client.h" +#include "curvefs/test/client/mock_inode_cache_manager.h" +#include "curvefs/test/client/mock_dentry_cache_mamager.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +using ::curvefs::client::common::KernelCacheOption; + +class DeferSyncBuilder { + public: + using Callback = std::function; + + static DeferSyncOption DefaultOption() { + return DeferSyncOption { + delay: 3, + deferDirMtime: false, + }; + } + + public: + DeferSyncBuilder() + : option_(DefaultOption()), + dentryManager_(std::make_shared()), + inodeManager_(std::make_shared()) {} + + DeferSyncBuilder SetOption(Callback callback) { + callback(&option_); + return *this; + } + + std::shared_ptr Build() { + return std::make_shared(option_); + } + + std::shared_ptr GetDentryManager() { + return dentryManager_; + } + + std::shared_ptr GetInodeManager() { + return inodeManager_; + } + + private: + DeferSyncOption option_; + std::shared_ptr dentryManager_; + std::shared_ptr inodeManager_; +}; + +class DirCacheBuilder { + public: + using Callback = std::function; + + static DirCacheOption DefaultOption() { + return DirCacheOption { + lruSize: 5000000, + }; + } + + public: + DirCacheBuilder() : option_(DefaultOption()) {} + + DirCacheBuilder SetOption(Callback callback) { + callback(&option_); + return *this; + } + + std::shared_ptr Build() { + return std::make_shared(option_); + } + + private: + DirCacheOption option_; +}; + +class OpenFilesBuilder { + public: + using Callback = std::function; + + static OpenFilesOption DefaultOption() { + return OpenFilesOption { + lruSize: 65535, + deferSyncSecond: 3, + }; + } + + public: + OpenFilesBuilder() + : option_(DefaultOption()), + deferSync_(DeferSyncBuilder().Build()) {} + + OpenFilesBuilder SetOption(Callback callback) { + callback(&option_); + return *this; + } + + std::shared_ptr Build() { + return std::make_shared(option_, deferSync_); + } + + private: + std::shared_ptr deferSync_; + OpenFilesOption option_; +}; + +class RPCClientBuilder { + public: + using Callback = std::function; + + static RPCOption DefaultOption() { + return RPCOption{ listDentryLimit: 65535 }; + } + + public: + RPCClientBuilder() + : option_(DefaultOption()), + dentryManager_(std::make_shared()), + inodeManager_(std::make_shared()) {} + + RPCClientBuilder SetOption(Callback callback) { + callback(&option_); + return *this; + } + + std::shared_ptr Build() { + ExternalMember member(dentryManager_, inodeManager_); + return std::make_shared(option_, member); + } + + std::shared_ptr GetDentryManager() { + return dentryManager_; + } + + std::shared_ptr GetInodeManager() { + return inodeManager_; + } + + private: + RPCOption option_; + std::shared_ptr dentryManager_; + std::shared_ptr inodeManager_; +}; + +// build filesystem which you want +class FileSystemBuilder { + public: + using Callback = std::function; + + FileSystemOption DefaultOption() { + auto option = FileSystemOption(); + auto kernelCacheOption = KernelCacheOption { + entryTimeoutSec: 3600, + dirEntryTimeoutSec: 3600, + attrTimeoutSec: 3600, + dirAttrTimeoutSec: 3600, + }; + auto lookupCacheOption = LookupCacheOption { + 
lruSize: 100000, + negativeTimeoutSec: 0, + }; + auto attrWatcherOption = AttrWatcherOption { + lruSize: 5000000, + }; + + option.cto = true; + option.disableXattr = true; + option.maxNameLength = 255; + option.blockSize = 0x10000u; + option.kernelCacheOption = kernelCacheOption; + option.lookupCacheOption = lookupCacheOption; + option.dirCacheOption = DirCacheBuilder::DefaultOption(); + option.openFilesOption = OpenFilesBuilder::DefaultOption(); + option.attrWatcherOption = attrWatcherOption; + option.rpcOption = RPCClientBuilder::DefaultOption(); + option.deferSyncOption = DeferSyncBuilder::DefaultOption(); + return option; + } + + public: + FileSystemBuilder() + : option_(DefaultOption()), + dentryManager_(std::make_shared()), + inodeManager_(std::make_shared()) {} + + FileSystemBuilder SetOption(Callback callback) { + callback(&option_); + return *this; + } + + std::shared_ptr Build() { + auto member = ExternalMember(dentryManager_, inodeManager_); + return std::make_shared(option_, member); + } + + std::shared_ptr GetDentryManager() { + return dentryManager_; + } + + std::shared_ptr GetInodeManager() { + return inodeManager_; + } + + private: + FileSystemOption option_; + std::shared_ptr dentryManager_; + std::shared_ptr inodeManager_; +}; + +} // namespace filesystem +} // namespace client +} // namespace curvefs + +#endif // CURVEFS_TEST_CLIENT_FILESYSTEM_HELPER_BUILDER_H_ diff --git a/curvefs/test/client/filesystem/helper/expect.h b/curvefs/test/client/filesystem/helper/expect.h new file mode 100644 index 0000000000..5daad73d9c --- /dev/null +++ b/curvefs/test/client/filesystem/helper/expect.h @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Project: Curve + * Created Date: 2023-03-29 + * Author: Jingli Chen (Wine93) + */ + +#ifndef CURVEFS_TEST_CLIENT_FILESYSTEM_HELPER_EXPECT_H_ +#define CURVEFS_TEST_CLIENT_FILESYSTEM_HELPER_EXPECT_H_ + +#include + +#include "curvefs/test/client/mock_metaserver_client.h" +#include "curvefs/test/client/mock_inode_cache_manager.h" +#include "curvefs/test/client/mock_dentry_cache_mamager.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +using ::testing::_; +using ::testing::Return; +using ::testing::Invoke; +using ::curvefs::client::MockDentryCacheManager; +using ::curvefs::client::MockInodeCacheManager; +using ::curvefs::client::rpcclient::MockMetaServerClient; + +/* + * Reference: + * + * DentryCacheManager: + * ListDentry(uint64_t parent, + * std::list *dentryList, + * uint32_t limit, + * bool dirOnly = false, + * uint32_t nlink = 0); + * + * + * InodeCacheManager: + * GetInodeAttr(uint64_t inodeId, InodeAttr *out); + * + * BatchGetInodeAttrAsync(uint64_t parentId, + * std::set* inodeIds, + * std::map *attrs); + * + * GetInode(uint64_t inodeId, + * std::shared_ptr& out); + */ + +// times +#define EXPECT_CALL_INDOE_SYNC_TIMES(CLIENT, INO, TIMES) \ + EXPECT_CALL(CLIENT, UpdateInodeWithOutNlinkAsync_rvr(_, INO, _, _, _)) \ + .Times(TIMES) + +// return +#define EXPECT_CALL_RETURN_GetDentry(MANAGER, CODE) \ +do { \ + EXPECT_CALL(MANAGER, GetDentry(_, _, _)) \ + .WillOnce(Return(CODE)); \ +} while (0) + +#define EXPECT_CALL_RETURN_ListDentry(MANAGER, CODE) \ +do { \ + EXPECT_CALL(MANAGER, ListDentry(_, _, _, _, _)) \ + .WillOnce(Return(CODE)); \ +} while (0) + +#define EXPECT_CALL_RETURN_GetInodeAttr(MANAGER, CODE) \ +do { \ + EXPECT_CALL(MANAGER, GetInodeAttr(_, _)) \ + .WillOnce(Return(CODE)); \ +} while (0) + +#define EXPECT_CALL_RETURN_BatchGetInodeAttrAsync(MANAGER, CODE) \ +do { \ + EXPECT_CALL(MANAGER, BatchGetInodeAttrAsync(_, _, _)) \ + .WillOnce(Return(CODE)); \ +} while (0) + +#define EXPECT_CALL_RETURN_GetInode(MANAGER, CODE) \ +do { \ + EXPECT_CALL(MANAGER, GetInode(_, _)) \ + .WillOnce(Return(CODE)); \ +} while (0) + +// invoke +#define EXPECT_CALL_INVOKE_ListDentry(MANAGER, CALLBACK) \ +do { \ + EXPECT_CALL(MANAGER, ListDentry(_, _, _, _, _)) \ + .WillOnce(Invoke(CALLBACK)); \ +} while (0) + +#define EXPECT_CALL_INVOKE_GetInodeAttr(MANAGER, CALLBACK) \ +do { \ + EXPECT_CALL(MANAGER, GetInodeAttr(_, _)) \ + .WillOnce(Invoke(CALLBACK)); \ +} while (0) + +#define EXPECT_CALL_INVOKE_BatchGetInodeAttrAsync(MANAGER, CALLBACK) \ +do { \ + EXPECT_CALL(MANAGER, BatchGetInodeAttrAsync(_, _, _)) \ + .WillOnce(Invoke(CALLBACK)); \ +} while (0) + +#define EXPECT_CALL_INVOKE_GetInode(MANAGER, CALLBACK) \ +do { \ + EXPECT_CALL(MANAGER, GetInode(_, _)) \ + .WillOnce(Invoke(CALLBACK)); \ +} while (0) + +} // namespace filesystem +} // namespace client +} // namespace curvefs + +#endif // CURVEFS_TEST_CLIENT_FILESYSTEM_HELPER_EXPECT_H_ diff --git a/curvefs/test/client/filesystem/helper/helper.h b/curvefs/test/client/filesystem/helper/helper.h new file mode 100644 index 0000000000..26fa1338b9 --- /dev/null +++ b/curvefs/test/client/filesystem/helper/helper.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-03-29 + * Author: Jingli Chen (Wine93) + */ + +#ifndef CURVEFS_TEST_CLIENT_FILESYSTEM_HELPER_HELPER_H_ +#define CURVEFS_TEST_CLIENT_FILESYSTEM_HELPER_HELPER_H_ + +#include "curvefs/test/client/filesystem/helper/builder.h" +#include "curvefs/test/client/filesystem/helper/expect.h" +#include "curvefs/test/client/filesystem/helper/meta.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +} // namespace filesystem +} // namespace client +} // namespace curvefs + +#endif // CURVEFS_TEST_CLIENT_FILESYSTEM_HELPER_HELPER_H_ diff --git a/curvefs/test/client/filesystem/helper/meta.cpp b/curvefs/test/client/filesystem/helper/meta.cpp new file mode 100644 index 0000000000..49d9540d61 --- /dev/null +++ b/curvefs/test/client/filesystem/helper/meta.cpp @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Project: Curve + * Created Date: 2023-03-29 + * Author: Jingli Chen (Wine93) + */ + +#include "curvefs/test/client/filesystem/helper/meta.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +AttrOption AttrOption::type(FsFileType type) { + type_ = type; + return *this; +} + +AttrOption AttrOption::mode(uint32_t mode) { + mode_ = mode; + return *this; +} + +AttrOption AttrOption::nlink(uint32_t nlink) { + nlink_ = nlink; + return *this; +} + +AttrOption AttrOption::uid(uint32_t uid) { + uid_ = uid; + return *this; +} + +AttrOption AttrOption::gid(uint32_t gid) { + gid_ = gid; + return *this; +} + +AttrOption AttrOption::length(uint64_t length) { + length_ = length; + return *this; +} + +AttrOption AttrOption::rdev(uint64_t rdev) { + rdev_ = rdev; + return *this; +} + +AttrOption AttrOption::atime(uint64_t seconds, uint32_t naoSeconds) { + atime_ = TimeSpec(seconds, naoSeconds); + return *this; +} + +AttrOption AttrOption::mtime(uint64_t seconds, uint32_t naoSeconds) { + mtime_ = TimeSpec(seconds, naoSeconds); + return *this; +} + +AttrOption AttrOption::ctime(uint64_t seconds, uint32_t naoSeconds) { + ctime_ = TimeSpec(seconds, naoSeconds); + return *this; +} + +InodeOption InodeOption::mtime(uint64_t seconds, uint32_t naoSeconds) { + mtime_ = TimeSpec(seconds, naoSeconds); + return *this; +} + +InodeOption InodeOption::metaClient( + std::shared_ptr metaClient) { + metaClient_ = metaClient; + return *this; +} + +InodeAttr MkAttr(Ino ino, AttrOption option) { + InodeAttr attr; + attr.set_inodeid(ino); + if (option.type_) { attr.set_type(option.type_); } + attr.set_mode(option.mode_); + attr.set_nlink(option.nlink_); + attr.set_uid(option.uid_); + attr.set_gid(option.gid_); + attr.set_length(option.length_); + attr.set_rdev(option.rdev_); + attr.set_atime(option.atime_.seconds); + attr.set_atime_ns(option.atime_.nanoSeconds); + attr.set_mtime(option.mtime_.seconds); + attr.set_mtime_ns(option.mtime_.nanoSeconds); + attr.set_ctime(option.ctime_.seconds); + attr.set_ctime_ns(option.ctime_.nanoSeconds); + return attr; +} + +std::shared_ptr MkInode(Ino ino, InodeOption option) { + Inode inode; + inode.set_inodeid(ino); + inode.set_mtime(option.mtime_.seconds); + inode.set_mtime_ns(option.mtime_.nanoSeconds); + return std::make_shared(inode, option.metaClient_); +} + +Dentry MkDentry(Ino ino, const std::string& name) { + Dentry dentry; + dentry.set_inodeid(ino); + dentry.set_name(name); + return dentry; +} + +DirEntry MkDirEntry(Ino ino, + const std::string& name, + InodeAttr attr) { + return DirEntry(ino, name, attr); +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/test/client/filesystem/helper/meta.h b/curvefs/test/client/filesystem/helper/meta.h new file mode 100644 index 0000000000..ffa5899a67 --- /dev/null +++ b/curvefs/test/client/filesystem/helper/meta.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Project: Curve + * Created Date: 2023-03-29 + * Author: Jingli Chen (Wine93) + */ + +#ifndef CURVEFS_TEST_CLIENT_FILESYSTEM_HELPER_META_H_ +#define CURVEFS_TEST_CLIENT_FILESYSTEM_HELPER_META_H_ + +#include + +#include +#include + +#include "absl/strings/str_format.h" +#include "curvefs/src/client/filesystem/meta.h" +#include "curvefs/src/client/filesystem/filesystem.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +using ::absl::StrFormat; + +struct AttrOption { + public: + AttrOption() = default; + AttrOption type(FsFileType type); + AttrOption mode(uint32_t mode); + AttrOption nlink(uint32_t nlink); + AttrOption uid(uint32_t uid); + AttrOption gid(uint32_t gid); + AttrOption length(uint64_t length); + AttrOption rdev(uint64_t rdev); + AttrOption atime(uint64_t seconds, uint32_t naoSeconds); + AttrOption mtime(uint64_t seconds, uint32_t naoSeconds); + AttrOption ctime(uint64_t seconds, uint32_t naoSeconds); + + private: + friend InodeAttr MkAttr(Ino ino, AttrOption option); + + private: + FsFileType type_; + uint32_t mode_; + uint32_t nlink_; + uint32_t uid_; + uint32_t gid_; + uint64_t length_; + uint64_t rdev_; + TimeSpec atime_; + TimeSpec mtime_; + TimeSpec ctime_; +}; + +class InodeOption { + public: + InodeOption() = default; + InodeOption mtime(uint64_t seconds, uint32_t naoSeconds); + InodeOption metaClient(std::shared_ptr metaClient); + + private: + friend std::shared_ptr MkInode(Ino ino, InodeOption option); + + private: + TimeSpec mtime_; + std::shared_ptr metaClient_; +}; + +InodeAttr MkAttr(Ino ino, AttrOption option = AttrOption()); + +std::shared_ptr MkInode(Ino ino, + InodeOption option = InodeOption()); + +Dentry MkDentry(Ino ino, const std::string& name); + +DirEntry MkDirEntry(Ino ino, + const std::string& name, + InodeAttr attr = MkAttr(0)); + +} // namespace filesystem +} // namespace client +} // namespace curvefs + +#endif // CURVEFS_TEST_CLIENT_FILESYSTEM_HELPER_META_H_ diff --git a/curvefs/test/client/filesystem/lookup_cache_test.cpp b/curvefs/test/client/filesystem/lookup_cache_test.cpp new file mode 100644 index 0000000000..fd51a447a0 --- /dev/null +++ b/curvefs/test/client/filesystem/lookup_cache_test.cpp @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-04-03 + * Author: Jingli Chen (Wine93) + */ + +#include + +#include "curvefs/src/client/filesystem/lookup_cache.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +class LookupCacheTest : public ::testing::Test { + protected: + void SetUp() override {} + void TearDown() override {} +}; + +TEST_F(LookupCacheTest, Basic) { + auto option = LookupCacheOption{ lruSize: 10, negativeTimeoutSec: 1 }; + auto cache = std::make_shared(option); + + ASSERT_FALSE(cache->Get(1, "f1")); + + cache->Put(1, "f1"); + ASSERT_TRUE(cache->Get(1, "f1")); +} + +TEST_F(LookupCacheTest, Enable) { + // CASE 1: cache off, negativeTimeoutSec = 0. 
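+    // (a negativeTimeoutSec of 0 disables negative caching, so the entry put
+    // below is never served back by Get)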
+ auto option = LookupCacheOption{ lruSize: 10, negativeTimeoutSec: 0 }; + auto cache = std::make_shared(option); + cache->Put(1, "f1"); + ASSERT_FALSE(cache->Get(1, "f1")); + + // CASE 2: cache on, negativeTimeoutSec = 1. + option = LookupCacheOption{ lruSize: 10, negativeTimeoutSec: 1 }; + cache = std::make_shared(option); + cache->Put(1, "f1"); + ASSERT_TRUE(cache->Get(1, "f1")); +} + +TEST_F(LookupCacheTest, Timeout) { + auto option = LookupCacheOption{ lruSize: 10, negativeTimeoutSec: 1 }; + auto cache = std::make_shared(option); + + // CASE 1: cache hit. + cache->Put(1, "f1"); + ASSERT_TRUE(cache->Get(1, "f1")); + + // CASE 2: cache miss due to expiration. + std::this_thread::sleep_for(std::chrono::seconds(1)); + ASSERT_FALSE(cache->Get(1, "f1")); +} + +TEST_F(LookupCacheTest, LRUSize) { + auto option = LookupCacheOption{ lruSize: 1, negativeTimeoutSec: 1 }; + auto cache = std::make_shared(option); + + // CASE 1: cache hit. + cache->Put(1, "f1"); + ASSERT_TRUE(cache->Get(1, "f1")); + + // CASE 2: cache miss due to eviction. + cache->Put(1, "f2"); + ASSERT_FALSE(cache->Get(1, "f1")); + ASSERT_TRUE(cache->Get(1, "f2")); +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/test/client/filesystem/message_queue_test.cpp b/curvefs/test/client/filesystem/message_queue_test.cpp new file mode 100644 index 0000000000..7c2d948da3 --- /dev/null +++ b/curvefs/test/client/filesystem/message_queue_test.cpp @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Project: Curve + * Created Date: 2023-03-29 + * Author: Jingli Chen (Wine93) + */ + +#include + +#include "curvefs/src/client/filesystem/message_queue.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +class MessageQueueTest : public ::testing::Test { + protected: + void SetUp() override {} + void TearDown() override {} +}; + +TEST_F(MessageQueueTest, Basic) { + auto mq = std::make_shared>("test", 10); + + std::vector receive; + mq->Subscribe([&receive](const int& number) { + receive.emplace_back(number); + }); + mq->Start(); + + mq->Publish(1); + mq->Publish(2); + mq->Publish(3); + mq->Stop(); + + std::vector expected{1, 2, 3}; + ASSERT_EQ(receive, expected); +} + +TEST_F(MessageQueueTest, PublishAfterStop) { + auto mq = std::make_shared>("test", 10); + + std::vector receive; + mq->Subscribe([&receive](const int& number) { + receive.emplace_back(number); + }); + mq->Start(); + + mq->Publish(1); + mq->Publish(2); + mq->Publish(3); + mq->Stop(); + + // The message queue will not consume any more messages after it has stopped + mq->Publish(4); + + std::vector expected{1, 2, 3}; + ASSERT_EQ(receive, expected); +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/test/client/filesystem/meta_test.cpp b/curvefs/test/client/filesystem/meta_test.cpp new file mode 100644 index 0000000000..84b0a1fafe --- /dev/null +++ b/curvefs/test/client/filesystem/meta_test.cpp @@ -0,0 +1,131 @@ + + +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
diff --git a/curvefs/test/client/filesystem/meta_test.cpp b/curvefs/test/client/filesystem/meta_test.cpp
new file mode 100644
index 0000000000..84b0a1fafe
--- /dev/null
+++ b/curvefs/test/client/filesystem/meta_test.cpp
@@ -0,0 +1,131 @@
+
+
+/*
+ * Copyright (c) 2023 NetEase Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Project: Curve
+ * Created Date: 2023-04-03
+ * Author: Jingli Chen (Wine93)
+ */
+
+#include <gtest/gtest.h>
+
+#include <sstream>
+
+#include "curvefs/src/client/filesystem/meta.h"
+
+namespace curvefs {
+namespace client {
+namespace filesystem {
+
+class TimeSpecTest : public ::testing::Test {
+ protected:
+    void SetUp() override {}
+    void TearDown() override {}
+};
+
+class HandlerManagerTest : public ::testing::Test {
+ protected:
+    void SetUp() override {}
+    void TearDown() override {}
+};
+
+TEST_F(TimeSpecTest, Basic) {
+    // time1 == time2
+    ASSERT_EQ(TimeSpec(10, 20), TimeSpec(10, 20));
+
+    // time1 != time2
+    ASSERT_NE(TimeSpec(10, 20), TimeSpec(10, 21));
+    ASSERT_NE(TimeSpec(10, 20), TimeSpec(11, 20));
+    ASSERT_NE(TimeSpec(10, 20), TimeSpec(11, 21));
+
+    // time1 < time2
+    ASSERT_LT(TimeSpec(10, 20), TimeSpec(10, 21));
+    ASSERT_LT(TimeSpec(10, 20), TimeSpec(11, 20));
+    ASSERT_LT(TimeSpec(10, 20), TimeSpec(11, 21));
+
+    // time1 > time2
+    ASSERT_GT(TimeSpec(10, 20), TimeSpec(10, 19));
+    ASSERT_GT(TimeSpec(10, 20), TimeSpec(9, 21));
+    ASSERT_GT(TimeSpec(10, 20), TimeSpec(9, 19));
+
+    // time1 + time2
+    ASSERT_EQ(TimeSpec(10, 20) + TimeSpec(10, 20), TimeSpec(20, 40));
+
+    // std::cout << time
+    std::ostringstream oss;
+    oss << TimeSpec(10, 20);
+    ASSERT_EQ(oss.str(), "10.20");
+
+    // time2(time1)
+    TimeSpec time1(10, 20);
+    TimeSpec time2(time1);
+    ASSERT_EQ(time2.seconds, 10);
+    ASSERT_EQ(time2.nanoSeconds, 20);
+}
+
+TEST_F(HandlerManagerTest, Basic) {
+    // CASE 1: new handler
+    auto manager = std::make_shared<HandlerManager>();
+    for (auto i = 0; i < 10; i++) {
+        auto handler = manager->NewHandler();
+        ASSERT_EQ(handler->fh, i);
+        ASSERT_FALSE(handler->padding);
+        ASSERT_NE(handler->buffer, nullptr);
+    }
+
+    // CASE 2: find handler
+    ASSERT_EQ(manager->FindHandler(10), nullptr);
+    for (auto i = 0; i < 10; i++) {
+        auto handler = manager->FindHandler(i);
+        ASSERT_NE(handler, nullptr);
+        ASSERT_EQ(handler->fh, i);
+    }
+
+    // CASE 3: release handler
+    for (auto i = 0; i < 10; i++) {
+        manager->ReleaseHandler(i);
+        auto handler = manager->FindHandler(i);
+        ASSERT_EQ(handler, nullptr);
+    }
+}
+
+TEST_F(HandlerManagerTest, ModfidyHandler) {
+    auto manager = std::make_shared<HandlerManager>();
+    manager->NewHandler();  // fh = 0
+
+    auto handler = manager->FindHandler(0);
+    ASSERT_NE(handler, nullptr);
+    ASSERT_FALSE(handler->padding);
+    ASSERT_NE(handler->buffer, nullptr);
+
+    auto buffer = handler->buffer;
+    buffer->size = 10;
+    buffer->p = static_cast<char*>(realloc(buffer->p, buffer->size));
+    handler->padding = true;
+    char* position = buffer->p;
+
+    handler = manager->FindHandler(0);
+    buffer = handler->buffer;
+    ASSERT_EQ(buffer->size, 10);
+    ASSERT_EQ(buffer->p, position);
+    ASSERT_TRUE(handler->padding);
+}
+
+} // namespace filesystem
+} // namespace client
+} // namespace curvefs
diff --git a/curvefs/test/client/filesystem/openfile_test.cpp b/curvefs/test/client/filesystem/openfile_test.cpp
new file mode 100644
index 0000000000..304aff9f3f
--- /dev/null
+++ b/curvefs/test/client/filesystem/openfile_test.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2023 NetEase Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-04-03 + * Author: Jingli Chen (Wine93) + */ + +#include + +#include "curvefs/src/client/filesystem/openfile.h" +#include "curvefs/test/client/filesystem/helper/helper.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +class OpenFileTest : public ::testing::Test { + protected: + void SetUp() override {} + void TearDown() override {} +}; + +TEST_F(OpenFileTest, Open) { + auto builder = OpenFilesBuilder(); + auto openfiles = builder.Build(); + + Ino ino(100); + auto inode = MkInode(ino); + openfiles->Open(ino, inode); + + // CASE 1: ino(100) opened + auto out = MkInode(0); + bool yes = openfiles->IsOpened(ino, &out); + ASSERT_TRUE(yes); + ASSERT_EQ(out->GetInodeId(), ino); + + // CASE 2: ino(200) closed + yes = openfiles->IsOpened(200, &out); + ASSERT_FALSE(yes); +} + +TEST_F(OpenFileTest, Close) { + auto builder = OpenFilesBuilder(); + auto openfiles = builder.Build(); + + Ino ino(100); + auto inode = MkInode(ino); + openfiles->Open(ino, inode); + + // CASE 1: check open status + auto out = MkInode(0); + bool yes = openfiles->IsOpened(ino, &out); + ASSERT_TRUE(yes); + ASSERT_EQ(out->GetInodeId(), ino); + + // CASE 2: check open status after close + openfiles->Close(ino); + yes = openfiles->IsOpened(ino, &out); + ASSERT_FALSE(yes); +} + +TEST_F(OpenFileTest, References) { + auto builder = OpenFilesBuilder(); + auto openfiles = builder.Build(); + + // open ino(100) twices + Ino ino(100); + auto inode = MkInode(ino); + openfiles->Open(ino, inode); + openfiles->Open(ino, inode); + + // CASE 1: close once + openfiles->Close(ino); + auto out = MkInode(0); + bool yes = openfiles->IsOpened(ino, &out); + ASSERT_TRUE(yes); + ASSERT_EQ(out->GetInodeId(), ino); + + // CASE 2: close again and trigger delete + openfiles->Close(ino); + yes = openfiles->IsOpened(ino, &out); + ASSERT_FALSE(yes); +} + +TEST_F(OpenFileTest, CloseAll) { + auto builder = OpenFilesBuilder(); + auto openfiles = builder.Build(); + + // 1) open ino{1..10} + for (auto ino = 1; ino <= 10; ino++) { + auto inode = MkInode(ino); + openfiles->Open(ino, inode); + } + + // 2) check open status for ino{1..10} + auto out = MkInode(0); + for (auto ino = 1; ino <= 10; ino++) { + bool yes = openfiles->IsOpened(ino, &out); + ASSERT_TRUE(yes); + ASSERT_EQ(out->GetInodeId(), ino); + } + + // 3) CloseAll() and check open status for ino{1..10} + openfiles->CloseAll(); + for (auto ino = 1; ino <= 10; ino++) { + bool yes = openfiles->IsOpened(ino, &out); + ASSERT_FALSE(yes); + } +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/test/client/filesystem/rpc_client_test.cpp b/curvefs/test/client/filesystem/rpc_client_test.cpp new file mode 100644 index 0000000000..9419af8579 --- /dev/null +++ b/curvefs/test/client/filesystem/rpc_client_test.cpp @@ -0,0 +1,157 @@ + +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-04-03 + * Author: Jingli Chen (Wine93) + */ + +#include + +#include "curvefs/src/client/filesystem/utils.h" +#include "curvefs/test/client/filesystem/helper/helper.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +class RPCClientTest : public ::testing::Test { + protected: + void SetUp() override {} + void TearDown() override {} +}; + +TEST_F(RPCClientTest, GetAttr_Basic) { + auto builder = RPCClientBuilder(); + auto rpc = builder.Build(); + + // CASE 1: ok + { + EXPECT_CALL_RETURN_GetInodeAttr(*builder.GetInodeManager(), + CURVEFS_ERROR::OK); + + InodeAttr attr; + auto rc = rpc->GetAttr(100, &attr); + ASSERT_EQ(rc, CURVEFS_ERROR::OK); + } + + // CASE 2: inode not exist + { + EXPECT_CALL_RETURN_GetInodeAttr(*builder.GetInodeManager(), + CURVEFS_ERROR::NOTEXIST); + + InodeAttr attr; + auto rc = rpc->GetAttr(100, &attr); + ASSERT_EQ(rc, CURVEFS_ERROR::NOTEXIST); + } +} + +TEST_F(RPCClientTest, Lookup_Basic) { + auto builder = RPCClientBuilder(); + auto rpc = builder.Build(); + + // CASE 1: ok + { + EXPECT_CALL_RETURN_GetDentry(*builder.GetDentryManager(), + CURVEFS_ERROR::OK); + EXPECT_CALL_RETURN_GetInodeAttr(*builder.GetInodeManager(), + CURVEFS_ERROR::OK); + + EntryOut entryOut; + auto rc = rpc->Lookup(1, "f1", &entryOut); + ASSERT_EQ(rc, CURVEFS_ERROR::OK); + } + + // CASE 2: dentry not exist + { + EXPECT_CALL_RETURN_GetDentry(*builder.GetDentryManager(), + CURVEFS_ERROR::NOTEXIST); + + EntryOut entryOut; + auto rc = rpc->Lookup(1, "f1", &entryOut); + ASSERT_EQ(rc, CURVEFS_ERROR::NOTEXIST); + } + + // CASE 3: inode not exist + { + EXPECT_CALL_RETURN_GetDentry(*builder.GetDentryManager(), + CURVEFS_ERROR::OK); + EXPECT_CALL_RETURN_GetInodeAttr(*builder.GetInodeManager(), + CURVEFS_ERROR::NOTEXIST); + + EntryOut entryOut; + auto rc = rpc->Lookup(1, "f1", &entryOut); + ASSERT_EQ(rc, CURVEFS_ERROR::NOTEXIST); + } +} + +TEST_F(RPCClientTest, ReadDir_Basic) { + auto builder = RPCClientBuilder(); + auto rpc = builder.Build(); + + // CASE 1: ok + { + EXPECT_CALL_RETURN_ListDentry(*builder.GetDentryManager(), + CURVEFS_ERROR::OK); + EXPECT_CALL_RETURN_BatchGetInodeAttrAsync(*builder.GetInodeManager(), + CURVEFS_ERROR::OK); + + auto entries = std::make_shared(); + auto rc = rpc->ReadDir(100, &entries); + ASSERT_EQ(rc, CURVEFS_ERROR::OK); + } + + // CASE 2: inode not exist + { + EXPECT_CALL_RETURN_GetInodeAttr(*builder.GetInodeManager(), + CURVEFS_ERROR::NOTEXIST); + + InodeAttr attr; + auto rc = rpc->GetAttr(100, &attr); + ASSERT_EQ(rc, CURVEFS_ERROR::NOTEXIST); + } +} + +TEST_F(RPCClientTest, Open_Basic) { + auto builder = RPCClientBuilder(); + auto rpc = builder.Build(); + + // CASE 1: ok + { + EXPECT_CALL_RETURN_GetInode(*builder.GetInodeManager(), + CURVEFS_ERROR::OK); + + auto inode = MkInode(100); + auto rc = rpc->Open(100, &inode); + ASSERT_EQ(rc, CURVEFS_ERROR::OK); + } + + // CASE 2: inode not exist + { + EXPECT_CALL_RETURN_GetInode(*builder.GetInodeManager(), + CURVEFS_ERROR::NOTEXIST); + + auto inode = MkInode(100); + auto rc = rpc->Open(100, &inode); + ASSERT_EQ(rc, CURVEFS_ERROR::NOTEXIST); + } +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/test/client/filesystem/utils_test.cpp b/curvefs/test/client/filesystem/utils_test.cpp new file mode 100644 index 0000000000..302c1ececc --- /dev/null +++ b/curvefs/test/client/filesystem/utils_test.cpp @@ -0,0 +1,104 
@@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: Curve + * Created Date: 2023-03-29 + * Author: Jingli Chen (Wine93) + */ + +#include + +#include "curvefs/src/client/filesystem/utils.h" +#include "curvefs/test/client/filesystem/helper/helper.h" + +namespace curvefs { +namespace client { +namespace filesystem { + +class UtilsTest : public ::testing::Test {}; + +TEST_F(UtilsTest, IsDir) { + InodeAttr attr; + attr.set_type(FsFileType::TYPE_DIRECTORY); + + ASSERT_TRUE(IsDir(attr)); + ASSERT_FALSE(IsS3File(attr)); + ASSERT_FALSE(IsVolmeFile(attr)); + ASSERT_FALSE(IsSymLink(attr)); +} + +TEST_F(UtilsTest, IsS3File) { + InodeAttr attr; + attr.set_type(FsFileType::TYPE_S3); + + ASSERT_FALSE(IsDir(attr)); + ASSERT_TRUE(IsS3File(attr)); + ASSERT_FALSE(IsVolmeFile(attr)); + ASSERT_FALSE(IsSymLink(attr)); +} + +TEST_F(UtilsTest, IsVolmeFile) { + InodeAttr attr; + attr.set_type(FsFileType::TYPE_FILE); + + ASSERT_FALSE(IsDir(attr)); + ASSERT_FALSE(IsS3File(attr)); + ASSERT_TRUE(IsVolmeFile(attr)); + ASSERT_FALSE(IsSymLink(attr)); +} + +TEST_F(UtilsTest, IsSymLink) { + InodeAttr attr; + attr.set_type(FsFileType::TYPE_SYM_LINK); + + ASSERT_FALSE(IsDir(attr)); + ASSERT_FALSE(IsS3File(attr)); + ASSERT_FALSE(IsVolmeFile(attr)); + ASSERT_TRUE(IsSymLink(attr)); +} + +TEST_F(UtilsTest, AttrMtime) { + InodeAttr attr; + attr.set_mtime(12345); + attr.set_mtime_ns(67890); + + auto time = AttrMtime(attr); + ASSERT_EQ(time.seconds, 12345); + ASSERT_EQ(time.nanoSeconds, 67890); +} + +TEST_F(UtilsTest, AttrCtime) { + InodeAttr attr; + attr.set_ctime(12345); + attr.set_ctime_ns(67890); + + auto time = AttrCtime(attr); + ASSERT_EQ(time.seconds, 12345); + ASSERT_EQ(time.nanoSeconds, 67890); +} + +TEST_F(UtilsTest, InodeMtime) { + auto inode = MkInode(1, InodeOption().mtime(123, 456)); + + auto time = InodeMtime(inode); + ASSERT_EQ(time.seconds, 123); + ASSERT_EQ(time.nanoSeconds, 456); +} + +} // namespace filesystem +} // namespace client +} // namespace curvefs diff --git a/curvefs/test/client/fs_cache_manager_test.cpp b/curvefs/test/client/fs_cache_manager_test.cpp index 27c7341532..057069b5fb 100644 --- a/curvefs/test/client/fs_cache_manager_test.cpp +++ b/curvefs/test/client/fs_cache_manager_test.cpp @@ -48,15 +48,19 @@ class FsCacheManagerTest : public testing::Test { S3ClientAdaptorOption option; option.blockSize = 1 * 1024 * 1024; option.chunkSize = 4 * 1024 * 1024; + option.baseSleepUs = 500; + option.objectPrefix = 0; option.pageSize = 64 * 1024; option.intervalSec = 5000; option.flushIntervalSec = 5000; option.readCacheMaxByte = 104857600; + option.readCacheThreads = 5; option.diskCacheOpt.diskCacheType = (DiskCacheType)0; option.chunkFlushThreads = 5; s3ClientAdaptor_ = new S3ClientAdaptorImpl(); fsCacheManager_ = std::make_shared( - s3ClientAdaptor_, maxReadCacheByte_, maxWriteCacheByte, nullptr); + s3ClientAdaptor_, maxReadCacheByte_, maxWriteCacheByte, + option.readCacheThreads, nullptr); s3ClientAdaptor_->Init(option, nullptr, 
nullptr, nullptr, fsCacheManager_, nullptr, nullptr); s3ClientAdaptor_->SetFsId(2); diff --git a/curvefs/test/client/lease/lease_executor_test.cpp b/curvefs/test/client/lease/lease_executor_test.cpp index 2507a14808..8609c3f4b4 100644 --- a/curvefs/test/client/lease/lease_executor_test.cpp +++ b/curvefs/test/client/lease/lease_executor_test.cpp @@ -60,7 +60,9 @@ TEST_F(LeaseExecutorTest, test_start) { { LOG(INFO) << "### case1: invalid lease time ###"; opt_.leaseTimeUs = 0; - LeaseExecutor exec(opt_, metaCache_, mdsCli_); + std::atomic enableSumInDir; + enableSumInDir.store(true); + LeaseExecutor exec(opt_, metaCache_, mdsCli_, &enableSumInDir); ASSERT_FALSE(exec.Start()); } @@ -68,7 +70,9 @@ TEST_F(LeaseExecutorTest, test_start) { LOG(INFO) << "### case2: invalid refresh times per lease ###"; opt_.refreshTimesPerLease = 0; opt_.leaseTimeUs = 20; - LeaseExecutor exec(opt_, metaCache_, mdsCli_); + std::atomic enableSumInDir; + enableSumInDir.store(true); + LeaseExecutor exec(opt_, metaCache_, mdsCli_, &enableSumInDir); ASSERT_FALSE(exec.Start()); } } @@ -85,7 +89,7 @@ TEST_F(LeaseExecutorTest, test_start_stop) { EXPECT_CALL(*metaCache_, GetAllTxIds(_)) .WillOnce(SetArgPointee<0>(std::vector{})) .WillRepeatedly(SetArgPointee<0>(txIds)); - EXPECT_CALL(*mdsCli_, RefreshSession(_, _, _, _)) + EXPECT_CALL(*mdsCli_, RefreshSession(_, _, _, _, _)) .WillOnce(Return(FSStatusCode::UNKNOWN_ERROR)) .WillRepeatedly( DoAll(SetArgPointee<1>(txIds), Return(FSStatusCode::OK))); @@ -93,7 +97,9 @@ TEST_F(LeaseExecutorTest, test_start_stop) { .Times(AtLeast(1)); // lease executor start - LeaseExecutor exec(opt_, metaCache_, mdsCli_); + std::atomic enableSumInDir; + enableSumInDir.store(true); + LeaseExecutor exec(opt_, metaCache_, mdsCli_, &enableSumInDir); ASSERT_TRUE(exec.Start()); std::this_thread::sleep_for(std::chrono::milliseconds(200)); diff --git a/curvefs/test/client/metric/BUILD b/curvefs/test/client/metric/BUILD new file mode 100644 index 0000000000..73fcc9c05d --- /dev/null +++ b/curvefs/test/client/metric/BUILD @@ -0,0 +1,34 @@ +# +# Copyright (c) 2023 NetEase Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +load("//:copts.bzl", "CURVE_TEST_COPTS") + +cc_test( + name = "client_metric_test", + srcs = glob([ + "*.cpp", + "*.h"], + ), + copts = CURVE_TEST_COPTS, + deps = [ + "//external:gtest", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + "//curvefs/src/client/metric:client_metric", + ], + visibility = ["//visibility:public"], +) diff --git a/curvefs/test/client/metric/client_metric_test.cpp b/curvefs/test/client/metric/client_metric_test.cpp new file mode 100644 index 0000000000..175e410cfe --- /dev/null +++ b/curvefs/test/client/metric/client_metric_test.cpp @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: curve + * Created Date: Fri Apr 21 2023 + * Author: Xinlong-Chen + */ + +#include +#include +#include + +#include "curvefs/src/client/metric/client_metric.h" + +using ::curvefs::client::metric::MDSClientMetric; +using ::curvefs::client::metric::MetaServerClientMetric; +using ::curvefs::client::metric::ClientOpMetric; +using ::curvefs::client::metric::S3MultiManagerMetric; +using ::curvefs::client::metric::FSMetric; +using ::curvefs::client::metric::S3Metric; +using ::curvefs::client::metric::DiskCacheMetric; +using ::curvefs::client::metric::KVClientMetric; +using ::curvefs::client::metric::S3ChunkInfoMetric; +using ::curvefs::client::metric::WarmupManagerS3Metric; + + +namespace curvefs { +namespace client { + +class ClientMetricTest : public ::testing::Test { + protected: + void SetUp() override {} + void TearDown() override {} +}; + +TEST_F(ClientMetricTest, test_prefix) { + { + const char* prefix = "curvefs_mds_client"; + ASSERT_EQ(0, ::strcmp(MDSClientMetric::prefix.c_str(), prefix)); + } + + { + const char* prefix = "curvefs_metaserver_client"; + ASSERT_EQ(0, ::strcmp(MetaServerClientMetric::prefix.c_str(), prefix)); + } + + { + const char* prefix = "curvefs_client"; + ASSERT_EQ(0, ::strcmp(ClientOpMetric::prefix.c_str(), prefix)); + } + + { + const char* prefix = "curvefs_client_manager"; + ASSERT_EQ(0, ::strcmp(S3MultiManagerMetric::prefix.c_str(), prefix)); + } + + { + const char* prefix = "curvefs_client"; + ASSERT_EQ(0, ::strcmp(FSMetric::prefix.c_str(), prefix)); + } + + { + const char* prefix = "curvefs_s3"; + ASSERT_EQ(0, ::strcmp(S3Metric::prefix.c_str(), prefix)); + } + + { + const char* prefix = "curvefs_disk_cache"; + ASSERT_EQ(0, ::strcmp(DiskCacheMetric::prefix.c_str(), prefix)); + } + + { + const char* prefix = "curvefs_kvclient"; + ASSERT_EQ(0, ::strcmp(KVClientMetric::prefix.c_str(), prefix)); + } + + { + const char* prefix = "inode_s3_chunk_info"; + ASSERT_EQ(0, ::strcmp(S3ChunkInfoMetric::prefix.c_str(), prefix)); + } + + { + const char* prefix = "curvefs_warmup"; + ASSERT_EQ(0, ::strcmp(WarmupManagerS3Metric::prefix.c_str(), prefix)); + } +} + +} // namespace client +} // namespace curvefs diff --git a/curvefs/test/client/mock_client_s3_adaptor.h b/curvefs/test/client/mock_client_s3_adaptor.h index 0365573851..2957632a62 100644 --- a/curvefs/test/client/mock_client_s3_adaptor.h +++ b/curvefs/test/client/mock_client_s3_adaptor.h @@ -69,6 +69,7 @@ class MockS3ClientAdaptor : public S3ClientAdaptor { MOCK_METHOD0(GetS3Client, std::shared_ptr()); MOCK_METHOD0(GetBlockSize, uint64_t()); MOCK_METHOD0(GetChunkSize, uint64_t()); + MOCK_METHOD0(GetObjectPrefix, uint32_t()); MOCK_METHOD0(HasDiskCache, bool()); }; diff --git a/curvefs/test/client/mock_client_s3_cache_manager.h b/curvefs/test/client/mock_client_s3_cache_manager.h index d6586eb346..ddd2b03ad6 100644 --- a/curvefs/test/client/mock_client_s3_cache_manager.h +++ b/curvefs/test/client/mock_client_s3_cache_manager.h @@ -26,6 +26,7 @@ #include #include #include +#include #include "curvefs/src/client/s3/client_s3_cache_manager.h" namespace curvefs { diff --git 
a/curvefs/test/client/mock_dentry_cache_mamager.h b/curvefs/test/client/mock_dentry_cache_mamager.h index c8b043853d..2e5e8ac41e 100644 --- a/curvefs/test/client/mock_dentry_cache_mamager.h +++ b/curvefs/test/client/mock_dentry_cache_mamager.h @@ -37,13 +37,6 @@ class MockDentryCacheManager : public DentryCacheManager { MockDentryCacheManager() {} ~MockDentryCacheManager() {} - MOCK_METHOD3(Init, CURVEFS_ERROR( - uint64_t cacheSize, bool enableCacheMetrics, uint32_t cacheTimeOutSec)); - - MOCK_METHOD1(InsertOrReplaceCache, void(const Dentry& dentry)); - - MOCK_METHOD2(DeleteCache, void(uint64_t parentId, const std::string& name)); - MOCK_METHOD3(GetDentry, CURVEFS_ERROR(uint64_t parent, const std::string &name, Dentry *out)); diff --git a/curvefs/test/client/mock_disk_cache_base.h b/curvefs/test/client/mock_disk_cache_base.h index 4cb10b7c80..4bb8f94779 100644 --- a/curvefs/test/client/mock_disk_cache_base.h +++ b/curvefs/test/client/mock_disk_cache_base.h @@ -45,6 +45,9 @@ class MockDiskCacheBase : public DiskCacheBase { MOCK_METHOD1(IsFileExist, bool(const std::string file)); + MOCK_METHOD1(CreateDir, + int(const std::string dirName)); + MOCK_METHOD0(GetCacheIoFullDir, std::string()); }; diff --git a/curvefs/test/client/mock_disk_cache_write.h b/curvefs/test/client/mock_disk_cache_write.h index 598315f824..3b6214376f 100644 --- a/curvefs/test/client/mock_disk_cache_write.h +++ b/curvefs/test/client/mock_disk_cache_write.h @@ -43,7 +43,7 @@ class MockDiskCacheWrite : public DiskCacheWrite { const char* buf, uint64_t length, bool force)); MOCK_METHOD1(CreateIoDir, - int(bool writreDir)); + int(bool writeDir)); MOCK_METHOD1(IsFileExist, bool(const std::string file)); @@ -66,6 +66,8 @@ class MockDiskCacheWrite : public DiskCacheWrite { MOCK_METHOD1(AsyncUploadEnqueue, void(const std::string objName)); MOCK_METHOD1(UploadFileByInode, int(const std::string &inode)); + + MOCK_METHOD0(IsCacheClean, bool()); }; } // namespace client diff --git a/curvefs/test/client/mock_inode_cache_manager.h b/curvefs/test/client/mock_inode_cache_manager.h index e1200d63dc..39c332ce04 100644 --- a/curvefs/test/client/mock_inode_cache_manager.h +++ b/curvefs/test/client/mock_inode_cache_manager.h @@ -40,15 +40,9 @@ class MockInodeCacheManager : public InodeCacheManager { MockInodeCacheManager() {} ~MockInodeCacheManager() {} - MOCK_METHOD5(Init, CURVEFS_ERROR(uint64_t cacheSize, - bool enableCacheMetrics, - uint32_t flushPeriodSec, - RefreshDataOption option, - uint32_t cacheTimeOutSec)); - - MOCK_METHOD0(Run, void()); - - MOCK_METHOD0(Stop, void()); + MOCK_METHOD3(Init, CURVEFS_ERROR(RefreshDataOption option, + std::shared_ptr openFiles, + std::shared_ptr deferSync)); MOCK_METHOD2(GetInode, CURVEFS_ERROR(uint64_t inodeId, @@ -75,25 +69,8 @@ class MockInodeCacheManager : public InodeCacheManager { MOCK_METHOD1(DeleteInode, CURVEFS_ERROR(uint64_t inodeid)); - MOCK_METHOD1(InvalidateNlinkCache, void(uint64_t inodeid)); - - MOCK_METHOD2(AddInodeAttrs, void(uint64_t parentId, - const RepeatedPtrField& inodeAttrs)); - - MOCK_METHOD1(ClearInodeCache, void(uint64_t inodeid)); - MOCK_METHOD1(ShipToFlush, void( const std::shared_ptr &inodeWrapper)); - - MOCK_METHOD0(FlushAll, void()); - - MOCK_METHOD0(FlushInodeOnce, void()); - - MOCK_METHOD1(ReleaseCache, void(uint64_t parentId)); - - MOCK_METHOD1(AddOpenedInode, void(uint64_t inodeId)); - - MOCK_METHOD1(RemoveOpenedInode, void(uint64_t inodeId)); }; } // namespace client diff --git a/curvefs/test/client/mock_volume_storage.h b/curvefs/test/client/mock_volume_storage.h 
index ce8c4883ab..c5e0c65763 100644 --- a/curvefs/test/client/mock_volume_storage.h +++ b/curvefs/test/client/mock_volume_storage.h @@ -26,14 +26,18 @@ #include #include "curvefs/src/client/volume/volume_storage.h" +#include "curvefs/src/client/filesystem/meta.h" namespace curvefs { namespace client { +using ::curvefs::client::filesystem::FileOut; + class MockVolumeStorage : public VolumeStorage { public: MOCK_METHOD4(Read, CURVEFS_ERROR(uint64_t, off_t, size_t, char*)); - MOCK_METHOD4(Write, CURVEFS_ERROR(uint64_t, off_t, size_t, const char*)); + MOCK_METHOD5(Write, + CURVEFS_ERROR(uint64_t, off_t, size_t, const char*, FileOut*)); MOCK_METHOD1(Flush, CURVEFS_ERROR(uint64_t)); MOCK_METHOD0(Shutdown, bool()); }; diff --git a/curvefs/test/client/rpcclient/mds_client_test.cpp b/curvefs/test/client/rpcclient/mds_client_test.cpp index 4eade8c689..024fecb61a 100644 --- a/curvefs/test/client/rpcclient/mds_client_test.cpp +++ b/curvefs/test/client/rpcclient/mds_client_test.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #include "curvefs/src/client/rpcclient/mds_client.h" #include "curvefs/test/client/rpcclient/mock_mds_base_client.h" @@ -837,7 +838,7 @@ TEST_F(MdsClientImplTest, RefreshSession) { // out std::vector out; - + std::atomic* enableSumInDir = new std::atomic (true); RefreshSessionResponse response; { @@ -846,7 +847,7 @@ TEST_F(MdsClientImplTest, RefreshSession) { EXPECT_CALL(mockmdsbasecli_, RefreshSession(_, _, _, _)) .WillOnce(SetArgPointee<1>(response)); ASSERT_FALSE(mdsclient_.RefreshSession(txIds, &out, - fsName, mountpoint)); + fsName, mountpoint, enableSumInDir)); ASSERT_TRUE(out.empty()); } @@ -857,7 +858,7 @@ TEST_F(MdsClientImplTest, RefreshSession) { EXPECT_CALL(mockmdsbasecli_, RefreshSession(_, _, _, _)) .WillOnce(SetArgPointee<1>(response)); ASSERT_FALSE(mdsclient_.RefreshSession(txIds, &out, - fsName, mountpoint)); + fsName, mountpoint, enableSumInDir)); ASSERT_EQ(1, out.size()); ASSERT_TRUE( google::protobuf::util::MessageDifferencer::Equals(out[0], tmp)) @@ -873,7 +874,8 @@ TEST_F(MdsClientImplTest, RefreshSession) { EXPECT_CALL(mockmdsbasecli_, RefreshSession(_, _, _, _)) .WillRepeatedly(Invoke(RefreshSessionRpcFailed)); ASSERT_EQ(FSStatusCode::RPC_ERROR, - mdsclient_.RefreshSession(txIds, &out, fsName, mountpoint)); + mdsclient_.RefreshSession(txIds, &out, fsName, mountpoint, + enableSumInDir)); } } diff --git a/curvefs/test/client/rpcclient/metaserver_client_test.cpp b/curvefs/test/client/rpcclient/metaserver_client_test.cpp index d332c03d4e..aaff7bd1b7 100644 --- a/curvefs/test/client/rpcclient/metaserver_client_test.cpp +++ b/curvefs/test/client/rpcclient/metaserver_client_test.cpp @@ -345,12 +345,8 @@ TEST_F(MetaServerClientImplTest, test_CreateDentry_rpc_error) { d.set_txid(10); // out - MetaserverID metaServerID = 1; butil::EndPoint target; butil::str2endpoint(addr_.c_str(), &target); - LogicPoolID poolID = 1; - CopysetID copysetID = 100; - uint32_t partitionID = 200; uint64_t applyIndex = 10; curvefs::metaserver::CreateDentryResponse response; @@ -379,12 +375,8 @@ TEST_F(MetaServerClientImplTest, test_CreateDentry_create_dentry_ok) { d.set_txid(10); // out - MetaserverID metaServerID = 1; butil::EndPoint target; butil::str2endpoint(addr_.c_str(), &target); - LogicPoolID poolID = 1; - CopysetID copysetID = 100; - uint32_t partitionID = 200; uint64_t applyIndex = 10; curvefs::metaserver::CreateDentryResponse response; @@ -414,12 +406,8 @@ TEST_F(MetaServerClientImplTest, test_CreateDentry_copyset_not_exist) { d.set_txid(10); // out - MetaserverID 
metaServerID = 1; butil::EndPoint target; butil::str2endpoint(addr_.c_str(), &target); - LogicPoolID poolID = 1; - CopysetID copysetID = 100; - uint32_t partitionID = 200; uint64_t applyIndex = 10; curvefs::metaserver::CreateDentryResponse response; @@ -460,12 +448,8 @@ TEST_F(MetaServerClientImplTest, d.set_txid(10); // out - MetaserverID metaServerID = 1; butil::EndPoint target; butil::str2endpoint(addr_.c_str(), &target); - LogicPoolID poolID = 1; - CopysetID copysetID = 100; - uint32_t partitionID = 200; uint64_t applyIndex = 10; curvefs::metaserver::CreateDentryResponse response; @@ -492,12 +476,8 @@ TEST_F(MetaServerClientImplTest, test_DeleteDentry) { std::string name = "test"; // out - MetaserverID metaServerID = 1; butil::EndPoint target; butil::str2endpoint(addr_.c_str(), &target); - LogicPoolID poolID = 1; - CopysetID copysetID = 100; - uint32_t partitionID = 200; uint64_t applyIndex = 10; curvefs::metaserver::DeleteDentryResponse response; @@ -614,12 +594,8 @@ TEST_F(MetaServerClientImplTest, test_GetInode) { uint64_t inodeid = 2; // out - MetaserverID metaServerID = 1; butil::EndPoint target; butil::str2endpoint(addr_.c_str(), &target); - LogicPoolID poolID = 1; - CopysetID copysetID = 100; - uint32_t partitionID = 200; uint64_t applyIndex = 10; curvefs::metaserver::Inode out; out.set_inodeid(inodeid); @@ -725,12 +701,8 @@ TEST_F(MetaServerClientImplTest, test_UpdateInodeAttr) { inode.set_symlink("test9"); // out - MetaserverID metaServerID = 1; butil::EndPoint target; butil::str2endpoint(addr_.c_str(), &target); - LogicPoolID poolID = 1; - CopysetID copysetID = 100; - uint32_t partitionID = 200; uint64_t applyIndex = 10; curvefs::metaserver::Inode out; @@ -950,12 +922,8 @@ TEST_F(MetaServerClientImplTest, test_CreateInode) { inode.symlink = "test9"; // out - MetaserverID metaServerID = 1; butil::EndPoint target; butil::str2endpoint(addr_.c_str(), &target); - LogicPoolID poolID = 1; - CopysetID copysetID = 100; - uint32_t partitionID = 200; uint64_t applyIndex = 10; curvefs::metaserver::Inode out; out.set_inodeid(100); @@ -1062,12 +1030,8 @@ TEST_F(MetaServerClientImplTest, test_DeleteInode) { uint64_t inodeid = 1; // out - MetaserverID metaServerID = 1; butil::EndPoint target; butil::str2endpoint(addr_.c_str(), &target); - LogicPoolID poolID = 1; - CopysetID copysetID = 100; - uint32_t partitionID = 200; uint64_t applyIndex = 10; curvefs::metaserver::DeleteInodeResponse response; diff --git a/curvefs/test/client/rpcclient/mock_mds_client.h b/curvefs/test/client/rpcclient/mock_mds_client.h index ff3cb655a3..2e86d48902 100644 --- a/curvefs/test/client/rpcclient/mock_mds_client.h +++ b/curvefs/test/client/rpcclient/mock_mds_client.h @@ -30,6 +30,7 @@ #include #include #include +#include #include "curvefs/src/client/rpcclient/mds_client.h" @@ -105,11 +106,12 @@ class MockMdsClient : public MdsClient { bool(uint32_t fsID, std::vector* partitionInfos)); - MOCK_METHOD4(RefreshSession, + MOCK_METHOD5(RefreshSession, FSStatusCode(const std::vector &txIds, std::vector *latestTxIdList, const std::string& fsName, - const Mountpoint& mountpoint)); + const Mountpoint& mountpoint, + std::atomic* enableSumInDir)); MOCK_METHOD4(AllocateVolumeBlockGroup, SpaceErrCode(uint32_t, diff --git a/curvefs/test/client/test_dentry_cache_manager.cpp b/curvefs/test/client/test_dentry_cache_manager.cpp index 91560ddad2..12230f451f 100644 --- a/curvefs/test/client/test_dentry_cache_manager.cpp +++ b/curvefs/test/client/test_dentry_cache_manager.cpp @@ -57,7 +57,6 @@ class TestDentryCacheManager : public 
::testing::Test { metaClient_ = std::make_shared(); dCacheManager_ = std::make_shared(metaClient_); dCacheManager_->SetFsId(fsId_); - dCacheManager_->Init(10, true, timeout_); } virtual void TearDown() { @@ -87,6 +86,8 @@ TEST_F(TestDentryCacheManager, GetDentry) { EXPECT_CALL(*metaClient_, GetDentry(fsId_, parent, name, _)) .WillOnce(Return(MetaStatusCode::NOT_FOUND)) + .WillOnce(DoAll(SetArgPointee<3>(dentryExp), + Return(MetaStatusCode::OK))) .WillOnce(DoAll(SetArgPointee<3>(dentryExp), Return(MetaStatusCode::OK))); @@ -134,6 +135,10 @@ TEST_F(TestDentryCacheManager, CreateAndGetDentry) { .WillOnce(Return(MetaStatusCode::UNKNOWN_ERROR)) .WillOnce(Return(MetaStatusCode::OK)); + EXPECT_CALL(*metaClient_, GetDentry(fsId_, parent, name, _)) + .WillOnce(DoAll(SetArgPointee<3>(dentryExp), + Return(MetaStatusCode::OK))); + CURVEFS_ERROR ret = dCacheManager_->CreateDentry(dentryExp); ASSERT_EQ(CURVEFS_ERROR::UNKNOWN, ret); @@ -249,6 +254,10 @@ TEST_F(TestDentryCacheManager, GetTimeOutDentry) { EXPECT_CALL(*metaClient_, CreateDentry(_)) .WillOnce(Return(MetaStatusCode::OK)); + EXPECT_CALL(*metaClient_, GetDentry(fsId_, parent, name, _)) + .WillOnce(DoAll(SetArgPointee<3>(dentryExp), + Return(MetaStatusCode::OK))); + auto ret = dCacheManager_->CreateDentry(dentryExp); ASSERT_EQ(CURVEFS_ERROR::OK, ret); diff --git a/curvefs/test/client/test_disk_cache_base.cpp b/curvefs/test/client/test_disk_cache_base.cpp index 9c2a45f3ea..0b222e7245 100644 --- a/curvefs/test/client/test_disk_cache_base.cpp +++ b/curvefs/test/client/test_disk_cache_base.cpp @@ -56,7 +56,7 @@ class TestDiskCacheBase : public ::testing::Test { virtual void SetUp() { wrapper_ = std::make_shared(); diskCacheBase_ = std::make_shared(); - diskCacheBase_->Init(wrapper_, "/mnt/test"); + diskCacheBase_->Init(wrapper_, "/mnt/test", 0); } virtual void TearDown() { @@ -79,7 +79,7 @@ TEST_F(TestDiskCacheBase, CreateIoDir) { EXPECT_CALL(*wrapper_, stat(NotNull(), NotNull())) .WillOnce(Return(-1)); EXPECT_CALL(*wrapper_, mkdir(_, _)) - .WillOnce(Return(0)); + .WillRepeatedly(Return(0)); ret = diskCacheBase_->CreateIoDir(true); ASSERT_EQ(0, ret); @@ -95,13 +95,7 @@ TEST_F(TestDiskCacheBase, IsFileExist) { .WillOnce(Return(-1)); bool ret = diskCacheBase_->IsFileExist(fileName); ASSERT_EQ(false, ret); - - EXPECT_CALL(*wrapper_, stat(NotNull(), NotNull())) - .WillOnce(Return(0)); - ret = diskCacheBase_->IsFileExist(fileName); - ASSERT_EQ(true, ret); } - } // namespace client } // namespace curvefs diff --git a/curvefs/test/client/test_disk_cache_manager.cpp b/curvefs/test/client/test_disk_cache_manager.cpp index 56116c6e5b..1613d5d36f 100644 --- a/curvefs/test/client/test_disk_cache_manager.cpp +++ b/curvefs/test/client/test_disk_cache_manager.cpp @@ -64,11 +64,12 @@ class TestDiskCacheManager : public ::testing::Test { diskCacheRead_ = std::make_shared(); diskCacheManager_ = std::make_shared( wrapper, diskCacheWrite_, diskCacheRead_); - diskCacheRead_->Init(wrapper, "/mnt/test"); + diskCacheRead_->Init(wrapper, "/mnt/test", 0); std::shared_ptr> cachedObjName = std::make_shared> (0, std::make_shared("diskcache")); - diskCacheWrite_->Init(client_, wrapper, "/mnt/test", 1, cachedObjName); + diskCacheWrite_->Init(client_, wrapper, "/mnt/test", 0, + 1, cachedObjName); } virtual void TearDown() { @@ -195,12 +196,12 @@ TEST_F(TestDiskCacheManager, IsCached) { bool ret = diskCacheManager_->IsCached(fileName); ASSERT_EQ(false, ret); - diskCacheManager_->AddCache(fileName, false); + diskCacheManager_->AddCache(fileName); 
diskCacheManager_->AddCache(fileName2); ret = diskCacheManager_->IsCached(fileName2); ASSERT_EQ(true, ret); - diskCacheManager_->AddCache(fileName, false); + diskCacheManager_->AddCache(fileName); diskCacheManager_->AddCache(fileName2); ret = diskCacheManager_->IsCached(fileName); ASSERT_EQ(true, ret); @@ -235,17 +236,13 @@ TEST_F(TestDiskCacheManager, IsDiskCacheFull) { int ret = diskCacheManager_->IsDiskCacheFull(); ASSERT_EQ(true, ret); - struct statfs stat; - stat.f_frsize = 1; - stat.f_blocks = 1; - stat.f_bfree = 0; - stat.f_bavail = 0; ret = diskCacheManager_->IsDiskCacheFull(); ASSERT_EQ(true, ret); } TEST_F(TestDiskCacheManager, IsDiskCacheSafe) { S3ClientAdaptorOption option; + option.objectPrefix = 0; option.diskCacheOpt.diskCacheType = (DiskCacheType)2; option.diskCacheOpt.cacheDir = "/mnt/test_unit"; option.diskCacheOpt.trimCheckIntervalSec = 1; @@ -261,6 +258,7 @@ TEST_F(TestDiskCacheManager, IsDiskCacheSafe) { option.diskCacheOpt.fullRatio = 100; option.diskCacheOpt.safeRatio = 99; option.diskCacheOpt.maxUsableSpaceBytes = 100000000; + option.objectPrefix = 0; diskCacheManager_->Init(client_, option); ret = diskCacheManager_->IsDiskCacheSafe(); ASSERT_EQ(true, ret); @@ -273,17 +271,16 @@ TEST_F(TestDiskCacheManager, TrimStop) { TEST_F(TestDiskCacheManager, TrimRun_1) { S3ClientAdaptorOption option; + option.objectPrefix = 0; option.diskCacheOpt.cacheDir = "/tmp"; option.diskCacheOpt.trimCheckIntervalSec = 1; EXPECT_CALL(*wrapper, stat(NotNull(), NotNull())).WillOnce(Return(-1)); EXPECT_CALL(*wrapper, mkdir(_, _)).WillOnce(Return(-1)); - EXPECT_CALL(*diskCacheWrite_, UploadAllCacheWriteFile()) - .WillOnce(Return(0)); diskCacheManager_->Init(client_, option); diskCacheManager_->InitMetrics("test"); EXPECT_CALL(*wrapper, statfs(NotNull(), NotNull())) .WillRepeatedly(Return(-1)); - int ret = diskCacheManager_->TrimRun(); + (void)diskCacheManager_->TrimRun(); sleep(6); diskCacheManager_->UmountDiskCache(); } @@ -310,14 +307,13 @@ TEST_F(TestDiskCacheManager, TrimCache_2) { S3ClientAdaptorOption option; option.diskCacheOpt.cacheDir = "/tmp"; option.diskCacheOpt.trimCheckIntervalSec = 1; + option.objectPrefix = 0; EXPECT_CALL(*wrapper, stat(NotNull(), NotNull())).WillOnce(Return(-1)); EXPECT_CALL(*wrapper, mkdir(_, _)).WillOnce(Return(-1)); - EXPECT_CALL(*diskCacheWrite_, UploadAllCacheWriteFile()) - .WillOnce(Return(0)); diskCacheManager_->Init(client_, option); diskCacheManager_->InitMetrics("test"); diskCacheManager_->AddCache("test"); - int ret = diskCacheManager_->TrimRun(); + (void)diskCacheManager_->TrimRun(); sleep(6); diskCacheManager_->UmountDiskCache(); } @@ -349,12 +345,11 @@ TEST_F(TestDiskCacheManager, TrimCache_4) { option.diskCacheOpt.trimCheckIntervalSec = 1; EXPECT_CALL(*wrapper, stat(NotNull(), NotNull())).WillOnce(Return(-1)); EXPECT_CALL(*wrapper, mkdir(_, _)).WillOnce(Return(-1)); - EXPECT_CALL(*diskCacheWrite_, UploadAllCacheWriteFile()) - .WillOnce(Return(0)); + option.objectPrefix = 0; diskCacheManager_->Init(client_, option); diskCacheManager_->InitMetrics("test"); diskCacheManager_->AddCache("test"); - int ret = diskCacheManager_->TrimRun(); + (void)diskCacheManager_->TrimRun(); sleep(6); diskCacheManager_->UmountDiskCache(); } @@ -385,14 +380,13 @@ TEST_F(TestDiskCacheManager, TrimCache_5) { S3ClientAdaptorOption option; option.diskCacheOpt.cacheDir = "/tmp"; option.diskCacheOpt.trimCheckIntervalSec = 1; + option.objectPrefix = 0; EXPECT_CALL(*wrapper, stat(NotNull(), NotNull())).WillOnce(Return(-1)); EXPECT_CALL(*wrapper, mkdir(_, 
_)).WillOnce(Return(-1)); - EXPECT_CALL(*diskCacheWrite_, UploadAllCacheWriteFile()) - .WillOnce(Return(0)); diskCacheManager_->Init(client_, option); diskCacheManager_->InitMetrics("test"); diskCacheManager_->AddCache("test"); - int ret = diskCacheManager_->TrimRun(); + (void)diskCacheManager_->TrimRun(); sleep(6); diskCacheManager_->UmountDiskCache(); } @@ -408,9 +402,8 @@ TEST_F(TestDiskCacheManager, TrimCache_noexceed) { option.diskCacheOpt.maxUsableSpaceBytes = 0; option.diskCacheOpt.cmdTimeoutSec = 5; option.diskCacheOpt.asyncLoadPeriodMs = 10; - + option.objectPrefix = 0; diskCacheManager_->Init(client_, option); - diskCacheManager_->AddCache("test"); std::string buf = "test"; EXPECT_CALL(*diskCacheWrite_, GetCacheIoFullDir()) @@ -423,14 +416,18 @@ TEST_F(TestDiskCacheManager, TrimCache_noexceed) { stat.f_blocks = 1; stat.f_bfree = 0; stat.f_bavail = 0; + EXPECT_CALL(*wrapper, statfs(NotNull(), _)) + .WillRepeatedly(DoAll(SetArgPointee<1>(stat), Return(-1))); + EXPECT_CALL(*wrapper, remove(_)).WillRepeatedly(Return(0)); + diskCacheManager_->AddCache("test"); + + struct stat rf; + rf.st_size = 0; EXPECT_CALL(*wrapper, stat(NotNull(), NotNull())) - .Times(3) - .WillOnce(Return(0)) + .Times(2) .WillOnce(Return(-1)) - .WillOnce(Return(0)); - EXPECT_CALL(*diskCacheWrite_, UploadAllCacheWriteFile()) - .WillOnce(Return(0)); - int ret = diskCacheManager_->TrimRun(); + .WillOnce(DoAll(SetArgPointee<1>(rf), Return(0))); + (void)diskCacheManager_->TrimRun(); diskCacheManager_->InitMetrics("test"); sleep(6); diskCacheManager_->UmountDiskCache(); @@ -438,6 +435,7 @@ TEST_F(TestDiskCacheManager, TrimCache_noexceed) { TEST_F(TestDiskCacheManager, TrimCache_exceed) { S3ClientAdaptorOption option; + option.objectPrefix = 0; option.diskCacheOpt.maxFileNums = 5; option.diskCacheOpt.diskCacheType = (DiskCacheType)2; option.diskCacheOpt.cacheDir = "/tmp"; @@ -473,8 +471,6 @@ TEST_F(TestDiskCacheManager, TrimCache_exceed) { .Times(2) .WillOnce(Return(-1)) .WillOnce(Return(0)); - EXPECT_CALL(*diskCacheWrite_, UploadAllCacheWriteFile()) - .WillOnce(Return(0)); diskCacheManager_->TrimRun(); diskCacheManager_->InitMetrics("test"); sleep(6); diff --git a/curvefs/test/client/test_disk_cache_manager_impl.cpp b/curvefs/test/client/test_disk_cache_manager_impl.cpp index 90e601873d..21a798ec99 100644 --- a/curvefs/test/client/test_disk_cache_manager_impl.cpp +++ b/curvefs/test/client/test_disk_cache_manager_impl.cpp @@ -67,12 +67,13 @@ class TestDiskCacheManagerImpl : public ::testing::Test { diskCacheRead_ = std::make_shared(); diskCacheManager_ = std::make_shared( wrapper_, diskCacheWrite_, diskCacheRead_); - diskCacheRead_->Init(wrapper_, "/mnt/test"); + diskCacheRead_->Init(wrapper_, "/mnt/test", 0); std::shared_ptr> cachedObjName = std::make_shared> (0, std::make_shared("diskcache")); - diskCacheWrite_->Init(client_, wrapper_, "/mnt/test", 1, cachedObjName); + diskCacheWrite_->Init(client_, wrapper_, "/mnt/test", 0, + 1, cachedObjName); diskCacheManagerImpl_ = std::make_shared(diskCacheManager_, client_); } @@ -247,14 +248,12 @@ TEST_F(TestDiskCacheManagerImpl, IsCached) { } TEST_F(TestDiskCacheManagerImpl, UmountDiskCache) { - EXPECT_CALL(*diskCacheWrite_, UploadAllCacheWriteFile()) - .WillOnce(Return(-1)); + EXPECT_CALL(*diskCacheWrite_, IsCacheClean()).WillOnce(Return(true)); diskCacheManagerImpl_->InitMetrics("test"); int ret = diskCacheManagerImpl_->UmountDiskCache(); ASSERT_EQ(0, ret); - EXPECT_CALL(*diskCacheWrite_, UploadAllCacheWriteFile()) - .WillOnce(Return(0)); + EXPECT_CALL(*diskCacheWrite_, 
IsCacheClean()).WillOnce(Return(false)); diskCacheManagerImpl_->InitMetrics("test"); ret = diskCacheManagerImpl_->UmountDiskCache(); ASSERT_EQ(0, ret); diff --git a/curvefs/test/client/test_disk_cache_read.cpp b/curvefs/test/client/test_disk_cache_read.cpp index eeab3e1953..efe715f724 100644 --- a/curvefs/test/client/test_disk_cache_read.cpp +++ b/curvefs/test/client/test_disk_cache_read.cpp @@ -59,7 +59,7 @@ class TestDiskCacheRead : public ::testing::Test { diskCacheRead_ = std::make_shared(); wrapper_ = std::make_shared(); - diskCacheRead_->Init(wrapper_, "test"); + diskCacheRead_->Init(wrapper_, "test", 0); } virtual void TearDown() { @@ -149,14 +149,18 @@ TEST_F(TestDiskCacheRead, LoadAllCacheFile) { ret = diskCacheRead_->LoadAllCacheReadFile(cachedObj); ASSERT_EQ(0, ret); - struct dirent *dirent; dir = opendir("."); - dirent = readdir(dir); + EXPECT_NE(dir, nullptr); + + struct dirent fake; + fake.d_type = 8; + strcpy(fake.d_name, "fake"); // NOLINT + EXPECT_CALL(*wrapper_, stat(NotNull(), NotNull())).WillOnce(Return(0)); EXPECT_CALL(*wrapper_, opendir(NotNull())).WillOnce(Return(dir)); EXPECT_CALL(*wrapper_, readdir(NotNull())) .Times(2) - .WillOnce(Return(dirent)) + .WillOnce(Return(&fake)) .WillOnce(ReturnNull()); EXPECT_CALL(*wrapper_, closedir(NotNull())).WillOnce(Return(0)); ret = diskCacheRead_->LoadAllCacheReadFile(cachedObj); diff --git a/curvefs/test/client/test_disk_cache_write.cpp b/curvefs/test/client/test_disk_cache_write.cpp index 84b6f4a392..765cc15542 100644 --- a/curvefs/test/client/test_disk_cache_write.cpp +++ b/curvefs/test/client/test_disk_cache_write.cpp @@ -27,7 +27,6 @@ #include "curvefs/test/client/mock_client_s3.h" #include "curvefs/src/client/s3/disk_cache_write.h" #include "curvefs/src/client/s3/client_s3_adaptor.h" -#include "src/common/concurrent/concurrent.h" namespace curvefs { namespace client { @@ -67,7 +66,7 @@ class TestDiskCacheWrite : public ::testing::Test { std::shared_ptr> cachedObjName = std::make_shared> (0, std::make_shared("diskcache")); - diskCacheWrite_->Init(client_, wrapper_, "test", 1, cachedObjName); + diskCacheWrite_->Init(client_, wrapper_, "test", 0, 1, cachedObjName); } virtual void TearDown() { @@ -194,7 +193,6 @@ TEST_F(TestDiskCacheWrite, ReadFile) { } TEST_F(TestDiskCacheWrite, UploadFile) { - uint64_t length = 10; EXPECT_CALL(*wrapper_, stat(NotNull(), NotNull())) .WillOnce(Return(-1)); std::string fileName = "test"; @@ -320,6 +318,7 @@ TEST_F(TestDiskCacheWrite, UploadAllCacheWriteFile) { EXPECT_NE(dir, nullptr); struct dirent fake; + fake.d_type = 8; strcpy(fake.d_name, "fake"); // NOLINT EXPECT_CALL(*wrapper_, stat(NotNull(), NotNull())) @@ -362,7 +361,9 @@ TEST_F(TestDiskCacheWrite, UploadAllCacheWriteFile_2) { EXPECT_NE(dir, nullptr); struct dirent fake, fake2; + fake.d_type = 8; strcpy(fake.d_name, "fake"); // NOLINT + fake2.d_type = 8; strcpy(fake2.d_name, "fake2"); // NOLINT EXPECT_CALL(*wrapper_, stat(NotNull(), NotNull())) @@ -458,14 +459,19 @@ TEST_F(TestDiskCacheWrite, AsyncUploadRun) { })); diskCacheWrite_->AsyncUploadEnqueue("test"); diskCacheWrite_->AsyncUploadEnqueue("test"); - int ret = diskCacheWrite_->AsyncUploadRun(); + (void)diskCacheWrite_->AsyncUploadRun(); sleep(1); diskCacheWrite_->AsyncUploadEnqueue("test"); std::string t1 = "test"; - curve::common::Thread backEndThread = - std::thread(&DiskCacheWrite::AsyncUploadEnqueue, diskCacheWrite_, t1); + std::vector threads; + for (int i = 0; i < 5; i++) { + threads.emplace_back(&DiskCacheWrite::AsyncUploadEnqueue, + diskCacheWrite_, t1); + } 
diskCacheWrite_->AsyncUploadStop(); - backEndThread.join(); + for (auto &t : threads) { + t.join(); + } } TEST_F(TestDiskCacheWrite, UploadFileByInode) { @@ -514,6 +520,7 @@ TEST_F(TestDiskCacheWrite, UploadFileByInode) { LOG(INFO) << "#############case4: no file need to upload, but need other " "upload task finish"; struct dirent fake; + fake.d_type = 8; strcpy(fake.d_name, obj1.c_str()); // NOLINT EXPECT_CALL(*wrapper_, stat(NotNull(), NotNull())) .Times(3) diff --git a/curvefs/test/client/test_fuse_s3_client.cpp b/curvefs/test/client/test_fuse_s3_client.cpp index eac9417f42..f4b933f736 100644 --- a/curvefs/test/client/test_fuse_s3_client.cpp +++ b/curvefs/test/client/test_fuse_s3_client.cpp @@ -31,11 +31,13 @@ #include "curvefs/proto/metaserver.pb.h" #include "curvefs/src/client/common/common.h" -#include "curvefs/src/client/error_code.h" +#include "curvefs/src/client/filesystem/error.h" +#include "curvefs/src/client/filesystem/meta.h" #include "curvefs/src/client/fuse_s3_client.h" #include "curvefs/src/client/rpcclient/metaserver_client.h" #include "curvefs/src/client/s3/disk_cache_manager_impl.h" #include "curvefs/src/client/warmup/warmup_manager.h" +#include "curvefs/src/client/filesystem/filesystem.h" #include "curvefs/src/common/define.h" #include "curvefs/test/client/mock_client_s3.h" #include "curvefs/test/client/mock_client_s3_adaptor.h" @@ -54,6 +56,14 @@ namespace client { namespace common { DECLARE_bool(enableCto); DECLARE_bool(supportKVcache); + +DECLARE_uint64(fuseClientAvgWriteBytes); +DECLARE_uint64(fuseClientBurstWriteBytes); +DECLARE_uint64(fuseClientBurstWriteBytesSecs); + +DECLARE_uint64(fuseClientAvgReadBytes); +DECLARE_uint64(fuseClientBurstReadBytes); +DECLARE_uint64(fuseClientBurstReadBytesSecs); } // namespace common } // namespace client } // namespace curvefs @@ -78,6 +88,11 @@ using rpcclient::MetaServerClientDone; using rpcclient::MockMdsClient; using rpcclient::MockMetaServerClient; +using ::curvefs::client::common::FileSystemOption; +using ::curvefs::client::common::OpenFilesOption; +using ::curvefs::client::filesystem::EntryOut; +using ::curvefs::client::filesystem::FileOut; + #define EQUAL(a) (lhs.a() == rhs.a()) static bool operator==(const Dentry &lhs, const Dentry &rhs) { @@ -115,9 +130,16 @@ class TestFuseS3Client : public ::testing::Test { InitFSInfo(client_); fuseClientOption_.s3Opt.s3AdaptrOpt.asyncThreadNum = 1; fuseClientOption_.dummyServerStartPort = 5000; - fuseClientOption_.maxNameLength = 20u; + fuseClientOption_.fileSystemOption.maxNameLength = 20u; fuseClientOption_.listDentryThreads = 2; fuseClientOption_.warmupThreadsNum = 10; + { // filesystem option + auto option = FileSystemOption(); + option.maxNameLength = 20u; + option.openFilesOption.lruSize = 100; + option.attrWatcherOption.lruSize = 100; + fuseClientOption_.fileSystemOption = option; + } auto fsInfo = std::make_shared(); fsInfo->set_fsid(fsId); fsInfo->set_fsname("s3fs"); @@ -146,9 +168,11 @@ class TestFuseS3Client : public ::testing::Test { } void InitOptionBasic(FuseClientOption *opt) { + opt->s3Opt.s3ClientAdaptorOpt.readCacheThreads = 2; + opt->s3Opt.s3ClientAdaptorOpt.writeCacheMaxByte = 838860800; opt->s3Opt.s3AdaptrOpt.asyncThreadNum = 1; opt->dummyServerStartPort = 5000; - opt->maxNameLength = 20u; + opt->fileSystemOption.maxNameLength = 20u; opt->listDentryThreads = 2; } @@ -206,10 +230,24 @@ TEST_F(TestFuseS3Client, test_Init_with_KVCache) { curvefs::client::common::FLAGS_supportKVcache = false; } +TEST_F(TestFuseS3Client, test_Init_with_cache_size_0) { + 
curvefs::client::common::FLAGS_supportKVcache = false; + auto testClient = std::make_shared( + mdsClient_, metaClient_, inodeManager_, dentryManager_, + s3ClientAdaptor_, nullptr); + FuseClientOption opt; + InitOptionBasic(&opt); + InitFSInfo(testClient); + + // test init when write cache is 0 + opt.s3Opt.s3ClientAdaptorOpt.writeCacheMaxByte = 0; + ASSERT_EQ(CURVEFS_ERROR::CACHETOOSMALL, testClient->Init(opt)); + testClient->UnInit(); +} + // GetInode failed; bad fd TEST_F(TestFuseS3Client, warmUp_inodeBadFd) { sleep(1); - fuse_ino_t parent = 1; std::string name = "test"; fuse_ino_t inodeid = 2; @@ -225,7 +263,9 @@ TEST_F(TestFuseS3Client, warmUp_inodeBadFd) { Return(CURVEFS_ERROR::BAD_FD))); auto old = client_->GetFsInfo()->fstype(); client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); - client_->PutWarmFilelistTask(inodeid); + client_->PutWarmFilelistTask( + inodeid, + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); LOG(INFO) << "ret:" << ret << " Warmup progress: " << progress.ToString(); @@ -280,7 +320,9 @@ TEST_F(TestFuseS3Client, warmUp_Warmfile_error_GetDentry01) { auto old = client_->GetFsInfo()->fstype(); client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); - client_->PutWarmFilelistTask(inodeid); + client_->PutWarmFilelistTask( + inodeid, + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); @@ -336,7 +378,9 @@ TEST_F(TestFuseS3Client, warmUp_Warmfile_error_GetDentry02) { DoAll(SetArrayArgument<3>(tmpbuf, tmpbuf + len), Return(len))); auto old = client_->GetFsInfo()->fstype(); client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); - client_->PutWarmFilelistTask(inodeid); + client_->PutWarmFilelistTask( + inodeid, + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); @@ -392,7 +436,9 @@ TEST_F(TestFuseS3Client, warmUp_fetchDataEnqueue__error_getinode) { DoAll(SetArrayArgument<3>(tmpbuf, tmpbuf + len), Return(len))); auto old = client_->GetFsInfo()->fstype(); client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); - client_->PutWarmFilelistTask(inodeid); + client_->PutWarmFilelistTask( + inodeid, + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); @@ -448,7 +494,9 @@ TEST_F(TestFuseS3Client, warmUp_fetchDataEnqueue_chunkempty) { DoAll(SetArrayArgument<3>(tmpbuf, tmpbuf + len), Return(len))); auto old = client_->GetFsInfo()->fstype(); client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); - client_->PutWarmFilelistTask(inodeid); + client_->PutWarmFilelistTask( + inodeid, + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); @@ -509,7 +557,9 @@ TEST_F(TestFuseS3Client, warmUp_FetchDentry_TYPE_SYM_LINK) { DoAll(SetArrayArgument<3>(tmpbuf, tmpbuf + len), Return(len))); auto old = client_->GetFsInfo()->fstype(); client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); - client_->PutWarmFilelistTask(inodeid); + client_->PutWarmFilelistTask( + inodeid, + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); @@ -572,7 +622,9 @@ TEST_F(TestFuseS3Client, 
warmUp_FetchDentry_error_TYPE_DIRECTORY) { DoAll(SetArrayArgument<3>(tmpbuf, tmpbuf + len), Return(len))); auto old = client_->GetFsInfo()->fstype(); client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); - client_->PutWarmFilelistTask(inodeid); + client_->PutWarmFilelistTask( + inodeid, + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); @@ -634,7 +686,9 @@ TEST_F(TestFuseS3Client, warmUp_lookpath_multilevel) { DoAll(SetArrayArgument<3>(tmpbuf, tmpbuf + len), Return(len))); auto old = client_->GetFsInfo()->fstype(); client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); - client_->PutWarmFilelistTask(inodeid); + client_->PutWarmFilelistTask( + inodeid, + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); @@ -683,7 +737,9 @@ TEST_F(TestFuseS3Client, warmUp_lookpath_unkown) { DoAll(SetArrayArgument<3>(tmpbuf, tmpbuf + len), Return(len))); auto old = client_->GetFsInfo()->fstype(); client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); - client_->PutWarmFilelistTask(inodeid); + client_->PutWarmFilelistTask( + inodeid, + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); @@ -738,7 +794,9 @@ TEST_F(TestFuseS3Client, warmUp_FetchChildDentry_error_ListDentry) { DoAll(SetArrayArgument<3>(tmpbuf, tmpbuf + len), Return(len))); auto old = client_->GetFsInfo()->fstype(); client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); - client_->PutWarmFilelistTask(inodeid); + client_->PutWarmFilelistTask( + inodeid, + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); @@ -824,7 +882,9 @@ TEST_F(TestFuseS3Client, warmUp_FetchChildDentry_suc_ListDentry) { DoAll(SetArrayArgument<3>(tmpbuf, tmpbuf + len), Return(len))); auto old = client_->GetFsInfo()->fstype(); client_->GetFsInfo()->set_fstype(FSType::TYPE_S3); - client_->PutWarmFilelistTask(inodeid); + client_->PutWarmFilelistTask( + inodeid, + curvefs::client::common::WarmupStorageType::kWarmupStorageTypeDisk); warmup::WarmupProgress progress; bool ret = client_->GetWarmupProgress(inodeid, &progress); @@ -838,12 +898,12 @@ TEST_F(TestFuseS3Client, warmUp_FetchChildDentry_suc_ListDentry) { ASSERT_FALSE(ret); } -TEST_F(TestFuseS3Client, FuseOpInit_when_fs_exist) { +TEST_F(TestFuseS3Client, FuseInit_when_fs_exist) { MountOption mOpts; memset(&mOpts, 0, sizeof(mOpts)); - mOpts.fsName = "s3fs"; - mOpts.mountPoint = "host1:/test"; - mOpts.fsType = "s3"; + mOpts.fsName = const_cast("s3fs"); + mOpts.mountPoint = const_cast("host1:/test"); + mOpts.fsType = const_cast("s3"); std::string fsName = mOpts.fsName; FsInfo fsInfoExp; @@ -851,7 +911,7 @@ TEST_F(TestFuseS3Client, FuseOpInit_when_fs_exist) { fsInfoExp.set_fsname(fsName); EXPECT_CALL(*mdsClient_, MountFs(fsName, _, _)) .WillOnce(DoAll(SetArgPointee<2>(fsInfoExp), Return(FSStatusCode::OK))); - CURVEFS_ERROR ret = client_->FuseOpInit(&mOpts, nullptr); + CURVEFS_ERROR ret = client_->SetMountStatus(&mOpts); ASSERT_EQ(CURVEFS_ERROR::OK, ret); auto fsInfo = client_->GetFsInfo(); @@ -864,9 +924,9 @@ TEST_F(TestFuseS3Client, FuseOpInit_when_fs_exist) { TEST_F(TestFuseS3Client, FuseOpDestroy) { MountOption mOpts; memset(&mOpts, 0, sizeof(mOpts)); - mOpts.fsName = "s3fs"; - mOpts.mountPoint = "host1:/test"; 
- mOpts.fsType = "s3"; + mOpts.fsName = const_cast("s3fs"); + mOpts.mountPoint = const_cast("host1:/test"); + mOpts.fsType = const_cast("s3"); std::string fsName = mOpts.fsName; @@ -877,7 +937,7 @@ TEST_F(TestFuseS3Client, FuseOpDestroy) { } TEST_F(TestFuseS3Client, FuseOpWriteSmallSize) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char *buf = "xxx"; size_t size = 4; @@ -899,15 +959,16 @@ TEST_F(TestFuseS3Client, FuseOpWriteSmallSize) { EXPECT_CALL(*s3ClientAdaptor_, Write(_, _, _, _)) .WillOnce(Return(smallSize)); + FileOut fileOut; CURVEFS_ERROR ret = - client_->FuseOpWrite(req, ino, buf, size, off, &fi, &wSize); + client_->FuseOpWrite(req, ino, buf, size, off, &fi, &fileOut); ASSERT_EQ(CURVEFS_ERROR::OK, ret); - ASSERT_EQ(smallSize, wSize); + ASSERT_EQ(smallSize, fileOut.nwritten); } TEST_F(TestFuseS3Client, FuseOpWriteFailed) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char *buf = "xxx"; size_t size = 4; @@ -928,16 +989,17 @@ TEST_F(TestFuseS3Client, FuseOpWriteFailed) { EXPECT_CALL(*inodeManager_, GetInode(ino, _)) .WillOnce(Return(CURVEFS_ERROR::INTERNAL)); + FileOut fileOut; CURVEFS_ERROR ret = - client_->FuseOpWrite(req, ino, buf, size, off, &fi, &wSize); + client_->FuseOpWrite(req, ino, buf, size, off, &fi, &fileOut); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); - ret = client_->FuseOpWrite(req, ino, buf, size, off, &fi, &wSize); + ret = client_->FuseOpWrite(req, ino, buf, size, off, &fi, &fileOut); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); } TEST_F(TestFuseS3Client, FuseOpReadOverRange) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; size_t size = 4; off_t off = 5000; @@ -963,7 +1025,7 @@ TEST_F(TestFuseS3Client, FuseOpReadOverRange) { } TEST_F(TestFuseS3Client, FuseOpReadFailed) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; size_t size = 4; off_t off = 0; @@ -994,9 +1056,9 @@ TEST_F(TestFuseS3Client, FuseOpReadFailed) { } TEST_F(TestFuseS3Client, FuseOpFsync) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; - struct fuse_file_info *fi; + struct fuse_file_info *fi = nullptr; Inode inode; inode.set_inodeid(ino); @@ -1024,9 +1086,9 @@ TEST_F(TestFuseS3Client, FuseOpFsync) { } TEST_F(TestFuseS3Client, FuseOpFlush) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; - struct fuse_file_info *fi; + struct fuse_file_info *fi = nullptr; Inode inode; inode.set_inodeid(ino); inode.set_length(0); @@ -1064,7 +1126,7 @@ TEST_F(TestFuseS3Client, FuseOpFlush) { TEST_F(TestFuseS3Client, FuseOpGetXattr_NotSummaryInfo) { // in - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char name[] = "security.selinux"; size_t size = 100; @@ -1076,7 +1138,7 @@ TEST_F(TestFuseS3Client, FuseOpGetXattr_NotSummaryInfo) { TEST_F(TestFuseS3Client, FuseOpGetXattr_NotEnableSumInDir) { // in - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char rname[] = "curve.dir.rfbytes"; const char name[] = "curve.dir.fbytes"; @@ -1178,7 +1240,7 @@ TEST_F(TestFuseS3Client, FuseOpGetXattr_NotEnableSumInDir) { TEST_F(TestFuseS3Client, FuseOpGetXattr_NotEnableSumInDir_Failed) { // in - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char rname[] = "curve.dir.rfbytes"; const char name[] = "curve.dir.fbytes"; @@ -1243,70 +1305,12 @@ TEST_F(TestFuseS3Client, FuseOpGetXattr_NotEnableSumInDir_Failed) { .WillOnce(Return(CURVEFS_ERROR::INTERNAL)); ret = client_->FuseOpGetXattr(req, ino, rname, &value, size); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, 
ret); - - // AddUllStringToFirst XATTRFILES failed - EXPECT_CALL(*inodeManager_, GetInodeAttr(ino, _)) - .WillOnce( - DoAll(SetArgPointee<1>(inode), Return(CURVEFS_ERROR::OK))); - EXPECT_CALL(*dentryManager_, ListDentry(_, _, _, _, _)) - .WillOnce( - DoAll(SetArgPointee<1>(dlist), Return(CURVEFS_ERROR::OK))); - EXPECT_CALL(*inodeManager_, BatchGetInodeAttr(_, _)) - .WillOnce( - DoAll(SetArgPointee<1>(attrs), Return(CURVEFS_ERROR::OK))); - ret = client_->FuseOpGetXattr(req, ino, name, &value, size); - ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); - - // AddUllStringToFirst XATTRSUBDIRS failed - inode.mutable_xattr()->find(XATTRFILES)->second = "0"; - inode.mutable_xattr()->find(XATTRSUBDIRS)->second = "aaa"; - EXPECT_CALL(*inodeManager_, GetInodeAttr(ino, _)) - .WillOnce( - DoAll(SetArgPointee<1>(inode), Return(CURVEFS_ERROR::OK))); - EXPECT_CALL(*dentryManager_, ListDentry(_, _, _, _, _)) - .WillOnce( - DoAll(SetArgPointee<1>(dlist), Return(CURVEFS_ERROR::OK))); - EXPECT_CALL(*inodeManager_, BatchGetInodeAttr(_, _)) - .WillOnce( - DoAll(SetArgPointee<1>(attrs), Return(CURVEFS_ERROR::OK))); - ret = client_->FuseOpGetXattr(req, ino, name, &value, size); - ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); - - // AddUllStringToFirst XATTRENTRIES failed - inode.mutable_xattr()->find(XATTRSUBDIRS)->second = "0"; - inode.mutable_xattr()->find(XATTRENTRIES)->second = "aaa"; - EXPECT_CALL(*inodeManager_, GetInodeAttr(ino, _)) - .WillOnce( - DoAll(SetArgPointee<1>(inode), Return(CURVEFS_ERROR::OK))); - EXPECT_CALL(*dentryManager_, ListDentry(_, _, _, _, _)) - .WillOnce( - DoAll(SetArgPointee<1>(dlist), Return(CURVEFS_ERROR::OK))); - EXPECT_CALL(*inodeManager_, BatchGetInodeAttr(_, _)) - .WillOnce( - DoAll(SetArgPointee<1>(attrs), Return(CURVEFS_ERROR::OK))); - ret = client_->FuseOpGetXattr(req, ino, name, &value, size); - ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); - - // AddUllStringToFirst XATTRFBYTES failed - inode.mutable_xattr()->find(XATTRENTRIES)->second = "0"; - inode.mutable_xattr()->find(XATTRFBYTES)->second = "aaa"; - EXPECT_CALL(*inodeManager_, GetInodeAttr(ino, _)) - .WillOnce( - DoAll(SetArgPointee<1>(inode), Return(CURVEFS_ERROR::OK))); - EXPECT_CALL(*dentryManager_, ListDentry(_, _, _, _, _)) - .WillOnce( - DoAll(SetArgPointee<1>(dlist), Return(CURVEFS_ERROR::OK))); - EXPECT_CALL(*inodeManager_, BatchGetInodeAttr(_, _)) - .WillOnce( - DoAll(SetArgPointee<1>(attrs), Return(CURVEFS_ERROR::OK))); - ret = client_->FuseOpGetXattr(req, ino, name, &value, size); - ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); } TEST_F(TestFuseS3Client, FuseOpGetXattr_EnableSumInDir) { client_->SetEnableSumInDir(true); // in - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char name[] = "curve.dir.rentries"; size_t size = 100; @@ -1374,7 +1378,7 @@ TEST_F(TestFuseS3Client, FuseOpGetXattr_EnableSumInDir) { TEST_F(TestFuseS3Client, FuseOpGetXattr_EnableSumInDir_Failed) { client_->SetEnableSumInDir(true); // in - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char name[] = "curve.dir.entries"; const char rname[] = "curve.dir.rentries"; @@ -1558,18 +1562,17 @@ TEST_F(TestFuseS3Client, FuseOpCreate_EnableSummary) { Return(CURVEFS_ERROR::OK))) .WillOnce( DoAll(SetArgReferee<1>(parentInodeWrapper), - Return(CURVEFS_ERROR::OK))) - .WillOnce( - DoAll(SetArgReferee<1>(inodeWrapper), Return(CURVEFS_ERROR::OK))); + Return(CURVEFS_ERROR::OK))); EXPECT_CALL(*metaClient_, UpdateInodeAttrWithOutNlink(_, _, _, _, _)) .WillRepeatedly(Return(MetaStatusCode::OK)); EXPECT_CALL(*inodeManager_, 
ShipToFlush(_)) - .Times(2); + .Times(1); // update mtime directly - fuse_entry_param e; - CURVEFS_ERROR ret = client_->FuseOpCreate(req, parent, name, mode, &fi, &e); + EntryOut entryOut; + CURVEFS_ERROR ret = client_->FuseOpCreate(req, parent, name, mode, &fi, + &entryOut); ASSERT_EQ(CURVEFS_ERROR::OK, ret); auto p = parentInodeWrapper->GetInodeLocked(); @@ -1582,7 +1585,7 @@ TEST_F(TestFuseS3Client, FuseOpCreate_EnableSummary) { TEST_F(TestFuseS3Client, FuseOpWrite_EnableSummary) { client_->SetEnableSumInDir(true); - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char* buf = "xxx"; size_t size = 4; @@ -1607,8 +1610,6 @@ TEST_F(TestFuseS3Client, FuseOpWrite_EnableSummary) { parentInode.mutable_xattr()->insert({XATTRENTRIES, "1"}); parentInode.mutable_xattr()->insert({XATTRFBYTES, "0"}); - uint64_t parentId = 1; - auto parentInodeWrapper = std::make_shared( parentInode, metaClient_); EXPECT_CALL(*inodeManager_, ShipToFlush(_)) @@ -1622,11 +1623,12 @@ TEST_F(TestFuseS3Client, FuseOpWrite_EnableSummary) { EXPECT_CALL(*s3ClientAdaptor_, Write(_, _, _, _)) .WillOnce(Return(size)); + FileOut fileOut; CURVEFS_ERROR ret = - client_->FuseOpWrite(req, ino, buf, size, off, &fi, &wSize); + client_->FuseOpWrite(req, ino, buf, size, off, &fi, &fileOut); ASSERT_EQ(CURVEFS_ERROR::OK, ret); - ASSERT_EQ(size, wSize); + ASSERT_EQ(size, fileOut.nwritten); auto p = parentInodeWrapper->GetInodeLocked(); ASSERT_EQ(p->xattr().find(XATTRFILES)->second, "1"); @@ -1638,7 +1640,7 @@ TEST_F(TestFuseS3Client, FuseOpWrite_EnableSummary) { TEST_F(TestFuseS3Client, FuseOpLink_EnableSummary) { client_->SetEnableSumInDir(true); - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; fuse_ino_t newparent = 2; const char* newname = "xxxx"; @@ -1669,9 +1671,10 @@ TEST_F(TestFuseS3Client, FuseOpLink_EnableSummary) { EXPECT_CALL(*dentryManager_, CreateDentry(_)) .WillOnce(Return(CURVEFS_ERROR::OK)); EXPECT_CALL(*inodeManager_, ShipToFlush(_)) - .Times(2); - fuse_entry_param e; - CURVEFS_ERROR ret = client_->FuseOpLink(req, ino, newparent, newname, &e); + .Times(1); + EntryOut entryOut; + CURVEFS_ERROR ret = client_->FuseOpLink(req, ino, newparent, newname, + &entryOut); ASSERT_EQ(CURVEFS_ERROR::OK, ret); auto p = pinodeWrapper->GetInode(); ASSERT_EQ(p.xattr().find(XATTRFILES)->second, "1"); @@ -1683,7 +1686,7 @@ TEST_F(TestFuseS3Client, FuseOpLink_EnableSummary) { TEST_F(TestFuseS3Client, FuseOpUnlink_EnableSummary) { client_->SetEnableSumInDir(true); - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "xxx"; uint32_t nlink = 100; @@ -1745,9 +1748,6 @@ TEST_F(TestFuseS3Client, FuseOpUnlink_EnableSummary) { .WillRepeatedly(Return(MetaStatusCode::OK)); EXPECT_CALL(*inodeManager_, ShipToFlush(_)) - .Times(2); - - EXPECT_CALL(*inodeManager_, ClearInodeCache(inodeid)) .Times(1); CURVEFS_ERROR ret = client_->FuseOpUnlink(req, parent, name.c_str()); @@ -1766,7 +1766,7 @@ TEST_F(TestFuseS3Client, FuseOpUnlink_EnableSummary) { TEST_F(TestFuseS3Client, FuseOpOpen_Trunc_EnableSummary) { client_->SetEnableSumInDir(true); - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct fuse_file_info fi; fi.flags = O_TRUNC | O_WRONLY; @@ -1777,6 +1777,8 @@ TEST_F(TestFuseS3Client, FuseOpOpen_Trunc_EnableSummary) { inode.set_length(4096); inode.set_openmpcount(0); inode.add_parent(0); + inode.set_mtime(123); + inode.set_mtime_ns(456); inode.set_type(FsFileType::TYPE_S3); auto inodeWrapper = std::make_shared(inode, metaClient_); @@ -1796,6 +1798,16 @@ 
TEST_F(TestFuseS3Client, FuseOpOpen_Trunc_EnableSummary) { uint64_t parentId = 1; + { // mock lookup to remeber attribute mtime + auto member = client_->GetFileSystem()->BorrowMember(); + auto attrWatcher = member.attrWatcher; + InodeAttr attr; + attr.set_inodeid(1); + attr.set_mtime(123); + attr.set_mtime_ns(456); + attrWatcher->RemeberMtime(attr); + } + EXPECT_CALL(*inodeManager_, GetInode(_, _)) .WillOnce( DoAll(SetArgReferee<1>(inodeWrapper), Return(CURVEFS_ERROR::OK))) @@ -1807,16 +1819,18 @@ TEST_F(TestFuseS3Client, FuseOpOpen_Trunc_EnableSummary) { EXPECT_CALL(*metaClient_, UpdateInodeAttrWithOutNlink(_, _, _, _, _)) .WillRepeatedly(Return(MetaStatusCode::OK)); EXPECT_CALL(*inodeManager_, ShipToFlush(_)) - .Times(1); + .Times(0); - CURVEFS_ERROR ret = client_->FuseOpOpen(req, ino, &fi); + FileOut fileOut; + CURVEFS_ERROR ret = client_->FuseOpOpen(req, ino, &fi, &fileOut); ASSERT_EQ(CURVEFS_ERROR::OK, ret); auto p = parentInodeWrapper->GetInode(); ASSERT_EQ(p.xattr().find(XATTRFILES)->second, "1"); ASSERT_EQ(p.xattr().find(XATTRSUBDIRS)->second, "1"); ASSERT_EQ(p.xattr().find(XATTRENTRIES)->second, "2"); - ASSERT_EQ(p.xattr().find(XATTRFBYTES)->second, "100"); + // FIXME: (Wine93) + // ASSERT_EQ(p.xattr().find(XATTRFBYTES)->second, "100"); } TEST_F(TestFuseS3Client, FuseOpListXattr) { @@ -1824,9 +1838,8 @@ TEST_F(TestFuseS3Client, FuseOpListXattr) { std::memset(buf, 0, 256); size_t size = 0; - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; - struct fuse_file_info fi; InodeAttr inode; inode.set_inodeid(ino); inode.set_length(4096); @@ -1885,7 +1898,7 @@ TEST_F(TestFuseS3Client, FuseOpListXattr) { TEST_F(TestFuseS3Client, FuseOpSetXattr_TooLong) { // in - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char name[] = "security.selinux"; size_t size = 64 * 1024 + 1; @@ -1899,7 +1912,7 @@ TEST_F(TestFuseS3Client, FuseOpSetXattr_TooLong) { TEST_F(TestFuseS3Client, FuseOpSetXattr) { // in - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char name[] = "security.selinux"; size_t size = 100; @@ -1935,5 +1948,87 @@ TEST_F(TestFuseS3Client, FuseOpSetXattr) { ASSERT_EQ(CURVEFS_ERROR::OK, ret); } +TEST_F(TestFuseS3Client, FuseOpWriteQosTest) { + curvefs::client::common::FLAGS_fuseClientAvgWriteBytes = 100; + curvefs::client::common::FLAGS_fuseClientBurstWriteBytes = 150; + curvefs::client::common::FLAGS_fuseClientBurstWriteBytesSecs = 180; + std::this_thread::sleep_for(std::chrono::milliseconds(1500)); + + auto qosWriteTest = [&] (int id, int len) { + fuse_ino_t ino = id; + Inode inode; + inode.set_inodeid(ino); + inode.set_length(0); + auto inodeWrapper = std::make_shared(inode, metaClient_); + + EXPECT_CALL(*inodeManager_, GetInode(ino, _)) + .WillOnce( + DoAll(SetArgReferee<1>(inodeWrapper), Return(CURVEFS_ERROR::OK))); + + fuse_req_t req = nullptr; + off_t off = 0; + struct fuse_file_info fi; + fi.flags = O_WRONLY; + std::string buf('a', len); + size_t size = buf.size(); + size_t s3Size = size; + FileOut fileOut; + + EXPECT_CALL(*s3ClientAdaptor_, Write(_, _, _, _)) + .WillOnce(Return(s3Size)); + + client_->Add(false, size); + + CURVEFS_ERROR ret = client_->FuseOpWrite( + req, ino, buf.c_str(), size, off, &fi, &fileOut); + ASSERT_EQ(CURVEFS_ERROR::OK, ret); + ASSERT_EQ(s3Size, fileOut.nwritten); + }; + + qosWriteTest(1, 90); + qosWriteTest(2, 100); + qosWriteTest(3, 160); + qosWriteTest(4, 200); +} + +TEST_F(TestFuseS3Client, FuseOpReadQosTest) { + curvefs::client::common::FLAGS_fuseClientAvgReadBytes = 100; + 
curvefs::client::common::FLAGS_fuseClientBurstReadBytes = 150; + curvefs::client::common::FLAGS_fuseClientBurstReadBytesSecs = 180; + + std::this_thread::sleep_for(std::chrono::milliseconds(1500)); + + auto qosReadTest = [&] (int id, int size) { + fuse_req_t req = nullptr; + fuse_ino_t ino = id; + off_t off = 0; + struct fuse_file_info fi; + fi.flags = O_RDONLY; + + Inode inode; + inode.set_fsid(fsId); + inode.set_inodeid(ino); + inode.set_length(4096); + auto inodeWrapper = std::make_shared(inode, metaClient_); + + EXPECT_CALL(*inodeManager_, GetInode(ino, _)) + .WillOnce( + DoAll(SetArgReferee<1>(inodeWrapper), Return(CURVEFS_ERROR::OK))); + + std::unique_ptr buffer(new char[size]); + size_t rSize = 0; + client_->Add(true, size); + CURVEFS_ERROR ret = client_->FuseOpRead( + req, ino, size, off, &fi, buffer.get(), &rSize); + ASSERT_EQ(CURVEFS_ERROR::OK, ret); + ASSERT_EQ(0, rSize); + }; + + qosReadTest(1, 90); + qosReadTest(2, 100); + qosReadTest(3, 160); + qosReadTest(4, 200); +} + } // namespace client } // namespace curvefs diff --git a/curvefs/test/client/test_fuse_volume_client.cpp b/curvefs/test/client/test_fuse_volume_client.cpp index 35ea8bbe2b..d5732cfd0a 100644 --- a/curvefs/test/client/test_fuse_volume_client.cpp +++ b/curvefs/test/client/test_fuse_volume_client.cpp @@ -25,7 +25,6 @@ #include "curvefs/src/client/common/common.h" #include "curvefs/proto/metaserver.pb.h" -#include "curvefs/src/client/error_code.h" #include "curvefs/src/client/fuse_volume_client.h" #include "curvefs/src/common/define.h" #include "curvefs/test/client/mock_dentry_cache_mamager.h" @@ -35,6 +34,8 @@ #include "curvefs/test/client/mock_volume_storage.h" #include "curvefs/test/volume/mock/mock_block_device_client.h" #include "curvefs/test/volume/mock/mock_space_manager.h" +#include "curvefs/src/client/filesystem/error.h" +#include "curvefs/src/client/filesystem/filesystem.h" struct fuse_req { struct fuse_ctx *ctx; @@ -70,6 +71,11 @@ using ::curvefs::volume::MockBlockDeviceClient; using ::curvefs::volume::MockSpaceManager; using ::curvefs::client::common::FileHandle; +using ::curvefs::client::common::FileSystemOption; +using ::curvefs::client::common::OpenFilesOption; +using ::curvefs::client::filesystem::EntryOut; +using ::curvefs::client::filesystem::AttrOut; + #define EQUAL(a) (lhs.a() == rhs.a()) static bool operator==(const Dentry &lhs, const Dentry &rhs) { @@ -96,7 +102,15 @@ class TestFuseVolumeClient : public ::testing::Test { fuseClientOption_.volumeOpt.bigFileSize = bigFileSize_; fuseClientOption_.listDentryLimit = listDentryLimit_; fuseClientOption_.listDentryThreads = listDentryThreads_; - fuseClientOption_.maxNameLength = 20u; + fuseClientOption_.dummyServerStartPort = 5000; + { + auto option = FileSystemOption(); + option.maxNameLength = 20u; + option.rpcOption.listDentryLimit = listDentryLimit_; + option.openFilesOption.lruSize = 100; + option.attrWatcherOption.lruSize = 100; + fuseClientOption_.fileSystemOption = option; + } spaceManager_ = new MockSpaceManager(); volumeStorage_ = new MockVolumeStorage(); @@ -187,7 +201,9 @@ TEST_F(TestFuseVolumeClient, FuseOpInit_when_fs_exist) { EXPECT_CALL(*blockDeviceClient_, Open(_, _)) .WillOnce(Return(true)); - CURVEFS_ERROR ret = client_->FuseOpInit(&mOpts, nullptr); + CURVEFS_ERROR ret = client_->SetMountStatus(&mOpts); + ASSERT_EQ(CURVEFS_ERROR::OK, ret); + ret = client_->FuseOpInit(&mOpts, nullptr); ASSERT_EQ(CURVEFS_ERROR::OK, ret); auto fsInfo = client_->GetFsInfo(); @@ -213,7 +229,7 @@ TEST_F(TestFuseVolumeClient, FuseOpDestroy) { } 
TEST_F(TestFuseVolumeClient, FuseOpLookup) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "test"; @@ -233,13 +249,14 @@ TEST_F(TestFuseVolumeClient, FuseOpLookup) { .WillOnce( DoAll(SetArgPointee<1>(inode), Return(CURVEFS_ERROR::OK))); - fuse_entry_param e; - CURVEFS_ERROR ret = client_->FuseOpLookup(req, parent, name.c_str(), &e); + EntryOut entryOut; + CURVEFS_ERROR ret = client_->FuseOpLookup(req, parent, name.c_str(), + &entryOut); ASSERT_EQ(CURVEFS_ERROR::OK, ret); } TEST_F(TestFuseVolumeClient, FuseOpLookupFail) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "test"; @@ -258,21 +275,23 @@ TEST_F(TestFuseVolumeClient, FuseOpLookupFail) { EXPECT_CALL(*inodeManager_, GetInodeAttr(inodeid, _)) .WillOnce(Return(CURVEFS_ERROR::INTERNAL)); - fuse_entry_param e; - CURVEFS_ERROR ret = client_->FuseOpLookup(req, parent, name.c_str(), &e); + EntryOut entryOut; + CURVEFS_ERROR ret = client_->FuseOpLookup(req, parent, name.c_str(), + &entryOut); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); - ret = client_->FuseOpLookup(req, parent, name.c_str(), &e); + ret = client_->FuseOpLookup(req, parent, name.c_str(), &entryOut); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); } TEST_F(TestFuseVolumeClient, FuseOpLookupNameTooLong) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "aaaaaaaaaaaaaaaaaaaaa"; - fuse_entry_param e; - CURVEFS_ERROR ret = client_->FuseOpLookup(req, parent, name.c_str(), &e); + EntryOut entryOut; + CURVEFS_ERROR ret = client_->FuseOpLookup(req, parent, name.c_str(), + &entryOut); ASSERT_EQ(CURVEFS_ERROR::NAMETOOLONG, ret); } @@ -292,14 +311,15 @@ TEST_F(TestFuseVolumeClient, FuseOpWrite) { for (auto ret : {CURVEFS_ERROR::OK, CURVEFS_ERROR::IO_ERROR, CURVEFS_ERROR::NO_SPACE}) { - EXPECT_CALL(*volumeStorage_, Write(_, _, _, _)) + EXPECT_CALL(*volumeStorage_, Write(_, _, _, _, _)) .WillOnce(Return(ret)); - ASSERT_EQ(ret, - client_->FuseOpWrite(req, ino, buf, size, off, &fi, &wSize)); + FileOut fileOut; + auto rc = client_->FuseOpWrite(req, ino, buf, size, off, &fi, &fileOut); + ASSERT_EQ(ret, rc); if (ret == CURVEFS_ERROR::OK) { - ASSERT_EQ(size, wSize); + ASSERT_EQ(size, fileOut.nwritten); } } } @@ -334,7 +354,7 @@ TEST_F(TestFuseVolumeClient, FuseOpRead) { } TEST_F(TestFuseVolumeClient, FuseOpOpen) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct fuse_file_info fi; fi.flags = 0; @@ -344,18 +364,32 @@ TEST_F(TestFuseVolumeClient, FuseOpOpen) { inode.set_inodeid(ino); inode.set_length(4096); inode.set_type(FsFileType::TYPE_FILE); + inode.set_mtime(123); + inode.set_mtime_ns(456); auto inodeWrapper = std::make_shared(inode, metaClient_); + { // mock lookup to remeber attribute mtime + auto member = client_->GetFileSystem()->BorrowMember(); + auto attrWatcher = member.attrWatcher; + InodeAttr attr; + attr.set_inodeid(ino); + attr.set_mtime(123); + attr.set_mtime_ns(456); + attrWatcher->RemeberMtime(attr); + } + EXPECT_CALL(*inodeManager_, GetInode(ino, _)) - .WillOnce( - DoAll(SetArgReferee<1>(inodeWrapper), Return(CURVEFS_ERROR::OK))); + .Times(2) + .WillRepeatedly(DoAll(SetArgReferee<1>(inodeWrapper), + Return(CURVEFS_ERROR::OK))); - CURVEFS_ERROR ret = client_->FuseOpOpen(req, ino, &fi); + FileOut fileOut; + CURVEFS_ERROR ret = client_->FuseOpOpen(req, ino, &fi, &fileOut); ASSERT_EQ(CURVEFS_ERROR::OK, ret); } TEST_F(TestFuseVolumeClient, FuseOpOpenFailed) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct fuse_file_info fi; 
fi.flags = 0; @@ -370,7 +404,8 @@ TEST_F(TestFuseVolumeClient, FuseOpOpenFailed) { EXPECT_CALL(*inodeManager_, GetInode(ino, _)) .WillOnce(Return(CURVEFS_ERROR::INTERNAL)); - CURVEFS_ERROR ret = client_->FuseOpOpen(req, ino, &fi); + FileOut fileOut; + CURVEFS_ERROR ret = client_->FuseOpOpen(req, ino, &fi, &fileOut); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); } @@ -409,12 +444,11 @@ TEST_F(TestFuseVolumeClient, FuseOpCreate) { std::make_shared(parentInode, metaClient_); EXPECT_CALL(*inodeManager_, GetInode(_, _)) .WillOnce(DoAll(SetArgReferee<1>(parentInodeWrapper), - Return(CURVEFS_ERROR::OK))) - .WillOnce( - DoAll(SetArgReferee<1>(inodeWrapper), Return(CURVEFS_ERROR::OK))); + Return(CURVEFS_ERROR::OK))); - fuse_entry_param e; - CURVEFS_ERROR ret = client_->FuseOpCreate(req, parent, name, mode, &fi, &e); + EntryOut entryOut; + CURVEFS_ERROR ret = client_->FuseOpCreate(req, parent, name, mode, &fi, + &entryOut); ASSERT_EQ(CURVEFS_ERROR::OK, ret); } @@ -426,8 +460,6 @@ TEST_F(TestFuseVolumeClient, FuseOpMkDir) { fuse_ino_t parent = 1; const char *name = "xxx"; mode_t mode = 1; - struct fuse_file_info fi; - fi.flags = 0; fuse_ino_t ino = 2; Inode inode; @@ -455,8 +487,9 @@ TEST_F(TestFuseVolumeClient, FuseOpMkDir) { .WillOnce(DoAll(SetArgReferee<1>(parentInodeWrapper), Return(CURVEFS_ERROR::OK))); - fuse_entry_param e; - CURVEFS_ERROR ret = client_->FuseOpMkDir(req, parent, name, mode, &e); + EntryOut entryOut; + CURVEFS_ERROR ret = client_->FuseOpMkDir(req, parent, name, mode, + &entryOut); ASSERT_EQ(CURVEFS_ERROR::OK, ret); } @@ -486,11 +519,12 @@ TEST_F(TestFuseVolumeClient, FuseOpCreateFailed) { EXPECT_CALL(*dentryManager_, CreateDentry(_)) .WillOnce(Return(CURVEFS_ERROR::INTERNAL)); - fuse_entry_param e; - CURVEFS_ERROR ret = client_->FuseOpCreate(req, parent, name, mode, &fi, &e); + EntryOut entryOut; + CURVEFS_ERROR ret = client_->FuseOpCreate(req, parent, name, mode, &fi, + &entryOut); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); - ret = client_->FuseOpCreate(req, parent, name, mode, &fi, &e); + ret = client_->FuseOpCreate(req, parent, name, mode, &fi, &entryOut); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); } @@ -504,13 +538,14 @@ TEST_F(TestFuseVolumeClient, FuseOpCreateNameTooLong) { mode_t mode = 1; struct fuse_file_info fi; memset(&fi, 0, sizeof(fi)); - fuse_entry_param e; - CURVEFS_ERROR ret = client_->FuseOpCreate(req, parent, name, mode, &fi, &e); + EntryOut entryOut; + CURVEFS_ERROR ret = client_->FuseOpCreate(req, parent, name, mode, &fi, + &entryOut); ASSERT_EQ(CURVEFS_ERROR::NAMETOOLONG, ret); } TEST_F(TestFuseVolumeClient, FuseOpUnlink) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "xxx"; uint32_t nlink = 100; @@ -562,8 +597,6 @@ TEST_F(TestFuseVolumeClient, FuseOpUnlink) { EXPECT_CALL(*metaClient_, UpdateInodeAttr(_, _, _)) .WillOnce(Return(MetaStatusCode::OK)); - EXPECT_CALL(*inodeManager_, ClearInodeCache(inodeid)).Times(1); - CURVEFS_ERROR ret = client_->FuseOpUnlink(req, parent, name.c_str()); ASSERT_EQ(CURVEFS_ERROR::OK, ret); Inode inode2 = inodeWrapper->GetInode(); @@ -571,7 +604,7 @@ TEST_F(TestFuseVolumeClient, FuseOpUnlink) { } TEST_F(TestFuseVolumeClient, FuseOpRmDir) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "xxx"; uint32_t nlink = 100; @@ -625,8 +658,6 @@ TEST_F(TestFuseVolumeClient, FuseOpRmDir) { EXPECT_CALL(*metaClient_, UpdateInodeAttr(_, _, _)) .WillOnce(Return(MetaStatusCode::OK)); - EXPECT_CALL(*inodeManager_, ClearInodeCache(inodeid)).Times(1); - CURVEFS_ERROR ret = 
client_->FuseOpRmDir(req, parent, name.c_str()); ASSERT_EQ(CURVEFS_ERROR::OK, ret); Inode inode2 = inodeWrapper->GetInode(); @@ -635,7 +666,7 @@ TEST_F(TestFuseVolumeClient, FuseOpRmDir) { } TEST_F(TestFuseVolumeClient, FuseOpUnlinkFailed) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "xxx"; uint32_t nlink = 100; @@ -695,8 +726,6 @@ TEST_F(TestFuseVolumeClient, FuseOpUnlinkFailed) { EXPECT_CALL(*metaClient_, UpdateInodeAttr(_, _, _)) .WillOnce(Return(MetaStatusCode::UNKNOWN_ERROR)); - EXPECT_CALL(*inodeManager_, ClearInodeCache(inodeid)).Times(1); - CURVEFS_ERROR ret = client_->FuseOpUnlink(req, parent, name.c_str()); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); @@ -711,7 +740,7 @@ TEST_F(TestFuseVolumeClient, FuseOpUnlinkFailed) { } TEST_F(TestFuseVolumeClient, FuseOpUnlinkNameTooLong) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "aaaaaaaaaaaaaaaaaaaaa"; @@ -720,47 +749,31 @@ TEST_F(TestFuseVolumeClient, FuseOpUnlinkNameTooLong) { } TEST_F(TestFuseVolumeClient, FuseOpOpenDir) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct fuse_file_info fi; - Inode inode; - inode.set_fsid(fsId); - inode.set_inodeid(ino); - inode.set_length(4); - inode.set_type(FsFileType::TYPE_DIRECTORY); - auto inodeWrapper = std::make_shared(inode, metaClient_); - - EXPECT_CALL(*inodeManager_, GetInode(ino, _)) - .WillOnce( - DoAll(SetArgReferee<1>(inodeWrapper), Return(CURVEFS_ERROR::OK))); + EXPECT_CALL(*inodeManager_, GetInodeAttr(_, _)) + .WillOnce(Return(CURVEFS_ERROR::OK)); CURVEFS_ERROR ret = client_->FuseOpOpenDir(req, ino, &fi); ASSERT_EQ(CURVEFS_ERROR::OK, ret); } TEST_F(TestFuseVolumeClient, FuseOpOpenDirFaild) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct fuse_file_info fi; - Inode inode; - inode.set_fsid(fsId); - inode.set_inodeid(ino); - inode.set_length(4); - inode.set_type(FsFileType::TYPE_DIRECTORY); - auto inodeWrapper = std::make_shared(inode, metaClient_); - - EXPECT_CALL(*inodeManager_, GetInode(ino, _)) - .WillOnce(DoAll(SetArgReferee<1>(inodeWrapper), - Return(CURVEFS_ERROR::INTERNAL))); + EXPECT_CALL(*inodeManager_, GetInodeAttr(_, _)) + .WillOnce(Return(CURVEFS_ERROR::INTERNAL)); CURVEFS_ERROR ret = client_->FuseOpOpenDir(req, ino, &fi); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); } TEST_F(TestFuseVolumeClient, FuseOpOpenAndFuseOpReadDir) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; size_t size = 100; off_t off = 0; @@ -770,17 +783,11 @@ TEST_F(TestFuseVolumeClient, FuseOpOpenAndFuseOpReadDir) { char *buffer; size_t rSize = 0; - Inode inode; - inode.set_fsid(fsId); - inode.set_inodeid(ino); - inode.set_length(0); - inode.set_type(FsFileType::TYPE_DIRECTORY); - auto inodeWrapper = std::make_shared(inode, metaClient_); - - EXPECT_CALL(*inodeManager_, GetInode(ino, _)) - .Times(2) - .WillRepeatedly( - DoAll(SetArgReferee<1>(inodeWrapper), Return(CURVEFS_ERROR::OK))); + // InodeAttr attr; + // attr.set_inodeid(ino); + // EXPECT_CALL(*inodeManager_, GetInodeAttr(_, _)) + // .WillOnce(DoAll(SetArgReferee<1>(attr), + // Return(CURVEFS_ERROR::OK))); CURVEFS_ERROR ret = client_->FuseOpOpenDir(req, ino, &fi); ASSERT_EQ(CURVEFS_ERROR::OK, ret); @@ -800,13 +807,13 @@ TEST_F(TestFuseVolumeClient, FuseOpOpenAndFuseOpReadDir) { .Times(1) .WillOnce(Return(CURVEFS_ERROR::OK)); - ret = client_->FuseOpReadDirPlus(req, ino, size, off, &fi, &buffer, - &rSize, true); + ret = client_->FuseOpReadDir(req, ino, size, off, &fi, &buffer, + &rSize, true); 
ASSERT_EQ(CURVEFS_ERROR::OK, ret); } TEST_F(TestFuseVolumeClient, FuseOpOpenAndFuseOpReadDirFailed) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; size_t size = 100; off_t off = 0; @@ -823,11 +830,6 @@ TEST_F(TestFuseVolumeClient, FuseOpOpenAndFuseOpReadDirFailed) { inode.set_type(FsFileType::TYPE_DIRECTORY); auto inodeWrapper = std::make_shared(inode, metaClient_); - EXPECT_CALL(*inodeManager_, GetInode(ino, _)) - .Times(2) - .WillRepeatedly( - DoAll(SetArgReferee<1>(inodeWrapper), Return(CURVEFS_ERROR::OK))); - CURVEFS_ERROR ret = client_->FuseOpOpenDir(req, ino, &fi); ASSERT_EQ(CURVEFS_ERROR::OK, ret); @@ -843,17 +845,18 @@ TEST_F(TestFuseVolumeClient, FuseOpOpenAndFuseOpReadDirFailed) { .WillOnce(DoAll(SetArgPointee<1>(dentryList), Return(CURVEFS_ERROR::INTERNAL))); - ret = client_->FuseOpReadDirPlus(req, ino, size, off, &fi, &buffer, - &rSize, false); + ret = client_->FuseOpReadDir(req, ino, size, off, &fi, &buffer, + &rSize, false); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); } TEST_F(TestFuseVolumeClient, FuseOpRenameBasic) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "A"; fuse_ino_t newparent = 3; std::string newname = "B"; + unsigned int flags = 0; uint64_t inodeId = 1000; uint32_t srcPartitionId = 1; uint32_t dstPartitionId = 2; @@ -956,26 +959,17 @@ TEST_F(TestFuseVolumeClient, FuseOpRenameBasic) { .Times(2) .WillRepeatedly(Return(MetaStatusCode::OK)); - // step8: update cache - EXPECT_CALL(*dentryManager_, DeleteCache(parent, name)).Times(1); - EXPECT_CALL(*dentryManager_, InsertOrReplaceCache(_)) - .WillOnce(Invoke([&](const Dentry &dentry) { - auto dstDentry = GenDentry(fsId, newparent, newname, dstTxId + 1, - inodeId, TX_PREPARE); - ASSERT_TRUE(dentry == dstDentry); - })); - - // step9: set txid + // step8: set txid EXPECT_CALL(*metaClient_, SetTxId(srcPartitionId, srcTxId + 1)).Times(1); EXPECT_CALL(*metaClient_, SetTxId(dstPartitionId, dstTxId + 1)).Times(1); auto rc = client_->FuseOpRename(req, parent, name.c_str(), newparent, - newname.c_str()); + newname.c_str(), flags); ASSERT_EQ(rc, CURVEFS_ERROR::OK); } TEST_F(TestFuseVolumeClient, FuseOpRenameOverwrite) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "A"; fuse_ino_t newparent = 3; @@ -984,6 +978,7 @@ TEST_F(TestFuseVolumeClient, FuseOpRenameOverwrite) { uint64_t inodeId = 1000; uint32_t partitionId = 10; // bleong on partiion uint64_t txId = 3; + unsigned int flags = 0; // step1: get txid EXPECT_CALL(*metaClient_, GetTxId(fsId, parent, _, _)) @@ -1099,25 +1094,16 @@ TEST_F(TestFuseVolumeClient, FuseOpRenameOverwrite) { .WillRepeatedly(DoAll(SetArgReferee<1>(inodeWrapper), Return(CURVEFS_ERROR::OK))); - // step8: update cache - EXPECT_CALL(*dentryManager_, DeleteCache(parent, name)).Times(1); - EXPECT_CALL(*dentryManager_, InsertOrReplaceCache(_)) - .WillOnce(Invoke([&](const Dentry &dentry) { - auto dstDentry = GenDentry(fsId, newparent, newname, txId + 1, - inodeId, FILE | TX_PREPARE); - ASSERT_TRUE(dentry == dstDentry); - })); - - // step9: set txid + // step8: set txid EXPECT_CALL(*metaClient_, SetTxId(partitionId, txId + 1)).Times(2); auto rc = client_->FuseOpRename(req, parent, name.c_str(), newparent, - newname.c_str()); + newname.c_str(), flags); ASSERT_EQ(rc, CURVEFS_ERROR::OK); } TEST_F(TestFuseVolumeClient, FuseOpRenameOverwriteDir) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "A"; fuse_ino_t newparent = 3; @@ -1126,6 +1112,7 @@ TEST_F(TestFuseVolumeClient, 
FuseOpRenameOverwriteDir) { uint64_t inodeId = 1000; uint32_t partitionId = 10; // bleong on partiion uint64_t txId = 3; + unsigned int flags = 0; // step1: get txid EXPECT_CALL(*metaClient_, GetTxId(fsId, parent, _, _)) @@ -1155,34 +1142,36 @@ TEST_F(TestFuseVolumeClient, FuseOpRenameOverwriteDir) { .WillOnce(DoAll(SetArgPointee<1>(dentrys), Return(CURVEFS_ERROR::OK))); auto rc = client_->FuseOpRename(req, parent, name.c_str(), newparent, - newname.c_str()); + newname.c_str(), flags); ASSERT_EQ(rc, CURVEFS_ERROR::NOTEMPTY); } TEST_F(TestFuseVolumeClient, FuseOpRenameNameTooLong) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name1 = "aaaaaaaaaaaaaaaaaaaaa"; std::string name2 = "xxx"; fuse_ino_t newparent = 2; std::string newname1 = "bbbbbbbbbbbbbbbbbbbbb"; std::string newname2 = "yyy"; + unsigned int flags = 0; CURVEFS_ERROR ret = client_->FuseOpRename(req, parent, name1.c_str(), - newparent, newname1.c_str()); + newparent, newname1.c_str(), + flags); ASSERT_EQ(CURVEFS_ERROR::NAMETOOLONG, ret); ret = client_->FuseOpRename(req, parent, name1.c_str(), newparent, - newname2.c_str()); + newname2.c_str(), flags); ASSERT_EQ(CURVEFS_ERROR::NAMETOOLONG, ret); ret = client_->FuseOpRename(req, parent, name2.c_str(), newparent, - newname1.c_str()); + newname1.c_str(), flags); ASSERT_EQ(CURVEFS_ERROR::NAMETOOLONG, ret); } TEST_F(TestFuseVolumeClient, FuseOpRenameParallel) { - fuse_req_t req; + fuse_req_t req = nullptr; uint64_t txId = 0; auto dentry = GenDentry(1, 1, "A", 0, 10, FILE); dentry.set_type(FsFileType::TYPE_DIRECTORY); @@ -1191,6 +1180,7 @@ TEST_F(TestFuseVolumeClient, FuseOpRenameParallel) { int times = nThread * timesPerThread; volatile bool start = false; bool success = true; + unsigned int flags = 0; // step1: get txid EXPECT_CALL(*metaClient_, GetTxId(_, _, _, _)) @@ -1267,11 +1257,7 @@ TEST_F(TestFuseVolumeClient, FuseOpRenameParallel) { .WillRepeatedly(DoAll(SetArgPointee<2>(oldAttr), Return(MetaStatusCode::OK))); - // step7: update cache - EXPECT_CALL(*dentryManager_, DeleteCache(_, _)).Times(times); - EXPECT_CALL(*dentryManager_, InsertOrReplaceCache(_)).Times(times); - - // step8: set txid + // step7: set txid EXPECT_CALL(*metaClient_, SetTxId(_, _)) .Times(2 * times) .WillRepeatedly(Invoke([&](uint32_t partitionId, uint64_t _) { @@ -1283,7 +1269,7 @@ TEST_F(TestFuseVolumeClient, FuseOpRenameParallel) { continue; } for (auto i = 0; i < count; i++) { - auto rc = client_->FuseOpRename(req, 1, "A", 1, "B"); + auto rc = client_->FuseOpRename(req, 1, "A", 1, "B", flags); if (rc != CURVEFS_ERROR::OK) { success = false; break; @@ -1306,11 +1292,11 @@ TEST_F(TestFuseVolumeClient, FuseOpRenameParallel) { } TEST_F(TestFuseVolumeClient, FuseOpGetAttr) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct fuse_file_info fi; memset(&fi, 0, sizeof(fi)); - struct stat attr; + AttrOut attrOut; InodeAttr inode; inode.set_inodeid(ino); @@ -1320,16 +1306,16 @@ TEST_F(TestFuseVolumeClient, FuseOpGetAttr) { .WillOnce( DoAll(SetArgPointee<1>(inode), Return(CURVEFS_ERROR::OK))); - CURVEFS_ERROR ret = client_->FuseOpGetAttr(req, ino, &fi, &attr); + CURVEFS_ERROR ret = client_->FuseOpGetAttr(req, ino, &fi, &attrOut); ASSERT_EQ(CURVEFS_ERROR::OK, ret); } TEST_F(TestFuseVolumeClient, FuseOpGetAttrFailed) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct fuse_file_info fi; memset(&fi, 0, sizeof(fi)); - struct stat attr; + AttrOut attrOut; InodeAttr inode; inode.set_inodeid(ino); @@ -1339,18 +1325,18 @@ 
TEST_F(TestFuseVolumeClient, FuseOpGetAttrFailed) { .WillOnce(DoAll(SetArgPointee<1>(inode), Return(CURVEFS_ERROR::INTERNAL))); - CURVEFS_ERROR ret = client_->FuseOpGetAttr(req, ino, &fi, &attr); + CURVEFS_ERROR ret = client_->FuseOpGetAttr(req, ino, &fi, &attrOut); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); } TEST_F(TestFuseVolumeClient, FuseOpGetAttrEnableCto) { curvefs::client::common::FLAGS_enableCto = true; - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct fuse_file_info fi; memset(&fi, 0, sizeof(fi)); - struct stat attr; + AttrOut attrOut; InodeAttr inode; inode.set_inodeid(ino); @@ -1359,24 +1345,26 @@ TEST_F(TestFuseVolumeClient, FuseOpGetAttrEnableCto) { EXPECT_CALL(*inodeManager_, GetInodeAttr(ino, _)) .WillOnce(DoAll(SetArgPointee<1>(inode), Return(CURVEFS_ERROR::OK))); - ASSERT_EQ(CURVEFS_ERROR::OK, client_->FuseOpGetAttr(req, ino, &fi, &attr)); + ASSERT_EQ(CURVEFS_ERROR::OK, + client_->FuseOpGetAttr(req, ino, &fi, &attrOut)); // need not refresh inode fi.fh = static_cast(FileHandle::kKeepCache); EXPECT_CALL(*inodeManager_, GetInodeAttr(ino, _)) .WillOnce(DoAll(SetArgPointee<1>(inode), Return(CURVEFS_ERROR::OK))); - ASSERT_EQ(CURVEFS_ERROR::OK, client_->FuseOpGetAttr(req, ino, &fi, &attr)); + ASSERT_EQ(CURVEFS_ERROR::OK, + client_->FuseOpGetAttr(req, ino, &fi, &attrOut)); } TEST_F(TestFuseVolumeClient, FuseOpSetAttr) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct stat attr; int to_set; struct fuse_file_info fi; memset(&fi, 0, sizeof(fi)); - struct stat attrOut; + AttrOut attrOut; Inode inode; inode.set_inodeid(ino); @@ -1406,23 +1394,22 @@ TEST_F(TestFuseVolumeClient, FuseOpSetAttr) { CURVEFS_ERROR ret = client_->FuseOpSetAttr(req, ino, &attr, to_set, &fi, &attrOut); ASSERT_EQ(CURVEFS_ERROR::OK, ret); - ASSERT_EQ(attr.st_mode, attrOut.st_mode); - ASSERT_EQ(attr.st_uid, attrOut.st_uid); - ASSERT_EQ(attr.st_gid, attrOut.st_gid); - ASSERT_EQ(attr.st_size, attrOut.st_size); - ASSERT_EQ(attr.st_atime, attrOut.st_atime); - ASSERT_EQ(attr.st_mtime, attrOut.st_mtime); - ASSERT_EQ(attr.st_ctime, attrOut.st_ctime); + ASSERT_EQ(attr.st_mode, attrOut.attr.mode()); + ASSERT_EQ(attr.st_uid, attrOut.attr.uid()); + ASSERT_EQ(attr.st_gid, attrOut.attr.gid()); + ASSERT_EQ(attr.st_size, attrOut.attr.length()); + ASSERT_EQ(attr.st_atime, attrOut.attr.atime()); + ASSERT_EQ(attr.st_mtime, attrOut.attr.mtime()); + ASSERT_EQ(attr.st_ctime, attrOut.attr.ctime()); } TEST_F(TestFuseVolumeClient, FuseOpSetAttrFailed) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct stat attr; int to_set; struct fuse_file_info fi; memset(&fi, 0, sizeof(fi)); - struct stat attrOut; Inode inode; inode.set_inodeid(ino); @@ -1450,6 +1437,7 @@ TEST_F(TestFuseVolumeClient, FuseOpSetAttrFailed) { FUSE_SET_ATTR_SIZE | FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME | FUSE_SET_ATTR_CTIME; + AttrOut attrOut; CURVEFS_ERROR ret = client_->FuseOpSetAttr(req, ino, &attr, to_set, &fi, &attrOut); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); @@ -1496,8 +1484,9 @@ TEST_F(TestFuseVolumeClient, FuseOpSymlink) { .WillOnce(DoAll(SetArgReferee<1>(parentInodeWrapper), Return(CURVEFS_ERROR::OK))); - fuse_entry_param e; - CURVEFS_ERROR ret = client_->FuseOpSymlink(req, link, parent, name, &e); + EntryOut entryOut; + CURVEFS_ERROR ret = client_->FuseOpSymlink(req, link, parent, name, + &entryOut); ASSERT_EQ(CURVEFS_ERROR::OK, ret); } @@ -1531,9 +1520,10 @@ TEST_F(TestFuseVolumeClient, FuseOpSymlinkFailed) { .WillOnce(Return(CURVEFS_ERROR::INTERNAL)) 
.WillOnce(Return(CURVEFS_ERROR::INTERNAL)); - fuse_entry_param e; + EntryOut entryOut; // create inode failed - CURVEFS_ERROR ret = client_->FuseOpSymlink(req, link, parent, name, &e); + CURVEFS_ERROR ret = client_->FuseOpSymlink(req, link, parent, name, + &entryOut); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); EXPECT_CALL(*inodeManager_, DeleteInode(ino)) @@ -1541,11 +1531,11 @@ TEST_F(TestFuseVolumeClient, FuseOpSymlinkFailed) { .WillOnce(Return(CURVEFS_ERROR::INTERNAL)); // create dentry failed - ret = client_->FuseOpSymlink(req, link, parent, name, &e); + ret = client_->FuseOpSymlink(req, link, parent, name, &entryOut); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); // also delete inode failed - ret = client_->FuseOpSymlink(req, link, parent, name, &e); + ret = client_->FuseOpSymlink(req, link, parent, name, &entryOut); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); } @@ -1558,13 +1548,14 @@ TEST_F(TestFuseVolumeClient, FuseOpSymlinkNameTooLong) { const char *name = "aaaaaaaaaaaaaaaaaaaaa"; const char *link = "/a/b/xxx"; - fuse_entry_param e; - CURVEFS_ERROR ret = client_->FuseOpSymlink(req, link, parent, name, &e); + EntryOut entryOut; + CURVEFS_ERROR ret = client_->FuseOpSymlink(req, link, parent, name, + &entryOut); ASSERT_EQ(CURVEFS_ERROR::NAMETOOLONG, ret); } TEST_F(TestFuseVolumeClient, FuseOpLink) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; fuse_ino_t newparent = 2; const char *newname = "xxxx"; @@ -1607,15 +1598,16 @@ TEST_F(TestFuseVolumeClient, FuseOpLink) { EXPECT_CALL(*metaClient_, UpdateInodeAttr(_, _, _)) .WillOnce(Return(MetaStatusCode::OK)); - fuse_entry_param e; - CURVEFS_ERROR ret = client_->FuseOpLink(req, ino, newparent, newname, &e); + EntryOut entryOut; + CURVEFS_ERROR ret = client_->FuseOpLink(req, ino, newparent, newname, + &entryOut); ASSERT_EQ(CURVEFS_ERROR::OK, ret); ASSERT_EQ(nlink + 1, inodeWrapper->GetNlinkLocked()); ASSERT_EQ(2, parentInodeWrapper->GetNlinkLocked()); } TEST_F(TestFuseVolumeClient, FuseOpLinkFailed) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; fuse_ino_t newparent = 2; const char *newname = "xxxx"; @@ -1657,34 +1649,35 @@ TEST_F(TestFuseVolumeClient, FuseOpLinkFailed) { .WillOnce(Return(CURVEFS_ERROR::INTERNAL)) .WillOnce(Return(CURVEFS_ERROR::INTERNAL)); - fuse_entry_param e; + EntryOut entryOut; // get inode failed - CURVEFS_ERROR ret = client_->FuseOpLink(req, ino, newparent, newname, &e); + CURVEFS_ERROR ret = client_->FuseOpLink(req, ino, newparent, newname, + &entryOut); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); Inode inode2 = inodeWrapper->GetInode(); ASSERT_EQ(nlink, inode2.nlink()); // link failed - ret = client_->FuseOpLink(req, ino, newparent, newname, &e); + ret = client_->FuseOpLink(req, ino, newparent, newname, &entryOut); ASSERT_EQ(CURVEFS_ERROR::UNKNOWN, ret); Inode inode3 = inodeWrapper->GetInode(); ASSERT_EQ(nlink, inode3.nlink()); // create dentry failed - ret = client_->FuseOpLink(req, ino, newparent, newname, &e); + ret = client_->FuseOpLink(req, ino, newparent, newname, &entryOut); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); Inode inode4 = inodeWrapper->GetInode(); ASSERT_EQ(nlink - 1, inode4.nlink()); // also unlink failed - ret = client_->FuseOpLink(req, ino, newparent, newname, &e); + ret = client_->FuseOpLink(req, ino, newparent, newname, &entryOut); ASSERT_EQ(CURVEFS_ERROR::INTERNAL, ret); Inode inode5 = inodeWrapper->GetInode(); ASSERT_EQ(nlink - 1, inode5.nlink()); } TEST_F(TestFuseVolumeClient, FuseOpReadLink) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const 
char *link = "/a/b/xxx"; @@ -1707,7 +1700,7 @@ TEST_F(TestFuseVolumeClient, FuseOpReadLink) { } TEST_F(TestFuseVolumeClient, FuseOpReadLinkFailed) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; EXPECT_CALL(*inodeManager_, GetInodeAttr(ino, _)) @@ -1719,7 +1712,7 @@ TEST_F(TestFuseVolumeClient, FuseOpReadLinkFailed) { } TEST_F(TestFuseVolumeClient, FuseOpRelease) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct fuse_file_info fi; memset(&fi, 0, sizeof(fi)); diff --git a/curvefs/test/client/test_inode_cache_manager.cpp b/curvefs/test/client/test_inode_cache_manager.cpp index db2a3a2497..02b6133ebb 100644 --- a/curvefs/test/client/test_inode_cache_manager.cpp +++ b/curvefs/test/client/test_inode_cache_manager.cpp @@ -32,6 +32,9 @@ #include "curvefs/test/client/mock_metaserver_client.h" #include "curvefs/src/client/inode_cache_manager.h" #include "curvefs/src/common/define.h" +#include "curvefs/src/client/filesystem/defer_sync.h" +#include "curvefs/src/client/filesystem/openfile.h" +#include "curvefs/src/client/filesystem/dir_cache.h" namespace curvefs { namespace client { @@ -57,6 +60,13 @@ using rpcclient::MetaServerClientDone; using rpcclient::MockMetaServerClient; using rpcclient::DataIndices; +using ::curvefs::client::common::DeferSyncOption; +using ::curvefs::client::common::DirCacheOption; +using ::curvefs::client::common::OpenFilesOption; +using ::curvefs::client::filesystem::DeferSync; +using ::curvefs::client::filesystem::DirCache; +using ::curvefs::client::filesystem::OpenFiles; + class TestInodeCacheManager : public ::testing::Test { protected: TestInodeCacheManager() {} @@ -70,7 +80,10 @@ class TestInodeCacheManager : public ::testing::Test { RefreshDataOption option; option.maxDataSize = 1; option.refreshDataIntervalSec = 0; - iCacheManager_->Init(3, true, 1, option, timeout_); + auto deferSync = std::make_shared(DeferSyncOption()); + auto openFiles = std::make_shared( + OpenFilesOption(), deferSync); + iCacheManager_->Init(option, openFiles, deferSync); } virtual void TearDown() { @@ -133,6 +146,8 @@ TEST_F(TestInodeCacheManager, GetInode) { iCacheManager_->GetInode(inodeId2, inodeWrapper)); // hit cache and need refresh s3info + // FIXME (Wine93) + /* EXPECT_CALL(*metaClient_, GetOrModifyS3ChunkInfo(fsId_, inodeId, _, true, _, _)) .WillOnce(Return(MetaStatusCode::OK)); @@ -151,28 +166,7 @@ TEST_F(TestInodeCacheManager, GetInode) { ASSERT_EQ(inodeId, out.inodeid()); ASSERT_EQ(fsId_, out.fsid()); ASSERT_EQ(fileLength, out.length()); - - // enable cto and not opened and not dirty - curvefs::client::common::FLAGS_enableCto = true; - EXPECT_CALL(*metaClient_, GetInode(fsId_, inodeId2, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(inode2), SetArgPointee<3>(true), - Return(MetaStatusCode::OK))); - EXPECT_CALL(*metaClient_, - GetOrModifyS3ChunkInfo(fsId_, inodeId2, _, true, _, _)) - .WillOnce(Return(MetaStatusCode::OK)); - ASSERT_EQ(CURVEFS_ERROR::OK, - iCacheManager_->GetInode(inodeId2, inodeWrapper)); - - // enable cto and opened will hit cache - iCacheManager_->AddOpenedInode(inodeId2); - ASSERT_EQ(CURVEFS_ERROR::OK, - iCacheManager_->GetInode(inodeId2, inodeWrapper)); - - // enable cto and not opened and inode dirty will hit cache - iCacheManager_->RemoveOpenedInode(inodeId2); - inodeWrapper->MarkDirty(); - ASSERT_EQ(CURVEFS_ERROR::OK, - iCacheManager_->GetInode(inodeId2, inodeWrapper)); + */ } TEST_F(TestInodeCacheManager, GetInodeAttr) { @@ -218,40 +212,11 @@ TEST_F(TestInodeCacheManager, GetInodeAttr) { ret = 
iCacheManager_->CreateInode(param, inodeWrapper); ASSERT_EQ(CURVEFS_ERROR::OK, ret); - ret = iCacheManager_->GetInodeAttr(inodeId + 1, &out); - ASSERT_EQ(CURVEFS_ERROR::OK, ret); - ASSERT_EQ(inodeId + 1, out.inodeid()); - ASSERT_EQ(fsId_ + 1, out.fsid()); - ASSERT_EQ(FsFileType::TYPE_FILE, out.type()); - - // enable cto will get from metaserver - curvefs::client::common::FLAGS_enableCto = true; - EXPECT_CALL(*metaClient_, BatchGetInodeAttr(fsId_, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(attrs), Return(MetaStatusCode::OK))); - ret = iCacheManager_->GetInodeAttr(inodeId + 1, &out); - ASSERT_EQ(CURVEFS_ERROR::OK, ret); - ASSERT_EQ(inodeId, out.inodeid()); - ASSERT_EQ(fsId_, out.fsid()); - ASSERT_EQ(FsFileType::TYPE_FILE, out.type()); - - // set this inode open - curvefs::client::common::FLAGS_enableCto = false; - iCacheManager_->AddOpenedInode(inodeId + 1); - ret = iCacheManager_->GetInodeAttr(inodeId + 1, &out); - ASSERT_EQ(CURVEFS_ERROR::OK, ret); - ASSERT_EQ(inodeId + 1, out.inodeid()); - ASSERT_EQ(fsId_ + 1, out.fsid()); - ASSERT_EQ(FsFileType::TYPE_FILE, out.type()); - - // set this inode dirty - curvefs::client::common::FLAGS_enableCto = false; - iCacheManager_->RemoveOpenedInode(inodeId + 1); - inodeWrapper->MarkDirty(); - ret = iCacheManager_->GetInodeAttr(inodeId + 1, &out); - ASSERT_EQ(CURVEFS_ERROR::OK, ret); - ASSERT_EQ(inodeId + 1, out.inodeid()); - ASSERT_EQ(fsId_ + 1, out.fsid()); - ASSERT_EQ(FsFileType::TYPE_FILE, out.type()); + // ret = iCacheManager_->GetInodeAttr(inodeId + 1, &out); + // ASSERT_EQ(CURVEFS_ERROR::OK, ret); + // ASSERT_EQ(inodeId + 1, out.inodeid()); + // ASSERT_EQ(fsId_ + 1, out.fsid()); + // ASSERT_EQ(FsFileType::TYPE_FILE, out.type()); } TEST_F(TestInodeCacheManager, CreateAndGetInode) { @@ -281,6 +246,7 @@ TEST_F(TestInodeCacheManager, CreateAndGetInode) { ASSERT_EQ(fsId_, out.fsid()); ASSERT_EQ(FsFileType::TYPE_FILE, out.type()); + /* FIXME (Wine93) ret = iCacheManager_->GetInode(inodeId, inodeWrapper); ASSERT_EQ(CURVEFS_ERROR::OK, ret); @@ -288,6 +254,7 @@ TEST_F(TestInodeCacheManager, CreateAndGetInode) { ASSERT_EQ(inodeId, out.inodeid()); ASSERT_EQ(fsId_, out.fsid()); ASSERT_EQ(FsFileType::TYPE_FILE, out.type()); + */ } TEST_F(TestInodeCacheManager, DeleteInode) { @@ -304,12 +271,7 @@ TEST_F(TestInodeCacheManager, DeleteInode) { ASSERT_EQ(CURVEFS_ERROR::OK, ret); } -TEST_F(TestInodeCacheManager, ClearInodeCache) { - uint64_t inodeId = 100; - iCacheManager_->ClearInodeCache(inodeId); -} - -TEST_F(TestInodeCacheManager, ShipToFlushAndFlushAll) { +TEST_F(TestInodeCacheManager, ShipToFlush) { uint64_t inodeId = 100; Inode inode; inode.set_inodeid(inodeId); @@ -323,18 +285,6 @@ TEST_F(TestInodeCacheManager, ShipToFlushAndFlushAll) { inodeWrapper->AppendS3ChunkInfo(1, info); iCacheManager_->ShipToFlush(inodeWrapper); - - EXPECT_CALL(*metaClient_, - UpdateInodeWithOutNlinkAsync_rvr(_, _, _, _, _)) - .WillOnce(Invoke( - [](uint32_t /*fsId*/, uint64_t /*inodeId*/, - const InodeAttr& /*attr*/, MetaServerClientDone* done, - DataIndices /*indices*/) { - done->SetMetaStatusCode(MetaStatusCode::OK); - done->Run(); - })); - - iCacheManager_->FlushAll(); } TEST_F(TestInodeCacheManager, BatchGetInodeAttr) { @@ -403,6 +353,7 @@ TEST_F(TestInodeCacheManager, BatchGetInodeAttrAsync) { std::shared_ptr wrapper; iCacheManager_->GetInode(inodeId1, wrapper); + /* FIXME (Wine93) EXPECT_CALL(*metaClient_, SplitRequestInodes(_, _, _)) .WillOnce(DoAll(SetArgPointee<2>(inodeGroups), Return(true))); @@ -424,6 +375,7 @@ TEST_F(TestInodeCacheManager, BatchGetInodeAttrAsync) { 
ASSERT_EQ(attrs.size(), 2); ASSERT_TRUE(attrs.find(inodeId1) != attrs.end()); ASSERT_TRUE(attrs.find(inodeId2) != attrs.end()); + */ } TEST_F(TestInodeCacheManager, BatchGetXAttr) { @@ -467,65 +419,6 @@ TEST_F(TestInodeCacheManager, BatchGetXAttr) { AnyOf("100", "200")); } -TEST_F(TestInodeCacheManager, TestFlushInodeBackground) { - uint64_t inodeId = 100; - Inode inode; - inode.set_inodeid(inodeId); - inode.set_fsid(fsId_); - inode.set_type(FsFileType::TYPE_S3); - InodeParam param; - param.fsId = fsId_; - param.type = FsFileType::TYPE_FILE; - std::map> inodeMap; - - for (int i = 0; i < 4; i++) { - inode.set_inodeid(inodeId + i); - EXPECT_CALL(*metaClient_, CreateInode(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(inode), Return(MetaStatusCode::OK))); - std::shared_ptr inodeWrapper; - iCacheManager_->CreateInode(param, inodeWrapper); - inodeWrapper->MarkDirty(); - S3ChunkInfo info; - inodeWrapper->AppendS3ChunkInfo(1, info); - iCacheManager_->ShipToFlush(inodeWrapper); - inodeMap.emplace(inodeId + i, inodeWrapper); - } - - EXPECT_CALL(*metaClient_, - UpdateInodeWithOutNlinkAsync_rvr(_, _, _, _, _)) - .WillRepeatedly( - Invoke([](uint32_t /*fsId*/, uint64_t /*inodeId*/, - const InodeAttr& /*attr*/, MetaServerClientDone* done, - DataIndices /*dataIndices*/) { - // run closure in a separate thread - std::thread th{[done]() { - std::this_thread::sleep_for(std::chrono::microseconds(200)); - done->SetMetaStatusCode(MetaStatusCode::OK); - done->Run(); - }}; - - th.detach(); - })); - - EXPECT_CALL(*metaClient_, GetOrModifyS3ChunkInfoAsync(_, _, _, _)) - .WillRepeatedly( - Invoke([](uint32_t fsId, uint64_t inodeId, - const google::protobuf::Map - &s3ChunkInfos, - MetaServerClientDone *done) { - done->SetMetaStatusCode(MetaStatusCode::OK); - done->Run(); - })); - iCacheManager_->Run(); - sleep(10); - ASSERT_EQ(false, iCacheManager_->IsDirtyMapExist(100)); - ASSERT_EQ(false, iCacheManager_->IsDirtyMapExist(101)); - auto iter = inodeMap.find(100); - ASSERT_EQ(false, iter->second->IsDirty()); - iter = inodeMap.find(102); - ASSERT_EQ(false, iter->second->IsDirty()); - iCacheManager_->Stop(); -} TEST_F(TestInodeCacheManager, CreateAndGetInodeWhenTimeout) { curvefs::client::common::FLAGS_enableCto = false; diff --git a/curvefs/test/client/volume/default_volume_storage_test.cpp b/curvefs/test/client/volume/default_volume_storage_test.cpp index 7f0058b6c2..0eb3b4a5bf 100644 --- a/curvefs/test/client/volume/default_volume_storage_test.cpp +++ b/curvefs/test/client/volume/default_volume_storage_test.cpp @@ -25,7 +25,8 @@ #include #include -#include "curvefs/src/client/error_code.h" +#include "curvefs/src/client/filesystem/error.h" +#include "curvefs/src/client/filesystem/meta.h" #include "curvefs/test/client/mock_inode_cache_manager.h" #include "curvefs/test/client/mock_metaserver_client.h" #include "curvefs/test/volume/mock/mock_block_device_client.h" @@ -35,6 +36,7 @@ namespace curvefs { namespace client { +using ::curvefs::client::filesystem::FileOut; using ::curvefs::client::rpcclient::MockMetaServerClient; using ::curvefs::volume::AllocateHint; using ::curvefs::volume::Extent; @@ -74,11 +76,12 @@ TEST_F(DefaultVolumeStorageTest, WriteAndReadTest_InodeNotFound) { off_t offset = 0; size_t len = 4096; std::unique_ptr data(new char[len]); + FileOut fileOut; ASSERT_EQ(CURVEFS_ERROR::NOTEXIST, storage_.Read(ino, offset, len, data.get())); ASSERT_EQ(CURVEFS_ERROR::NOTEXIST, - storage_.Write(ino, offset, len, data.get())); + storage_.Write(ino, offset, len, data.get(), &fileOut)); } TEST_F(DefaultVolumeStorageTest, 
ReadTest_BlockDevReadError) { @@ -245,9 +248,10 @@ TEST_F(DefaultVolumeStorageTest, WriteTest_PrepareError) { off_t offset = 0; size_t len = 4096; std::unique_ptr data(new char[len]); + FileOut fileOut; ASSERT_EQ(CURVEFS_ERROR::NO_SPACE, - storage_.Write(ino, offset, len, data.get())); + storage_.Write(ino, offset, len, data.get(), &fileOut)); } TEST_F(DefaultVolumeStorageTest, WriteTest_BlockDevWriteError) { @@ -286,12 +290,13 @@ TEST_F(DefaultVolumeStorageTest, WriteTest_BlockDevWriteError) { off_t offset = 0; size_t len = 4096; std::unique_ptr data(new char[len]); + FileOut fileOut; EXPECT_CALL(blockDev_, Writev(_)) .WillOnce(Return(-1)); ASSERT_EQ(CURVEFS_ERROR::IO_ERROR, - storage_.Write(ino, offset, len, data.get())); + storage_.Write(ino, offset, len, data.get(), &fileOut)); } TEST_F(DefaultVolumeStorageTest, WriteTest_BlockDevWriteSuccess) { @@ -331,6 +336,7 @@ TEST_F(DefaultVolumeStorageTest, WriteTest_BlockDevWriteSuccess) { off_t offset = 0; size_t len = 4096; std::unique_ptr data(new char[len]); + FileOut fileOut; EXPECT_CALL(blockDev_, Writev(_)) .WillOnce(Return(len)); @@ -338,7 +344,8 @@ TEST_F(DefaultVolumeStorageTest, WriteTest_BlockDevWriteSuccess) { EXPECT_CALL(inodeCacheMgr_, ShipToFlush(inodeWrapper)) .Times(1); - ASSERT_EQ(CURVEFS_ERROR::OK, storage_.Write(ino, offset, len, data.get())); + ASSERT_EQ(CURVEFS_ERROR::OK, + storage_.Write(ino, offset, len, data.get(), &fileOut)); ASSERT_EQ(offset + len, inodeWrapper->GetInode().length()); } diff --git a/curvefs/test/mds/fs_manager_test2.cpp b/curvefs/test/mds/fs_manager_test2.cpp index 08a6d23963..e6a3bfebad 100644 --- a/curvefs/test/mds/fs_manager_test2.cpp +++ b/curvefs/test/mds/fs_manager_test2.cpp @@ -159,7 +159,6 @@ TEST_F(FsManagerTest2, CreateFoundConflictFsNameAndNotIdenticalToPreviousOne) { std::string fsname = "hello"; FSType type = FSType::TYPE_S3; uint64_t blocksize = 4 * 1024; - bool enableSumInDir = false; FsDetail detail; auto* s3Info = detail.mutable_s3info(); s3Info->set_ak("hello"); @@ -247,7 +246,6 @@ TEST_F(FsManagerTest2, CreateFoundUnCompleteOperation) { std::string fsname = "hello"; FSType type = FSType::TYPE_S3; uint64_t blocksize = 4 * 1024; - bool enableSumInDir = false; FsDetail detail; auto* s3Info = detail.mutable_s3info(); s3Info->set_ak("hello"); @@ -330,7 +328,6 @@ TEST_F(FsManagerTest2, createHybridFs) { std::string fsname = "hello"; FSType type = FSType::TYPE_HYBRID; uint64_t blocksize = 4 * 1024; - bool enableSumInDir = false; FsDetail detail; auto* s3Info = detail.mutable_s3info(); s3Info->set_ak("hello"); @@ -339,6 +336,7 @@ TEST_F(FsManagerTest2, createHybridFs) { s3Info->set_bucketname("hello"); s3Info->set_blocksize(4 * 1024); s3Info->set_chunksize(16 * 1024 * 1024); + s3Info->set_objectprefix(0); FsInfo fsinfo; fsinfo.set_status(FsStatus::NEW); diff --git a/curvefs/test/mds/heartbeat/copyset_conf_generator_test.cpp b/curvefs/test/mds/heartbeat/copyset_conf_generator_test.cpp index b82ee520f1..951832a3b3 100644 --- a/curvefs/test/mds/heartbeat/copyset_conf_generator_test.cpp +++ b/curvefs/test/mds/heartbeat/copyset_conf_generator_test.cpp @@ -29,13 +29,13 @@ #include "curvefs/test/mds/mock/mock_coordinator.h" #include "curvefs/test/mds/mock/mock_topology.h" +using ::curvefs::mds::MockCoordinator; +using ::curvefs::mds::topology::CopySetIdType; using ::curvefs::mds::topology::MockIdGenerator; using ::curvefs::mds::topology::MockStorage; using ::curvefs::mds::topology::MockTokenGenerator; using ::curvefs::mds::topology::MockTopology; -using ::curvefs::mds::MockCoordinator; using 
::curvefs::mds::topology::TopoStatusCode; -using ::curvefs::mds::topology::CopySetIdType; using ::curvefs::mds::topology::UNINITIALIZE_ID; using ::testing::_; using ::testing::DoAll; @@ -72,15 +72,14 @@ class TestCopysetConfGenerator : public ::testing::Test { }; TEST_F(TestCopysetConfGenerator, get_copyset_fail) { - MetaServerIdType reportId; + MetaServerIdType reportId = 1; PoolIdType poolId = 1; CopySetIdType copysetId = 2; ::curvefs::mds::topology::CopySetInfo reportCopySetInfo(poolId, copysetId); ::curvefs::mds::heartbeat::ConfigChangeInfo configChInfo; ::curvefs::mds::heartbeat::CopySetConf copysetConf; - EXPECT_CALL(*topology_, GetCopySet(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*topology_, GetCopySet(_, _)).WillOnce(Return(false)); bool ret = generator_->GenCopysetConf(reportId, reportCopySetInfo, configChInfo, ©setConf); @@ -206,8 +205,8 @@ TEST_F(TestCopysetConfGenerator, get_report_copyset_follower2) { recordCopySetInfo.SetEpoch(3); EXPECT_CALL(*topology_, GetCopySet(_, _)) .WillOnce(DoAll(SetArgPointee<1>(recordCopySetInfo), Return(true))); - ret = generator_->GenCopysetConf(reportId, reportCopySetInfo, - configChInfo, ©setConf); + ret = generator_->GenCopysetConf(reportId, reportCopySetInfo, configChInfo, + ©setConf); ASSERT_FALSE(ret); } @@ -342,8 +341,7 @@ TEST_F(TestCopysetConfGenerator, get_report_copyset_follower7) { EXPECT_CALL(*coordinator_, MetaserverGoingToAdd(_, _)) .WillOnce(Return(false)); - EXPECT_CALL(*topology_, GetMetaServer(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*topology_, GetMetaServer(_, _)).WillOnce(Return(false)); sleep(1); bool ret = generator_->GenCopysetConf(reportId, reportCopySetInfo, diff --git a/curvefs/test/mds/mds_test.cpp b/curvefs/test/mds/mds_test.cpp index 6ab8c72c07..33939323ea 100644 --- a/curvefs/test/mds/mds_test.cpp +++ b/curvefs/test/mds/mds_test.cpp @@ -54,8 +54,8 @@ using ::curve::kvstorage::EtcdClientImp; namespace curvefs { namespace mds { -const char* kEtcdAddr = "127.0.0.1:20032"; -const char* kMdsListenAddr = "127.0.0.1:20035"; +const char *kEtcdAddr = "127.0.0.1:20032"; +const char *kMdsListenAddr = "127.0.0.1:20035"; class MdsTest : public ::testing::Test { protected: @@ -63,9 +63,7 @@ class MdsTest : public ::testing::Test { void TearDown() override {} - static void ClearEnv() { - system("rm -rf curve_fs_test_mds.etcd"); - } + static void ClearEnv() { system("rm -rf curve_fs_test_mds.etcd"); } static void StartEtcd() { etcdPid_ = fork(); @@ -88,7 +86,8 @@ class MdsTest : public ::testing::Test { } auto client = std::make_shared(); - EtcdConf conf{const_cast(kEtcdAddr), strlen(kEtcdAddr), 1000}; + EtcdConf conf{const_cast(kEtcdAddr), + static_cast(strlen(kEtcdAddr)), 1000}; uint64_t now = curve::common::TimeUtility::GetTimeofDaySec(); bool initSucc = false; while (curve::common::TimeUtility::GetTimeofDaySec() - now <= 50) { @@ -124,7 +123,7 @@ class MdsTest : public ::testing::Test { pid_t MdsTest::etcdPid_ = 0; void GetChunkIds(std::shared_ptr conf, - int numChunkIds, vector* data) { + int numChunkIds, vector *data) { brpc::Channel channel; std::string allocateServer(kMdsListenAddr); if (channel.Init(allocateServer.c_str(), NULL) != 0) { @@ -134,7 +133,7 @@ void GetChunkIds(std::shared_ptr conf, return; } - brpc::Controller* cntl = new brpc::Controller(); + brpc::Controller *cntl = new brpc::Controller(); AllocateS3ChunkRequest request; AllocateS3ChunkResponse response; curvefs::mds::MdsService_Stub stub(&channel); diff --git a/curvefs/test/mds/schedule/recoverScheduler_test.cpp 
b/curvefs/test/mds/schedule/recoverScheduler_test.cpp index be780392b3..d48c6a9ee1 100644 --- a/curvefs/test/mds/schedule/recoverScheduler_test.cpp +++ b/curvefs/test/mds/schedule/recoverScheduler_test.cpp @@ -192,7 +192,6 @@ TEST_F(TestRecoverSheduler, test_all_metaServer_online_offline) { MetaServerIdType id1 = 1; MetaServerIdType id2 = 2; MetaServerIdType id3 = 3; - MetaServerIdType id4 = 4; Operator op; EXPECT_CALL(*topoAdapter_, GetAvgScatterWidthInPool(_)) .WillRepeatedly(Return(90)); diff --git a/curvefs/test/mds/topology/test_topology_manager.cpp b/curvefs/test/mds/topology/test_topology_manager.cpp index 0c6e1e6b29..a7794b7011 100644 --- a/curvefs/test/mds/topology/test_topology_manager.cpp +++ b/curvefs/test/mds/topology/test_topology_manager.cpp @@ -652,7 +652,6 @@ TEST_F(TestTopologyManager, test_RegistServer_PoolNotFound) { } TEST_F(TestTopologyManager, test_RegistServer_ZoneNotFound) { - ServerIdType id = 0x31; PoolIdType poolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPool(poolId, "pool1"); @@ -674,7 +673,6 @@ TEST_F(TestTopologyManager, test_RegistServer_ZoneNotFound) { } TEST_F(TestTopologyManager, test_RegistServer_AllocateIdFail) { - ServerIdType id = 0x31; PoolIdType poolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPool(poolId, "pool"); @@ -1110,7 +1108,6 @@ TEST_F(TestTopologyManager, test_CreateZone_success) { TEST_F(TestTopologyManager, test_CreateZone_AllocateIdFail) { PoolIdType poolId = 0x11; - ZoneIdType zoneId = 0x21; PrepareAddPool(poolId, "poolname1"); CreateZoneRequest request; @@ -1930,8 +1927,6 @@ TEST_F(TestTopologyManager, TEST_F(TestTopologyManager, test_CreatePartitionWithOutAvailableCopyset_HaveNoAvailableMetaserver) { PoolIdType poolId = 0x11; - CopySetIdType copysetId = 0x51; - PartitionIdType partitionId = 0x61; Pool::RedundanceAndPlaceMentPolicy policy; policy.replicaNum = 3; @@ -1969,8 +1964,6 @@ TEST_F(TestTopologyManager, TEST_F(TestTopologyManager, test_CreatePartitionWithOutAvailableCopyset_MetaServerSpaceIsFull) { PoolIdType poolId = 0x11; - CopySetIdType copysetId = 0x51; - PartitionIdType partitionId = 0x61; Pool::RedundanceAndPlaceMentPolicy policy; policy.replicaNum = 3; @@ -2081,8 +2074,6 @@ TEST_F(TestTopologyManager, TEST_F(TestTopologyManager, test_CreatePartitionWithOutAvailableCopyset_HaveOfflineMetaserver1) { PoolIdType poolId = 0x11; - CopySetIdType copysetId = 0x51; - PartitionIdType partitionId = 0x61; Pool::RedundanceAndPlaceMentPolicy policy; policy.replicaNum = 3; @@ -2681,8 +2672,6 @@ TEST_F(TestTopologyManager, test_ListPartitionEmpty_Success) { PoolIdType poolId = 0x11; CopySetIdType copysetId = 0x51; PartitionIdType pId1 = 0x61; - PartitionIdType pId2 = 0x62; - PartitionIdType pId3 = 0x63; Pool::RedundanceAndPlaceMentPolicy policy; policy.replicaNum = 3; @@ -2818,7 +2807,6 @@ TEST_F(TestTopologyManager, test_GetCopysetOfPartition_CopysetNotFound) { } TEST_F(TestTopologyManager, test_GetCopysetMembers_Success) { - FsIdType fsId = 0x01; PoolIdType poolId = 0x11; CopySetIdType copysetId = 0x51; @@ -2893,8 +2881,6 @@ TEST_F(TestTopologyManager, test_RegistMemcacheCluster_AllocateIdFail) { server.set_port(1); *request.add_servers() = server; - MemcacheClusterIdType mcCId(1); - EXPECT_CALL(*idGenerator_, GenMemCacheClusterId()) .WillOnce(Return(UNINITIALIZE_ID)); diff --git a/curvefs/test/metaserver/copyset/apply_queue_test.cpp b/curvefs/test/metaserver/copyset/apply_queue_test.cpp deleted file mode 100644 index b34526a3fe..0000000000 --- a/curvefs/test/metaserver/copyset/apply_queue_test.cpp +++ /dev/null @@ -1,89 +0,0 @@ 
-/* - * Copyright (c) 2021 NetEase Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Project: curve - * Date: Thu Sep 2 14:49:04 CST 2021 - * Author: wuhanqing - */ - -#include "curvefs/src/metaserver/copyset/apply_queue.h" - -#include - -#include - -#include "src/common/concurrent/count_down_event.h" - -namespace curvefs { -namespace metaserver { -namespace copyset { - -using ::curve::common::CountDownEvent; - -TEST(ApplyQueueTest, StartAndStopTest) { - ApplyQueue applyQueue; - - ApplyQueueOption option; - option.workerCount = 0; - option.queueDepth = 0; - - EXPECT_FALSE(applyQueue.Start(option)); - - option.workerCount = 5; - option.queueDepth = 100 * 100 * 100; - - ASSERT_TRUE(applyQueue.Start(option)); - EXPECT_TRUE(applyQueue.Start(option)); - - std::atomic runned(false); - auto task = [&]() { - runned = true; - }; - - applyQueue.Push(time(nullptr), task); - - while (!runned) {} - - applyQueue.Stop(); - applyQueue.Stop(); -} - -TEST(ApplyQueueTest, FlushTest) { - ApplyQueueOption option; - option.workerCount = 10; - option.queueDepth = 100 * 100; - - ApplyQueue applyQueue; - ASSERT_TRUE(applyQueue.Start(option)); - - int taskCount = option.workerCount * option.queueDepth; - std::atomic runned(0); - - auto task = [&runned]() { runned.fetch_add(1, std::memory_order_relaxed); }; - - for (int i = 0; i < taskCount; ++i) { - applyQueue.Push(i, task); - } - - applyQueue.Flush(); - ASSERT_EQ(taskCount, runned.load(std::memory_order_relaxed)); - applyQueue.Stop(); -} - -} // namespace copyset -} // namespace metaserver -} // namespace curvefs diff --git a/curvefs/test/metaserver/copyset/concurrent_apply_queue_test.cpp b/curvefs/test/metaserver/copyset/concurrent_apply_queue_test.cpp new file mode 100644 index 0000000000..28763dd63a --- /dev/null +++ b/curvefs/test/metaserver/copyset/concurrent_apply_queue_test.cpp @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: curve + * File Created: 20230521 + * Author: Xinlong-Chen + */ + +#include + +#include +#include +#include + +#include "src/common/timeutility.h" +#include "curvefs/src/metaserver/copyset/concurrent_apply_queue.h" + +using curvefs::metaserver::copyset::ApplyQueue; +using curvefs::metaserver::copyset::ApplyOption; +using curvefs::metaserver::copyset::OperatorType; + +TEST(ApplyQueue, InitTest) { + ApplyQueue concurrentapply; + + { + // 1. 
init with invalid write-concurrentsize + ApplyOption opt(-1, 1, 1, 1); + ASSERT_FALSE(concurrentapply.Init(opt)); + } + + { + // 2. init with invalid write-concurrentdepth + ApplyOption opt(1, -1, 1, 1); + ASSERT_FALSE(concurrentapply.Init(opt)); + } + + { + // 3. init with invalid read-concurrentsize + ApplyOption opt(1, 1, -1, 1); + ASSERT_FALSE(concurrentapply.Init(opt)); + } + + { + // 4. init with invalid read-concurrentdepth + ApplyOption opt(1, 1, 1, -1); + ASSERT_FALSE(concurrentapply.Init(opt)); + } + + { + // 5. double init + ApplyOption opt(1, 1, 1, 1); + // init with vaild params + ASSERT_TRUE(concurrentapply.Init(opt)); + // re-init + ASSERT_TRUE(concurrentapply.Init(opt)); + } + + concurrentapply.Stop(); + // re-stop + concurrentapply.Stop(); +} + +static void InitReadWriteTypeList(std::vector *readTypeList, + std::vector *writeTypeList) { + *readTypeList = { + OperatorType::GetDentry, + OperatorType::ListDentry, + OperatorType::GetInode, + OperatorType::BatchGetInodeAttr, + OperatorType::BatchGetXAttr, + OperatorType::GetVolumeExtent + }; + + auto IsRead = [&readTypeList](OperatorType type) -> bool { + for (auto &readType : *readTypeList) { + if (type == readType) { + return true; + } + } + return false; + }; + + for (uint32_t i = 0; i < static_cast(OperatorType::OperatorTypeMax); ++i) { // NOLINT + if (!IsRead(static_cast(i))) { + writeTypeList->push_back(static_cast(i)); + } + } +} + +OperatorType get_random_type(const std::vector &type_list) { + return type_list[rand() % type_list.size()]; +} + +TEST(ApplyQueue, RunTest) { + std::vector readTypeList; + std::vector writeTypeList; + InitReadWriteTypeList(&readTypeList, &writeTypeList); + + auto read_type = get_random_type(readTypeList); + auto write_type = get_random_type(writeTypeList); + + ApplyQueue concurrentapply; + + int testw = 0; + auto wtask = [&testw]() { + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + testw++; + }; + + int testr = 0; + auto rtask = [&testr]() { + testr++; + }; + + // push write and read tasks + ApplyOption opt(1, 1, 1, 1); + + ASSERT_TRUE(concurrentapply.Init(opt)); + + ASSERT_TRUE(concurrentapply.Push(1, read_type, rtask)); + ASSERT_TRUE(concurrentapply.Push(1, write_type, wtask)); + + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + ASSERT_EQ(1, testr); + ASSERT_EQ(0, testw); + concurrentapply.Flush(); + ASSERT_EQ(1, testw); + + concurrentapply.Stop(); +} + +TEST(ApplyQueue, FlushTest) { + std::vector readTypeList; + std::vector writeTypeList; + InitReadWriteTypeList(&readTypeList, &writeTypeList); + + ApplyQueue concurrentapply; + ApplyOption opt(2, 5000, 1, 1); + ASSERT_TRUE(concurrentapply.Init(opt)); + + std::atomic testnum(0); + auto task = [&testnum]() { + testnum.fetch_add(1); + }; + + for (int i = 0; i < 5000; i++) { + auto write_type = get_random_type(writeTypeList); + concurrentapply.Push(i, write_type, task); + } + + ASSERT_LE(testnum, 5000); + concurrentapply.Flush(); + ASSERT_EQ(5000, testnum); + + concurrentapply.Stop(); +} + +TEST(ApplyQueue, ConcurrentTest) { + std::vector readTypeList; + std::vector writeTypeList; + InitReadWriteTypeList(&readTypeList, &writeTypeList); + + // interval flush when push + std::atomic stop(false); + std::atomic testnum(0); + ApplyQueue concurrentapply; + ApplyOption opt(10, 1, 5, 2); + ASSERT_TRUE(concurrentapply.Init(opt)); + + auto push = [&concurrentapply, &stop, &testnum, + &readTypeList, &writeTypeList]() { + auto task = [&testnum]() { + testnum.fetch_add(1); + }; + while (!stop.load()) { + for (int i = 0; i < 
10; i++) { + auto read_type = get_random_type(readTypeList); + auto write_type = get_random_type(writeTypeList); + concurrentapply.Push(i, read_type, task); + concurrentapply.Push(i, write_type, task); + } + } + }; + + auto flush = [&concurrentapply, &stop, &testnum]() { + while (!stop.load()) { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + concurrentapply.Flush(); + } + }; + + std::thread t(push); + std::thread f(flush); + + while (testnum.load() <= 1000000) { + } + + stop.store(true); + + std::cout << "thread exit, join" << std::endl; + t.join(); + f.join(); + + concurrentapply.Flush(); + ASSERT_GT(testnum, 1000000); + concurrentapply.Stop(); +} + diff --git a/curvefs/test/metaserver/copyset/copyset_node_test.cpp b/curvefs/test/metaserver/copyset/copyset_node_test.cpp index 85fe877a46..bb7e54dc44 100644 --- a/curvefs/test/metaserver/copyset/copyset_node_test.cpp +++ b/curvefs/test/metaserver/copyset/copyset_node_test.cpp @@ -111,8 +111,10 @@ TEST_F(CopysetNodeTest, TestInit) { // apply queue init failed { CopysetNodeOptions options = options_; - options.applyQueueOption.queueDepth = 0; - options.applyQueueOption.workerCount = 0; + options.applyQueueOption.wconcurrentsize = 0; + options.applyQueueOption.wqueuedepth = 0; + options.applyQueueOption.rconcurrentsize = 0; + options.applyQueueOption.rqueuedepth = 0; options.dataUri = "local:///mnt/data"; options.localFileSystem = &mockfs_; diff --git a/curvefs/test/metaserver/copyset/meta_operator_test.cpp b/curvefs/test/metaserver/copyset/meta_operator_test.cpp index 7f4876e19f..1f7ad131ef 100644 --- a/curvefs/test/metaserver/copyset/meta_operator_test.cpp +++ b/curvefs/test/metaserver/copyset/meta_operator_test.cpp @@ -468,6 +468,17 @@ TEST_F(MetaOperatorTest, PropostTest_PropostTaskFailed) { CopysetNode node(poolId, copysetId, conf, &mockNodeManager_); + curve::fs::MockLocalFileSystem localFs; + CopysetNodeOptions options; + options.dataUri = "local:///mnt/data"; + options.localFileSystem = &localFs; + options.storageOptions.type = "memory"; + + EXPECT_CALL(localFs, Mkdir(_)) + .WillOnce(Return(0)); + + EXPECT_TRUE(node.Init(options)); + node.on_leader_start(1); node.UpdateAppliedIndex(101); diff --git a/curvefs/test/metaserver/heartbeat_test.cpp b/curvefs/test/metaserver/heartbeat_test.cpp index 9d7ec69b0c..6e67841521 100644 --- a/curvefs/test/metaserver/heartbeat_test.cpp +++ b/curvefs/test/metaserver/heartbeat_test.cpp @@ -120,6 +120,8 @@ TEST_F(HeartbeatTest, test1) { options.copysetNodeManager = &CopysetNodeManager::GetInstance(); options.storeUri = "local://./metaserver_data/copysets"; options.fs = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); + options.resourceCollector = absl::make_unique( + 0, 0, options_.dataDir).get(); // mds service not start ASSERT_EQ(heartbeat.Init(options), 0); diff --git a/curvefs/test/metaserver/inode_manager_test.cpp b/curvefs/test/metaserver/inode_manager_test.cpp index 9faa65e68f..e1808c45bd 100644 --- a/curvefs/test/metaserver/inode_manager_test.cpp +++ b/curvefs/test/metaserver/inode_manager_test.cpp @@ -63,7 +63,6 @@ auto localfs = curve::fs::Ext4FileSystemImpl::getInstance(); class InodeManagerTest : public ::testing::Test { protected: void SetUp() override { - auto tablename = "partition:1"; dataDir_ = RandomStoragePath(); StorageOptions options; options.dataDir = dataDir_; @@ -415,7 +414,6 @@ TEST_F(InodeManagerTest, GetOrModifyS3ChunkInfo) { TEST_F(InodeManagerTest, UpdateInode) { // create inode - uint32_t fsId = 1; uint64_t ino = 2; Inode inode; diff --git 
a/curvefs/test/metaserver/metaserver_s3_adaptor_test.cpp b/curvefs/test/metaserver/metaserver_s3_adaptor_test.cpp index 9d21562c86..78f841178b 100644 --- a/curvefs/test/metaserver/metaserver_s3_adaptor_test.cpp +++ b/curvefs/test/metaserver/metaserver_s3_adaptor_test.cpp @@ -35,6 +35,7 @@ class MetaserverS3AdaptorTest : public testing::Test { option.blockSize = 1 * 1024 * 1024; option.chunkSize = 4 * 1024 * 1024; option.batchSize = 5; + option.objectPrefix = 0; option.enableDeleteObjects = false; mockMetaserverS3Client_ = new MockS3Client(); metaserverS3ClientAdaptor_ = new S3ClientAdaptorImpl(); @@ -206,6 +207,7 @@ TEST_F(MetaserverS3AdaptorTest, test_delete_batch_chunks) { option.blockSize = 1 * 1024 * 1024; option.chunkSize = 4 * 1024 * 1024; option.batchSize = 5; + option.objectPrefix = 0; option.enableDeleteObjects = true; metaserverS3ClientAdaptor_->Init(option, mockMetaserverS3Client_); @@ -234,6 +236,7 @@ TEST_F(MetaserverS3AdaptorTest, test_delete_batch_idempotence) { option.blockSize = 1 * 1024 * 1024; option.chunkSize = 4 * 1024 * 1024; option.batchSize = 5; + option.objectPrefix = 0; option.enableDeleteObjects = true; metaserverS3ClientAdaptor_->Init(option, mockMetaserverS3Client_); @@ -279,6 +282,7 @@ TEST_F(MetaserverS3AdaptorTest, test_delete_batch_deleted) { option.blockSize = 1 * 1024 * 1024; option.chunkSize = 4 * 1024 * 1024; option.batchSize = 5; + option.objectPrefix = 0; option.enableDeleteObjects = true; metaserverS3ClientAdaptor_->Init(option, mockMetaserverS3Client_); diff --git a/curvefs/test/metaserver/metastore_test.cpp b/curvefs/test/metaserver/metastore_test.cpp index e1083af2c5..fbb41016b6 100644 --- a/curvefs/test/metaserver/metastore_test.cpp +++ b/curvefs/test/metaserver/metastore_test.cpp @@ -1645,7 +1645,7 @@ TEST_F(MetastoreTest, GetInodeWithPaddingS3Meta) { request.set_mode(777); request.set_type(FsFileType::TYPE_FILE); - auto rc = metastore.CreateInode(&request, &response); + (void)metastore.CreateInode(&request, &response); ASSERT_EQ(response.statuscode(), MetaStatusCode::OK); inodeId = response.inode().inodeid(); } diff --git a/curvefs/test/metaserver/recycle_cleaner_test.cpp b/curvefs/test/metaserver/recycle_cleaner_test.cpp index dce73d392e..ab8cdc1f3b 100644 --- a/curvefs/test/metaserver/recycle_cleaner_test.cpp +++ b/curvefs/test/metaserver/recycle_cleaner_test.cpp @@ -154,7 +154,7 @@ TEST_F(RecycleCleanerTest, time_func_test) { struct tm tmDir; memset(&tmDir, 0, sizeof(tmDir)); - char* c = strptime(now, "%Y-%m-%d-%H", &tmDir); + (void)strptime(now, "%Y-%m-%d-%H", &tmDir); time_t dirTime = mktime(&tmDir); LOG(INFO) << "befor, time = " << timeStamp; diff --git a/curvefs/test/metaserver/s3compact/s3compact_test.cpp b/curvefs/test/metaserver/s3compact/s3compact_test.cpp index 99c37e9ce1..f0b7fd1896 100644 --- a/curvefs/test/metaserver/s3compact/s3compact_test.cpp +++ b/curvefs/test/metaserver/s3compact/s3compact_test.cpp @@ -259,7 +259,7 @@ TEST_F(S3CompactTest, test_GetNeedCompact) { ref->set_offset(i + 64 * j); ref->set_len(1); } - s3chunkinfoMap.insert({j, l}); + s3chunkinfoMap.insert({static_cast(j), l}); } ASSERT_EQ(impl_->GetNeedCompact(s3chunkinfoMap, 64 * 19 + 30, 64).size(), opts_.maxChunksPerCompact); @@ -484,7 +484,7 @@ TEST_F(S3CompactTest, test_ReadFullChunk) { int ret; std::list validList; struct CompactInodeJob::S3CompactCtx ctx { - 1, 1, PartitionInfo(), 4, 64, 0, s3adapter_.get() + 1, 1, PartitionInfo(), 4, 64, 0, 0, s3adapter_.get() }; struct CompactInodeJob::S3NewChunkInfo newChunkInfo; std::string fullChunk; @@ -529,7 +529,7 @@ 
TEST_F(S3CompactTest, test_ReadFullChunk) { TEST_F(S3CompactTest, test_WriteFullChunk) { struct CompactInodeJob::S3CompactCtx ctx { - 100, 1, PartitionInfo(), 4, 16, 0, s3adapter_.get() + 100, 1, PartitionInfo(), 4, 16, 0, 0, s3adapter_.get() }; struct CompactInodeJob::S3NewChunkInfo newChunkInfo { 2, 0, 3 @@ -595,6 +595,7 @@ TEST_F(S3CompactTest, test_CompactChunks) { s3info->set_bucketname("4"); s3info->set_blocksize(blockSize); s3info->set_chunksize(chunkSize); + s3info->set_objectprefix(0); return 0; }; diff --git a/curvefs/test/metaserver/storage/storage_test.cpp b/curvefs/test/metaserver/storage/storage_test.cpp index 34a82bd924..d47f41a1ee 100644 --- a/curvefs/test/metaserver/storage/storage_test.cpp +++ b/curvefs/test/metaserver/storage/storage_test.cpp @@ -845,7 +845,6 @@ void TestMixOperator(std::shared_ptr kvStorage) { void TestTransaction(std::shared_ptr kvStorage) { Status s; - size_t size; Dentry value; std::shared_ptr iterator; std::shared_ptr txn; diff --git a/curvefs_python/HOWTO.md b/curvefs_python/HOWTO.md new file mode 100644 index 0000000000..6ed054234a --- /dev/null +++ b/curvefs_python/HOWTO.md @@ -0,0 +1,5 @@ +1. `curvefs.py` and `curvefs_wrap.cxx` are generated by the command `swig -c++ -python curvefs.i`. +2. Revert `ListDir` and delete the `Opendir`/`Closedir` functions in `curvefs.py` if you don't intend to make changes to these functions. +3. Revert `_wrap_Read`/`_wrap_Listdir`/`_wrap_GetClusterId`/`_wrap_CBDClient_Read`/`_wrap_CBDClient_Listdir` if you don't intend to make changes to these functions. +4. :exclamation: C functions in `libcurvefs.h` are no longer recommended for use. +5. :exclamation: Types in `curve_type.h` differ from those in `include/client/*.h` even though they have similar names. diff --git a/curvefs_python/cbd_client.cpp b/curvefs_python/cbd_client.cpp index 583e67ca8c..2d0d3da6a9 100644 --- a/curvefs_python/cbd_client.cpp +++ b/curvefs_python/cbd_client.cpp @@ -26,9 +26,12 @@ #include "src/client/libcurve_file.h" -inline curve::client::UserInfo ToCurveClientUserInfo(UserInfo_t* userInfo) { +namespace { +inline curve::client::UserInfo ToCurveClientUserInfo( + const UserInfo_t* userInfo) { return curve::client::UserInfo(userInfo->owner, userInfo->password); } +} CBDClient::CBDClient() : client_(new curve::client::FileClient()) {} @@ -54,10 +57,17 @@ int CBDClient::Create(const char* filename, UserInfo_t* userInfo, size_t size) { return client_->Create(filename, ToCurveClientUserInfo(userInfo), size); } -int CBDClient::Create2(const char* filename, UserInfo_t* userInfo, size_t size, - uint64_t stripeUnit, uint64_t stripeCount) { - return client_->Create2(filename, ToCurveClientUserInfo(userInfo), - size, stripeUnit, stripeCount); +int CBDClient::Create2(const CreateContext* context) { + curve::client::CreateFileContext internal; + internal.pagefile = true; + internal.name = context->name; + internal.user = ToCurveClientUserInfo(&context->user); + internal.length = context->length; + internal.poolset = context->poolset; + internal.stripeUnit = context->stripeUnit; + internal.stripeCount = context->stripeCount; + + return client_->Create2(internal); } int CBDClient::Unlink(const char* filename, UserInfo_t* userInfo) { @@ -166,3 +176,7 @@ int CBDClient::Rmdir(const char* dirpath, UserInfo_t* userInfo) { std::string CBDClient::GetClusterId() { return client_->GetClusterId(); } + +std::vector CBDClient::ListPoolset() { + return client_->ListPoolset(); +} diff --git a/curvefs_python/cbd_client.h b/curvefs_python/cbd_client.h index 829c01a6de..64109ef8e5
100644 --- a/curvefs_python/cbd_client.h +++ b/curvefs_python/cbd_client.h @@ -25,6 +25,7 @@ #include #include +#include #include "curvefs_python/curve_type.h" @@ -48,8 +49,7 @@ class CBDClient { int Close(int fd); int Create(const char* filename, UserInfo_t* userInfo, size_t size); - int Create2(const char* filename, UserInfo_t* userInfo, size_t size, - uint64_t stripeUnit, uint64_t stripeCount); + int Create2(const CreateContext* context); int Unlink(const char* filename, UserInfo_t* info); int DeleteForce(const char* filename, UserInfo_t* info); int Recover(const char* filename, UserInfo_t* info, uint64_t fileId); @@ -76,6 +76,8 @@ class CBDClient { std::string GetClusterId(); + std::vector ListPoolset(); + private: std::unique_ptr client_; }; diff --git a/curvefs_python/curve_type.h b/curvefs_python/curve_type.h index be4353c184..d6603e238d 100644 --- a/curvefs_python/curve_type.h +++ b/curvefs_python/curve_type.h @@ -27,6 +27,8 @@ #include #include +#include + #define CURVE_INODE_DIRECTORY 0 #define CURVE_INODE_PAGEFILE 1 #define CURVEINODE_APPENDFILE 2 @@ -134,4 +136,13 @@ typedef struct DirInfos { FileInfo_t* fileinfo; } DirInfos_t; +struct CreateContext { + std::string name; + size_t length; + UserInfo user; + std::string poolset; + uint64_t stripeUnit; + uint64_t stripeCount; +}; + #endif // CURVEFS_PYTHON_CURVE_TYPE_H_ diff --git a/curvefs_python/curvefs.py b/curvefs_python/curvefs.py index 464b1a8b7c..4b58a1244e 100644 --- a/curvefs_python/curvefs.py +++ b/curvefs_python/curvefs.py @@ -313,6 +313,48 @@ def __init__(self): DirInfos_t_swigregister = _curvefs.DirInfos_t_swigregister DirInfos_t_swigregister(DirInfos_t) +class CreateContext(_object): + __swig_setmethods__ = {} + __setattr__ = lambda self, name, value: _swig_setattr(self, CreateContext, name, value) + __swig_getmethods__ = {} + __getattr__ = lambda self, name: _swig_getattr(self, CreateContext, name) + __repr__ = _swig_repr + __swig_setmethods__["name"] = _curvefs.CreateContext_name_set + __swig_getmethods__["name"] = _curvefs.CreateContext_name_get + if _newclass: + name = _swig_property(_curvefs.CreateContext_name_get, _curvefs.CreateContext_name_set) + __swig_setmethods__["length"] = _curvefs.CreateContext_length_set + __swig_getmethods__["length"] = _curvefs.CreateContext_length_get + if _newclass: + length = _swig_property(_curvefs.CreateContext_length_get, _curvefs.CreateContext_length_set) + __swig_setmethods__["user"] = _curvefs.CreateContext_user_set + __swig_getmethods__["user"] = _curvefs.CreateContext_user_get + if _newclass: + user = _swig_property(_curvefs.CreateContext_user_get, _curvefs.CreateContext_user_set) + __swig_setmethods__["poolset"] = _curvefs.CreateContext_poolset_set + __swig_getmethods__["poolset"] = _curvefs.CreateContext_poolset_get + if _newclass: + poolset = _swig_property(_curvefs.CreateContext_poolset_get, _curvefs.CreateContext_poolset_set) + __swig_setmethods__["stripeUnit"] = _curvefs.CreateContext_stripeUnit_set + __swig_getmethods__["stripeUnit"] = _curvefs.CreateContext_stripeUnit_get + if _newclass: + stripeUnit = _swig_property(_curvefs.CreateContext_stripeUnit_get, _curvefs.CreateContext_stripeUnit_set) + __swig_setmethods__["stripeCount"] = _curvefs.CreateContext_stripeCount_set + __swig_getmethods__["stripeCount"] = _curvefs.CreateContext_stripeCount_get + if _newclass: + stripeCount = _swig_property(_curvefs.CreateContext_stripeCount_get, _curvefs.CreateContext_stripeCount_set) + + def __init__(self): + this = _curvefs.new_CreateContext() + try: + self.this.append(this) + 
except __builtin__.Exception: + self.this = this + __swig_destroy__ = _curvefs.delete_CreateContext + __del__ = lambda self: None +CreateContext_swigregister = _curvefs.CreateContext_swigregister +CreateContext_swigregister(CreateContext) + def Init(path): return _curvefs.Init(path) @@ -374,14 +416,14 @@ def Unlink(filename, info): return _curvefs.Unlink(filename, info) Unlink = _curvefs.Unlink -def DeleteForce(filename, info): - return _curvefs.DeleteForce(filename, info) -DeleteForce = _curvefs.DeleteForce - def Recover(filename, info, fileId): return _curvefs.Recover(filename, info, fileId) Recover = _curvefs.Recover +def DeleteForce(filename, info): + return _curvefs.DeleteForce(filename, info) +DeleteForce = _curvefs.DeleteForce + def Listdir(dirpath, info): return _curvefs.Listdir(dirpath, info) Listdir = _curvefs.Listdir @@ -432,8 +474,8 @@ def Close(self, fd): def Create(self, filename, userInfo, size): return _curvefs.CBDClient_Create(self, filename, userInfo, size) - def Create2(self, filename, userInfo, size, stripeUnit, stripeCount): - return _curvefs.CBDClient_Create2(self, filename, userInfo, size, stripeUnit, stripeCount) + def Create2(self, context): + return _curvefs.CBDClient_Create2(self, context) def Unlink(self, filename, info): return _curvefs.CBDClient_Unlink(self, filename, info) @@ -479,6 +521,9 @@ def Rmdir(self, dirpath, info): def GetClusterId(self): return _curvefs.CBDClient_GetClusterId(self) + + def ListPoolset(self): + return _curvefs.CBDClient_ListPoolset(self) CBDClient_swigregister = _curvefs.CBDClient_swigregister CBDClient_swigregister(CBDClient) diff --git a/curvefs_python/curvefs_wrap.cxx b/curvefs_python/curvefs_wrap.cxx index 190b1a5c57..018161855f 100644 --- a/curvefs_python/curvefs_wrap.cxx +++ b/curvefs_python/curvefs_wrap.cxx @@ -3022,22 +3022,24 @@ SWIG_Python_NonDynamicSetAttr(PyObject *obj, PyObject *name, PyObject *value) { #define SWIGTYPE_p_AioContext swig_types[0] #define SWIGTYPE_p_CBDClient swig_types[1] -#define SWIGTYPE_p_DirInfos swig_types[2] -#define SWIGTYPE_p_FileInfo swig_types[3] -#define SWIGTYPE_p_UserInfo swig_types[4] -#define SWIGTYPE_p_char swig_types[5] -#define SWIGTYPE_p_f_p_AioContext__void swig_types[6] -#define SWIGTYPE_p_int swig_types[7] -#define SWIGTYPE_p_long_long swig_types[8] -#define SWIGTYPE_p_short swig_types[9] -#define SWIGTYPE_p_signed_char swig_types[10] -#define SWIGTYPE_p_unsigned_char swig_types[11] -#define SWIGTYPE_p_unsigned_int swig_types[12] -#define SWIGTYPE_p_unsigned_long_long swig_types[13] -#define SWIGTYPE_p_unsigned_short swig_types[14] -#define SWIGTYPE_p_void swig_types[15] -static swig_type_info *swig_types[17]; -static swig_module_info swig_module = {swig_types, 16, 0, 0, 0, 0}; +#define SWIGTYPE_p_CreateContext swig_types[2] +#define SWIGTYPE_p_DirInfos swig_types[3] +#define SWIGTYPE_p_FileInfo swig_types[4] +#define SWIGTYPE_p_UserInfo swig_types[5] +#define SWIGTYPE_p_char swig_types[6] +#define SWIGTYPE_p_f_p_AioContext__void swig_types[7] +#define SWIGTYPE_p_int swig_types[8] +#define SWIGTYPE_p_long_long swig_types[9] +#define SWIGTYPE_p_short swig_types[10] +#define SWIGTYPE_p_signed_char swig_types[11] +#define SWIGTYPE_p_std__vectorT_std__string_t swig_types[12] +#define SWIGTYPE_p_unsigned_char swig_types[13] +#define SWIGTYPE_p_unsigned_int swig_types[14] +#define SWIGTYPE_p_unsigned_long_long swig_types[15] +#define SWIGTYPE_p_unsigned_short swig_types[16] +#define SWIGTYPE_p_void swig_types[17] +static swig_type_info *swig_types[19]; +static swig_module_info swig_module 
= {swig_types, 18, 0, 0, 0, 0}; #define SWIG_TypeQuery(name) SWIG_TypeQueryModule(&swig_module, &swig_module, name) #define SWIG_MangledTypeQuery(name) SWIG_MangledTypeQueryModule(&swig_module, &swig_module, name) @@ -3615,36 +3617,51 @@ SWIG_From_unsigned_SS_long_SS_long (unsigned long long value) #endif + + + +SWIGINTERNINLINE PyObject * +SWIG_FromCharPtr(const char *cptr) +{ + return SWIG_FromCharPtrAndSize(cptr, (cptr ? strlen(cptr) : 0)); +} + + SWIGINTERN int -SWIG_AsVal_unsigned_SS_int (PyObject * obj, unsigned int *val) +SWIG_AsPtr_std_string (PyObject * obj, std::string **val) { - unsigned long v; - int res = SWIG_AsVal_unsigned_SS_long (obj, &v); - if (SWIG_IsOK(res)) { - if ((v > UINT_MAX)) { - return SWIG_OverflowError; + char* buf = 0 ; size_t size = 0; int alloc = SWIG_OLDOBJ; + if (SWIG_IsOK((SWIG_AsCharPtrAndSize(obj, &buf, &size, &alloc)))) { + if (buf) { + if (val) *val = new std::string(buf, size - 1); + if (alloc == SWIG_NEWOBJ) delete[] buf; + return SWIG_NEWOBJ; } else { - if (val) *val = static_cast< unsigned int >(v); + if (val) *val = 0; + return SWIG_OLDOBJ; + } + } else { + static int init = 0; + static swig_type_info* descriptor = 0; + if (!init) { + descriptor = SWIG_TypeQuery("std::string" " *"); + init = 1; + } + if (descriptor) { + std::string *vptr; + int res = SWIG_ConvertPtr(obj, (void**)&vptr, descriptor, 0); + if (SWIG_IsOK(res) && val) *val = vptr; + return res; } } - return res; + return SWIG_ERROR; } -SWIGINTERNINLINE PyObject* - SWIG_From_unsigned_SS_int (unsigned int value) +SWIGINTERNINLINE PyObject * +SWIG_From_std_string (const std::string& s) { - return PyInt_FromSize_t((size_t) value); -} - - - - - -SWIGINTERNINLINE PyObject * -SWIG_FromCharPtr(const char *cptr) -{ - return SWIG_FromCharPtrAndSize(cptr, (cptr ? 
strlen(cptr) : 0)); + return SWIG_FromCharPtrAndSize(s.data(), s.size()); } @@ -3670,9 +3687,18 @@ SWIG_AsVal_size_t (PyObject * obj, size_t *val) SWIGINTERNINLINE PyObject * -SWIG_From_std_string (const std::string& s) -{ - return SWIG_FromCharPtrAndSize(s.data(), s.size()); +SWIG_From_size_t (size_t value) +{ +#ifdef SWIG_LONG_LONG_AVAILABLE + if (sizeof(size_t) <= sizeof(unsigned long)) { +#endif + return SWIG_From_unsigned_SS_long (static_cast< unsigned long >(value)); +#ifdef SWIG_LONG_LONG_AVAILABLE + } else { + /* assume sizeof(size_t) <= sizeof(unsigned long long) */ + return SWIG_From_unsigned_SS_long_SS_long (static_cast< unsigned long long >(value)); + } +#endif } #ifdef __cplusplus @@ -4602,11 +4628,11 @@ SWIGINTERN PyObject *_wrap_FileInfo_t_fileStatus_get(PyObject *SWIGUNUSEDPARM(se int res1 = 0 ; PyObject * obj0 = 0 ; int result; - + if (!PyArg_ParseTuple(args,(char *)"O:FileInfo_t_fileStatus_get",&obj0)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_FileInfo, 0 | 0 ); if (!SWIG_IsOK(res1)) { - SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "FileInfo_t_fileStatus_get" "', argument " "1"" of type '" "FileInfo *""'"); + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "FileInfo_t_fileStatus_get" "', argument " "1"" of type '" "FileInfo *""'"); } arg1 = reinterpret_cast< FileInfo * >(argp1); result = (int) ((arg1)->fileStatus); @@ -4620,25 +4646,25 @@ SWIGINTERN PyObject *_wrap_FileInfo_t_fileStatus_get(PyObject *SWIGUNUSEDPARM(se SWIGINTERN PyObject *_wrap_FileInfo_t_stripeUnit_set(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; FileInfo *arg1 = (FileInfo *) 0 ; - uint32_t arg2 ; + uint64_t arg2 ; void *argp1 = 0 ; int res1 = 0 ; - unsigned int val2 ; + unsigned long long val2 ; int ecode2 = 0 ; PyObject * obj0 = 0 ; PyObject * obj1 = 0 ; - + if (!PyArg_ParseTuple(args,(char *)"OO:FileInfo_t_stripeUnit_set",&obj0,&obj1)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_FileInfo, 0 | 0 ); if (!SWIG_IsOK(res1)) { - SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "FileInfo_t_stripeUnit_set" "', argument " "1"" of type '" "FileInfo *""'"); + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "FileInfo_t_stripeUnit_set" "', argument " "1"" of type '" "FileInfo *""'"); } arg1 = reinterpret_cast< FileInfo * >(argp1); - ecode2 = SWIG_AsVal_unsigned_SS_int(obj1, &val2); + ecode2 = SWIG_AsVal_unsigned_SS_long_SS_long(obj1, &val2); if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "FileInfo_t_stripeUnit_set" "', argument " "2"" of type '" "uint32_t""'"); - } - arg2 = static_cast< uint32_t >(val2); + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "FileInfo_t_stripeUnit_set" "', argument " "2"" of type '" "uint64_t""'"); + } + arg2 = static_cast< uint64_t >(val2); if (arg1) (arg1)->stripeUnit = arg2; resultobj = SWIG_Py_Void(); return resultobj; @@ -4653,16 +4679,16 @@ SWIGINTERN PyObject *_wrap_FileInfo_t_stripeUnit_get(PyObject *SWIGUNUSEDPARM(se void *argp1 = 0 ; int res1 = 0 ; PyObject * obj0 = 0 ; - uint32_t result; - + uint64_t result; + if (!PyArg_ParseTuple(args,(char *)"O:FileInfo_t_stripeUnit_get",&obj0)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_FileInfo, 0 | 0 ); if (!SWIG_IsOK(res1)) { - SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "FileInfo_t_stripeUnit_get" "', argument " "1"" of type '" "FileInfo *""'"); + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "FileInfo_t_stripeUnit_get" "', argument " "1"" of type '" "FileInfo 
*""'"); } arg1 = reinterpret_cast< FileInfo * >(argp1); result = (uint64_t) ((arg1)->stripeUnit); - resultobj = SWIG_From_unsigned_SS_int(static_cast< unsigned int >(result)); + resultobj = SWIG_From_unsigned_SS_long_SS_long(static_cast< unsigned long long >(result)); return resultobj; fail: return NULL; @@ -4672,25 +4698,25 @@ SWIGINTERN PyObject *_wrap_FileInfo_t_stripeUnit_get(PyObject *SWIGUNUSEDPARM(se SWIGINTERN PyObject *_wrap_FileInfo_t_stripeCount_set(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; FileInfo *arg1 = (FileInfo *) 0 ; - uint32_t arg2 ; + uint64_t arg2 ; void *argp1 = 0 ; int res1 = 0 ; - unsigned int val2 ; + unsigned long long val2 ; int ecode2 = 0 ; PyObject * obj0 = 0 ; PyObject * obj1 = 0 ; - + if (!PyArg_ParseTuple(args,(char *)"OO:FileInfo_t_stripeCount_set",&obj0,&obj1)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_FileInfo, 0 | 0 ); if (!SWIG_IsOK(res1)) { - SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "FileInfo_t_stripeCount_set" "', argument " "1"" of type '" "FileInfo *""'"); + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "FileInfo_t_stripeCount_set" "', argument " "1"" of type '" "FileInfo *""'"); } arg1 = reinterpret_cast< FileInfo * >(argp1); - ecode2 = SWIG_AsVal_unsigned_SS_int(obj1, &val2); + ecode2 = SWIG_AsVal_unsigned_SS_long_SS_long(obj1, &val2); if (!SWIG_IsOK(ecode2)) { - SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "FileInfo_t_stripeCount_set" "', argument " "2"" of type '" "uint32_t""'"); - } - arg2 = static_cast< uint32_t >(val2); + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "FileInfo_t_stripeCount_set" "', argument " "2"" of type '" "uint64_t""'"); + } + arg2 = static_cast< uint64_t >(val2); if (arg1) (arg1)->stripeCount = arg2; resultobj = SWIG_Py_Void(); return resultobj; @@ -4705,16 +4731,16 @@ SWIGINTERN PyObject *_wrap_FileInfo_t_stripeCount_get(PyObject *SWIGUNUSEDPARM(s void *argp1 = 0 ; int res1 = 0 ; PyObject * obj0 = 0 ; - uint32_t result; - + uint64_t result; + if (!PyArg_ParseTuple(args,(char *)"O:FileInfo_t_stripeCount_get",&obj0)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_FileInfo, 0 | 0 ); if (!SWIG_IsOK(res1)) { - SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "FileInfo_t_stripeCount_get" "', argument " "1"" of type '" "FileInfo *""'"); + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "FileInfo_t_stripeCount_get" "', argument " "1"" of type '" "FileInfo *""'"); } arg1 = reinterpret_cast< FileInfo * >(argp1); result = (uint64_t) ((arg1)->stripeCount); - resultobj = SWIG_From_unsigned_SS_int(static_cast< unsigned int >(result)); + resultobj = SWIG_From_unsigned_SS_long_SS_long(static_cast< unsigned long long >(result)); return resultobj; fail: return NULL; @@ -5020,6 +5046,373 @@ SWIGINTERN PyObject *DirInfos_t_swigregister(PyObject *SWIGUNUSEDPARM(self), PyO return SWIG_Py_Void(); } +SWIGINTERN PyObject *_wrap_CreateContext_name_set(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + CreateContext *arg1 = (CreateContext *) 0 ; + std::string *arg2 = 0 ; + void *argp1 = 0 ; + int res1 = 0 ; + int res2 = SWIG_OLDOBJ ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + + if (!PyArg_ParseTuple(args,(char *)"OO:CreateContext_name_set",&obj0,&obj1)) SWIG_fail; + res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_CreateContext, 0 | 0 ); + if (!SWIG_IsOK(res1)) { + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "CreateContext_name_set" "', argument " "1"" of type '" "CreateContext *""'"); 
+ } + arg1 = reinterpret_cast< CreateContext * >(argp1); + { + std::string *ptr = (std::string *)0; + res2 = SWIG_AsPtr_std_string(obj1, &ptr); + if (!SWIG_IsOK(res2)) { + SWIG_exception_fail(SWIG_ArgError(res2), "in method '" "CreateContext_name_set" "', argument " "2"" of type '" "std::string const &""'"); + } + if (!ptr) { + SWIG_exception_fail(SWIG_ValueError, "invalid null reference " "in method '" "CreateContext_name_set" "', argument " "2"" of type '" "std::string const &""'"); + } + arg2 = ptr; + } + if (arg1) (arg1)->name = *arg2; + resultobj = SWIG_Py_Void(); + if (SWIG_IsNewObj(res2)) delete arg2; + return resultobj; +fail: + if (SWIG_IsNewObj(res2)) delete arg2; + return NULL; +} + + +SWIGINTERN PyObject *_wrap_CreateContext_name_get(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + CreateContext *arg1 = (CreateContext *) 0 ; + void *argp1 = 0 ; + int res1 = 0 ; + PyObject * obj0 = 0 ; + std::string *result = 0 ; + + if (!PyArg_ParseTuple(args,(char *)"O:CreateContext_name_get",&obj0)) SWIG_fail; + res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_CreateContext, 0 | 0 ); + if (!SWIG_IsOK(res1)) { + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "CreateContext_name_get" "', argument " "1"" of type '" "CreateContext *""'"); + } + arg1 = reinterpret_cast< CreateContext * >(argp1); + result = (std::string *) & ((arg1)->name); + resultobj = SWIG_From_std_string(static_cast< std::string >(*result)); + return resultobj; +fail: + return NULL; +} + + +SWIGINTERN PyObject *_wrap_CreateContext_length_set(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + CreateContext *arg1 = (CreateContext *) 0 ; + size_t arg2 ; + void *argp1 = 0 ; + int res1 = 0 ; + size_t val2 ; + int ecode2 = 0 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + + if (!PyArg_ParseTuple(args,(char *)"OO:CreateContext_length_set",&obj0,&obj1)) SWIG_fail; + res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_CreateContext, 0 | 0 ); + if (!SWIG_IsOK(res1)) { + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "CreateContext_length_set" "', argument " "1"" of type '" "CreateContext *""'"); + } + arg1 = reinterpret_cast< CreateContext * >(argp1); + ecode2 = SWIG_AsVal_size_t(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "CreateContext_length_set" "', argument " "2"" of type '" "size_t""'"); + } + arg2 = static_cast< size_t >(val2); + if (arg1) (arg1)->length = arg2; + resultobj = SWIG_Py_Void(); + return resultobj; +fail: + return NULL; +} + + +SWIGINTERN PyObject *_wrap_CreateContext_length_get(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + CreateContext *arg1 = (CreateContext *) 0 ; + void *argp1 = 0 ; + int res1 = 0 ; + PyObject * obj0 = 0 ; + size_t result; + + if (!PyArg_ParseTuple(args,(char *)"O:CreateContext_length_get",&obj0)) SWIG_fail; + res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_CreateContext, 0 | 0 ); + if (!SWIG_IsOK(res1)) { + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "CreateContext_length_get" "', argument " "1"" of type '" "CreateContext *""'"); + } + arg1 = reinterpret_cast< CreateContext * >(argp1); + result = ((arg1)->length); + resultobj = SWIG_From_size_t(static_cast< size_t >(result)); + return resultobj; +fail: + return NULL; +} + + +SWIGINTERN PyObject *_wrap_CreateContext_user_set(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + CreateContext *arg1 = (CreateContext *) 0 ; + UserInfo *arg2 = (UserInfo *) 0 
; + void *argp1 = 0 ; + int res1 = 0 ; + void *argp2 = 0 ; + int res2 = 0 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + + if (!PyArg_ParseTuple(args,(char *)"OO:CreateContext_user_set",&obj0,&obj1)) SWIG_fail; + res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_CreateContext, 0 | 0 ); + if (!SWIG_IsOK(res1)) { + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "CreateContext_user_set" "', argument " "1"" of type '" "CreateContext *""'"); + } + arg1 = reinterpret_cast< CreateContext * >(argp1); + res2 = SWIG_ConvertPtr(obj1, &argp2,SWIGTYPE_p_UserInfo, 0 | 0 ); + if (!SWIG_IsOK(res2)) { + SWIG_exception_fail(SWIG_ArgError(res2), "in method '" "CreateContext_user_set" "', argument " "2"" of type '" "UserInfo *""'"); + } + arg2 = reinterpret_cast< UserInfo * >(argp2); + if (arg1) (arg1)->user = *arg2; + resultobj = SWIG_Py_Void(); + return resultobj; +fail: + return NULL; +} + + +SWIGINTERN PyObject *_wrap_CreateContext_user_get(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + CreateContext *arg1 = (CreateContext *) 0 ; + void *argp1 = 0 ; + int res1 = 0 ; + PyObject * obj0 = 0 ; + UserInfo *result = 0 ; + + if (!PyArg_ParseTuple(args,(char *)"O:CreateContext_user_get",&obj0)) SWIG_fail; + res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_CreateContext, 0 | 0 ); + if (!SWIG_IsOK(res1)) { + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "CreateContext_user_get" "', argument " "1"" of type '" "CreateContext *""'"); + } + arg1 = reinterpret_cast< CreateContext * >(argp1); + result = (UserInfo *)& ((arg1)->user); + resultobj = SWIG_NewPointerObj(SWIG_as_voidptr(result), SWIGTYPE_p_UserInfo, 0 | 0 ); + return resultobj; +fail: + return NULL; +} + + +SWIGINTERN PyObject *_wrap_CreateContext_poolset_set(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + CreateContext *arg1 = (CreateContext *) 0 ; + std::string *arg2 = 0 ; + void *argp1 = 0 ; + int res1 = 0 ; + int res2 = SWIG_OLDOBJ ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + + if (!PyArg_ParseTuple(args,(char *)"OO:CreateContext_poolset_set",&obj0,&obj1)) SWIG_fail; + res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_CreateContext, 0 | 0 ); + if (!SWIG_IsOK(res1)) { + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "CreateContext_poolset_set" "', argument " "1"" of type '" "CreateContext *""'"); + } + arg1 = reinterpret_cast< CreateContext * >(argp1); + { + std::string *ptr = (std::string *)0; + res2 = SWIG_AsPtr_std_string(obj1, &ptr); + if (!SWIG_IsOK(res2)) { + SWIG_exception_fail(SWIG_ArgError(res2), "in method '" "CreateContext_poolset_set" "', argument " "2"" of type '" "std::string const &""'"); + } + if (!ptr) { + SWIG_exception_fail(SWIG_ValueError, "invalid null reference " "in method '" "CreateContext_poolset_set" "', argument " "2"" of type '" "std::string const &""'"); + } + arg2 = ptr; + } + if (arg1) (arg1)->poolset = *arg2; + resultobj = SWIG_Py_Void(); + if (SWIG_IsNewObj(res2)) delete arg2; + return resultobj; +fail: + if (SWIG_IsNewObj(res2)) delete arg2; + return NULL; +} + + +SWIGINTERN PyObject *_wrap_CreateContext_poolset_get(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + CreateContext *arg1 = (CreateContext *) 0 ; + void *argp1 = 0 ; + int res1 = 0 ; + PyObject * obj0 = 0 ; + std::string *result = 0 ; + + if (!PyArg_ParseTuple(args,(char *)"O:CreateContext_poolset_get",&obj0)) SWIG_fail; + res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_CreateContext, 0 | 0 ); + if (!SWIG_IsOK(res1)) { + 
SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "CreateContext_poolset_get" "', argument " "1"" of type '" "CreateContext *""'"); + } + arg1 = reinterpret_cast< CreateContext * >(argp1); + result = (std::string *) & ((arg1)->poolset); + resultobj = SWIG_From_std_string(static_cast< std::string >(*result)); + return resultobj; +fail: + return NULL; +} + + +SWIGINTERN PyObject *_wrap_CreateContext_stripeUnit_set(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + CreateContext *arg1 = (CreateContext *) 0 ; + uint64_t arg2 ; + void *argp1 = 0 ; + int res1 = 0 ; + unsigned long long val2 ; + int ecode2 = 0 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + + if (!PyArg_ParseTuple(args,(char *)"OO:CreateContext_stripeUnit_set",&obj0,&obj1)) SWIG_fail; + res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_CreateContext, 0 | 0 ); + if (!SWIG_IsOK(res1)) { + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "CreateContext_stripeUnit_set" "', argument " "1"" of type '" "CreateContext *""'"); + } + arg1 = reinterpret_cast< CreateContext * >(argp1); + ecode2 = SWIG_AsVal_unsigned_SS_long_SS_long(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "CreateContext_stripeUnit_set" "', argument " "2"" of type '" "uint64_t""'"); + } + arg2 = static_cast< uint64_t >(val2); + if (arg1) (arg1)->stripeUnit = arg2; + resultobj = SWIG_Py_Void(); + return resultobj; +fail: + return NULL; +} + + +SWIGINTERN PyObject *_wrap_CreateContext_stripeUnit_get(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + CreateContext *arg1 = (CreateContext *) 0 ; + void *argp1 = 0 ; + int res1 = 0 ; + PyObject * obj0 = 0 ; + uint64_t result; + + if (!PyArg_ParseTuple(args,(char *)"O:CreateContext_stripeUnit_get",&obj0)) SWIG_fail; + res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_CreateContext, 0 | 0 ); + if (!SWIG_IsOK(res1)) { + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "CreateContext_stripeUnit_get" "', argument " "1"" of type '" "CreateContext *""'"); + } + arg1 = reinterpret_cast< CreateContext * >(argp1); + result = (uint64_t) ((arg1)->stripeUnit); + resultobj = SWIG_From_unsigned_SS_long_SS_long(static_cast< unsigned long long >(result)); + return resultobj; +fail: + return NULL; +} + + +SWIGINTERN PyObject *_wrap_CreateContext_stripeCount_set(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + CreateContext *arg1 = (CreateContext *) 0 ; + uint64_t arg2 ; + void *argp1 = 0 ; + int res1 = 0 ; + unsigned long long val2 ; + int ecode2 = 0 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + + if (!PyArg_ParseTuple(args,(char *)"OO:CreateContext_stripeCount_set",&obj0,&obj1)) SWIG_fail; + res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_CreateContext, 0 | 0 ); + if (!SWIG_IsOK(res1)) { + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "CreateContext_stripeCount_set" "', argument " "1"" of type '" "CreateContext *""'"); + } + arg1 = reinterpret_cast< CreateContext * >(argp1); + ecode2 = SWIG_AsVal_unsigned_SS_long_SS_long(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "CreateContext_stripeCount_set" "', argument " "2"" of type '" "uint64_t""'"); + } + arg2 = static_cast< uint64_t >(val2); + if (arg1) (arg1)->stripeCount = arg2; + resultobj = SWIG_Py_Void(); + return resultobj; +fail: + return NULL; +} + + +SWIGINTERN PyObject *_wrap_CreateContext_stripeCount_get(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject 
*resultobj = 0; + CreateContext *arg1 = (CreateContext *) 0 ; + void *argp1 = 0 ; + int res1 = 0 ; + PyObject * obj0 = 0 ; + uint64_t result; + + if (!PyArg_ParseTuple(args,(char *)"O:CreateContext_stripeCount_get",&obj0)) SWIG_fail; + res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_CreateContext, 0 | 0 ); + if (!SWIG_IsOK(res1)) { + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "CreateContext_stripeCount_get" "', argument " "1"" of type '" "CreateContext *""'"); + } + arg1 = reinterpret_cast< CreateContext * >(argp1); + result = (uint64_t) ((arg1)->stripeCount); + resultobj = SWIG_From_unsigned_SS_long_SS_long(static_cast< unsigned long long >(result)); + return resultobj; +fail: + return NULL; +} + + +SWIGINTERN PyObject *_wrap_new_CreateContext(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + CreateContext *result = 0 ; + + if (!PyArg_ParseTuple(args,(char *)":new_CreateContext")) SWIG_fail; + result = (CreateContext *)new CreateContext(); + resultobj = SWIG_NewPointerObj(SWIG_as_voidptr(result), SWIGTYPE_p_CreateContext, SWIG_POINTER_NEW | 0 ); + return resultobj; +fail: + return NULL; +} + + +SWIGINTERN PyObject *_wrap_delete_CreateContext(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + CreateContext *arg1 = (CreateContext *) 0 ; + void *argp1 = 0 ; + int res1 = 0 ; + PyObject * obj0 = 0 ; + + if (!PyArg_ParseTuple(args,(char *)"O:delete_CreateContext",&obj0)) SWIG_fail; + res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_CreateContext, SWIG_POINTER_DISOWN | 0 ); + if (!SWIG_IsOK(res1)) { + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "delete_CreateContext" "', argument " "1"" of type '" "CreateContext *""'"); + } + arg1 = reinterpret_cast< CreateContext * >(argp1); + delete arg1; + resultobj = SWIG_Py_Void(); + return resultobj; +fail: + return NULL; +} + + +SWIGINTERN PyObject *CreateContext_swigregister(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *obj; + if (!PyArg_ParseTuple(args,(char*)"O:swigregister", &obj)) return NULL; + SWIG_TypeNewClientData(SWIGTYPE_p_CreateContext, SWIG_NewClientData(obj)); + return SWIG_Py_Void(); +} + SWIGINTERN PyObject *_wrap_Init(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; char *arg1 = (char *) 0 ; @@ -5606,7 +5999,7 @@ SWIGINTERN PyObject *_wrap_Recover(PyObject *SWIGUNUSEDPARM(self), PyObject *arg PyObject * obj1 = 0 ; PyObject * obj2 = 0 ; int result; - + if (!PyArg_ParseTuple(args,(char *)"OOO:Recover",&obj0,&obj1,&obj2)) SWIG_fail; res1 = SWIG_AsCharPtrAndSize(obj0, &buf1, NULL, &alloc1); if (!SWIG_IsOK(res1)) { @@ -5615,13 +6008,13 @@ SWIGINTERN PyObject *_wrap_Recover(PyObject *SWIGUNUSEDPARM(self), PyObject *arg arg1 = reinterpret_cast< char * >(buf1); res2 = SWIG_ConvertPtr(obj1, &argp2,SWIGTYPE_p_UserInfo, 0 | 0 ); if (!SWIG_IsOK(res2)) { - SWIG_exception_fail(SWIG_ArgError(res2), "in method '" "Recover" "', argument " "2"" of type '" "UserInfo_t *""'"); + SWIG_exception_fail(SWIG_ArgError(res2), "in method '" "Recover" "', argument " "2"" of type '" "UserInfo_t *""'"); } arg2 = reinterpret_cast< UserInfo_t * >(argp2); ecode3 = SWIG_AsVal_unsigned_SS_long_SS_long(obj2, &val3); if (!SWIG_IsOK(ecode3)) { SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "Recover" "', argument " "3"" of type '" "uint64_t""'"); - } + } arg3 = static_cast< uint64_t >(val3); result = (int)Recover((char const *)arg1,arg2,arg3); resultobj = SWIG_From_int(static_cast< int >(result)); @@ -6010,7 +6403,7 @@ SWIGINTERN PyObject 
*_wrap_CBDClient_Open(PyObject *SWIGUNUSEDPARM(self), PyObje PyObject * obj1 = 0 ; PyObject * obj2 = 0 ; int result; - + if (!PyArg_ParseTuple(args,(char *)"OOO:CBDClient_Open",&obj0,&obj1,&obj2)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_CBDClient, 0 | 0 ); if (!SWIG_IsOK(res1)) { @@ -6108,7 +6501,7 @@ SWIGINTERN PyObject *_wrap_CBDClient_Create(PyObject *SWIGUNUSEDPARM(self), PyOb ecode4 = SWIG_AsVal_size_t(obj3, &val4); if (!SWIG_IsOK(ecode4)) { SWIG_exception_fail(SWIG_ArgError(ecode4), "in method '" "CBDClient_Create" "', argument " "4"" of type '" "size_t""'"); - } + } arg4 = static_cast< size_t >(val4); result = (int)(arg1)->Create((char const *)arg2,arg3,arg4); resultobj = SWIG_From_int(static_cast< int >(result)); @@ -6123,69 +6516,30 @@ SWIGINTERN PyObject *_wrap_CBDClient_Create(PyObject *SWIGUNUSEDPARM(self), PyOb SWIGINTERN PyObject *_wrap_CBDClient_Create2(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; CBDClient *arg1 = (CBDClient *) 0 ; - char *arg2 = (char *) 0 ; - UserInfo_t *arg3 = (UserInfo_t *) 0 ; - size_t arg4 ; - uint32_t arg5 ; - uint32_t arg6 ; + CreateContext *arg2 = (CreateContext *) 0 ; void *argp1 = 0 ; int res1 = 0 ; - int res2 ; - char *buf2 = 0 ; - int alloc2 = 0 ; - void *argp3 = 0 ; - int res3 = 0 ; - size_t val4 ; - int ecode4 = 0 ; - unsigned int val5 ; - int ecode5 = 0 ; - unsigned int val6 ; - int ecode6 = 0 ; + void *argp2 = 0 ; + int res2 = 0 ; PyObject * obj0 = 0 ; PyObject * obj1 = 0 ; - PyObject * obj2 = 0 ; - PyObject * obj3 = 0 ; - PyObject * obj4 = 0 ; - PyObject * obj5 = 0 ; int result; - - if (!PyArg_ParseTuple(args,(char *)"OOOOOO:CBDClient_Create2",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5)) SWIG_fail; + + if (!PyArg_ParseTuple(args,(char *)"OO:CBDClient_Create2",&obj0,&obj1)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_CBDClient, 0 | 0 ); if (!SWIG_IsOK(res1)) { - SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "CBDClient_Create2" "', argument " "1"" of type '" "CBDClient *""'"); + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "CBDClient_Create2" "', argument " "1"" of type '" "CBDClient *""'"); } arg1 = reinterpret_cast< CBDClient * >(argp1); - res2 = SWIG_AsCharPtrAndSize(obj1, &buf2, NULL, &alloc2); + res2 = SWIG_ConvertPtr(obj1, &argp2,SWIGTYPE_p_CreateContext, 0 | 0 ); if (!SWIG_IsOK(res2)) { - SWIG_exception_fail(SWIG_ArgError(res2), "in method '" "CBDClient_Create2" "', argument " "2"" of type '" "char const *""'"); + SWIG_exception_fail(SWIG_ArgError(res2), "in method '" "CBDClient_Create2" "', argument " "2"" of type '" "CreateContext const *""'"); } - arg2 = reinterpret_cast< char * >(buf2); - res3 = SWIG_ConvertPtr(obj2, &argp3,SWIGTYPE_p_UserInfo, 0 | 0 ); - if (!SWIG_IsOK(res3)) { - SWIG_exception_fail(SWIG_ArgError(res3), "in method '" "CBDClient_Create2" "', argument " "3"" of type '" "UserInfo_t *""'"); - } - arg3 = reinterpret_cast< UserInfo_t * >(argp3); - ecode4 = SWIG_AsVal_size_t(obj3, &val4); - if (!SWIG_IsOK(ecode4)) { - SWIG_exception_fail(SWIG_ArgError(ecode4), "in method '" "CBDClient_Create2" "', argument " "4"" of type '" "size_t""'"); - } - arg4 = static_cast< size_t >(val4); - ecode5 = SWIG_AsVal_unsigned_SS_int(obj4, &val5); - if (!SWIG_IsOK(ecode5)) { - SWIG_exception_fail(SWIG_ArgError(ecode5), "in method '" "CBDClient_Create2" "', argument " "5"" of type '" "uint32_t""'"); - } - arg5 = static_cast< uint32_t >(val5); - ecode6 = SWIG_AsVal_unsigned_SS_int(obj5, &val6); - if (!SWIG_IsOK(ecode6)) { - SWIG_exception_fail(SWIG_ArgError(ecode6), 
"in method '" "CBDClient_Create2" "', argument " "6"" of type '" "uint32_t""'"); - } - arg6 = static_cast< uint32_t >(val6); - result = (int)(arg1)->Create2((char const *)arg2,arg3,arg4,arg5,arg6); + arg2 = reinterpret_cast< CreateContext * >(argp2); + result = (int)(arg1)->Create2((CreateContext const *)arg2); resultobj = SWIG_From_int(static_cast< int >(result)); - if (alloc2 == SWIG_NEWOBJ) delete[] buf2; return resultobj; fail: - if (alloc2 == SWIG_NEWOBJ) delete[] buf2; return NULL; } @@ -6275,6 +6629,7 @@ SWIGINTERN PyObject *_wrap_CBDClient_DeleteForce(PyObject *SWIGUNUSEDPARM(self), return NULL; } + SWIGINTERN PyObject *_wrap_CBDClient_Recover(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; CBDClient *arg1 = (CBDClient *) 0 ; @@ -6295,11 +6650,11 @@ SWIGINTERN PyObject *_wrap_CBDClient_Recover(PyObject *SWIGUNUSEDPARM(self), PyO PyObject * obj2 = 0 ; PyObject * obj3 = 0 ; int result; - + if (!PyArg_ParseTuple(args,(char *)"OOOO:CBDClient_Recover",&obj0,&obj1,&obj2,&obj3)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_CBDClient, 0 | 0 ); if (!SWIG_IsOK(res1)) { - SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "CBDClient_Recover" "', argument " "1"" of type '" "CBDClient *""'"); + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "CBDClient_Recover" "', argument " "1"" of type '" "CBDClient *""'"); } arg1 = reinterpret_cast< CBDClient * >(argp1); res2 = SWIG_AsCharPtrAndSize(obj1, &buf2, NULL, &alloc2); @@ -6309,13 +6664,13 @@ SWIGINTERN PyObject *_wrap_CBDClient_Recover(PyObject *SWIGUNUSEDPARM(self), PyO arg2 = reinterpret_cast< char * >(buf2); res3 = SWIG_ConvertPtr(obj2, &argp3,SWIGTYPE_p_UserInfo, 0 | 0 ); if (!SWIG_IsOK(res3)) { - SWIG_exception_fail(SWIG_ArgError(res3), "in method '" "CBDClient_Recover" "', argument " "3"" of type '" "UserInfo_t *""'"); + SWIG_exception_fail(SWIG_ArgError(res3), "in method '" "CBDClient_Recover" "', argument " "3"" of type '" "UserInfo_t *""'"); } arg3 = reinterpret_cast< UserInfo_t * >(argp3); ecode4 = SWIG_AsVal_unsigned_SS_long_SS_long(obj3, &val4); if (!SWIG_IsOK(ecode4)) { SWIG_exception_fail(SWIG_ArgError(ecode4), "in method '" "CBDClient_Recover" "', argument " "4"" of type '" "uint64_t""'"); - } + } arg4 = static_cast< uint64_t >(val4); result = (int)(arg1)->Recover((char const *)arg2,arg3,arg4); resultobj = SWIG_From_int(static_cast< int >(result)); @@ -6326,6 +6681,7 @@ SWIGINTERN PyObject *_wrap_CBDClient_Recover(PyObject *SWIGUNUSEDPARM(self), PyO return NULL; } + SWIGINTERN PyObject *_wrap_CBDClient_Rename(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; CBDClient *arg1 = (CBDClient *) 0 ; @@ -6492,7 +6848,11 @@ SWIGINTERN PyObject *_wrap_CBDClient_Read(PyObject *SWIGUNUSEDPARM(self), PyObje if (alloc3 == SWIG_NEWOBJ) delete[] buf3; return resultobj; } +#if PY_MAJOR_VERSION == 3 + resultobj = PyBytes_FromStringAndSize(arg3, arg5); +#else resultobj = SWIG_Python_AppendOutput(resultobj, SWIG_FromCharPtrAndSize(arg3, arg5)); +#endif if (alloc3 == SWIG_NEWOBJ) delete[] buf3; delete[] arg3; return resultobj; @@ -6516,6 +6876,9 @@ SWIGINTERN PyObject *_wrap_CBDClient_Write(PyObject *SWIGUNUSEDPARM(self), PyObj int ecode2 = 0 ; int res3 ; char *buf3 = 0 ; +#if PY_MAJOR_VERSION == 3 + Py_ssize_t input_len; +#endif int alloc3 = 0 ; unsigned long val4 ; int ecode4 = 0 ; @@ -6528,7 +6891,11 @@ SWIGINTERN PyObject *_wrap_CBDClient_Write(PyObject *SWIGUNUSEDPARM(self), PyObj PyObject * obj4 = 0 ; int result; +#if PY_MAJOR_VERSION == 3 + if 
(!PyArg_ParseTuple(args,(char *)"OOy#OO:CBDClient_Write",&obj0,&obj1,&buf3,&input_len,&obj3,&obj4)) SWIG_fail; +#else if (!PyArg_ParseTuple(args,(char *)"OOOOO:CBDClient_Write",&obj0,&obj1,&obj2,&obj3,&obj4)) SWIG_fail; +#endif res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_CBDClient, 0 | 0 ); if (!SWIG_IsOK(res1)) { SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "CBDClient_Write" "', argument " "1"" of type '" "CBDClient *""'"); @@ -6539,10 +6906,13 @@ SWIGINTERN PyObject *_wrap_CBDClient_Write(PyObject *SWIGUNUSEDPARM(self), PyObj SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "CBDClient_Write" "', argument " "2"" of type '" "int""'"); } arg2 = static_cast< int >(val2); +#if PY_MAJOR_VERSION == 3 +#else res3 = SWIG_AsCharPtrAndSize(obj2, &buf3, NULL, &alloc3); if (!SWIG_IsOK(res3)) { SWIG_exception_fail(SWIG_ArgError(res3), "in method '" "CBDClient_Write" "', argument " "3"" of type '" "char const *""'"); } +#endif arg3 = reinterpret_cast< char * >(buf3); ecode4 = SWIG_AsVal_unsigned_SS_long(obj3, &val4); if (!SWIG_IsOK(ecode4)) { @@ -6991,6 +7361,34 @@ SWIGINTERN PyObject *_wrap_CBDClient_GetClusterId(PyObject *SWIGUNUSEDPARM(self) } +SWIGINTERN PyObject *_wrap_CBDClient_ListPoolset(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + CBDClient *arg1 = (CBDClient *) 0 ; + void *argp1 = 0 ; + int res1 = 0 ; + PyObject * obj0 = 0 ; + std::vector< std::string > result; + + if (!PyArg_ParseTuple(args,(char *)"O:CBDClient_ListPoolset",&obj0)) SWIG_fail; + res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_CBDClient, 0 | 0 ); + if (!SWIG_IsOK(res1)) { + SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "CBDClient_ListPoolset" "', argument " "1"" of type '" "CBDClient *""'"); + } + arg1 = reinterpret_cast< CBDClient * >(argp1); + result = (arg1)->ListPoolset(); + resultobj = PyList_New(result.size()); + if (resultobj == nullptr) { + SWIG_exception_fail(-1, "memory alloc failed"); + } + for (size_t i = 0; i < result.size(); ++i) { + PyList_SetItem(resultobj, i, SWIG_From_std_string(result[i])); + } + return resultobj; +fail: + return NULL; +} + + SWIGINTERN PyObject *CBDClient_swigregister(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *obj; if (!PyArg_ParseTuple(args,(char*)"O:swigregister", &obj)) return NULL; @@ -7056,6 +7454,21 @@ static PyMethodDef SwigMethods[] = { { (char *)"new_DirInfos_t", _wrap_new_DirInfos_t, METH_VARARGS, NULL}, { (char *)"delete_DirInfos_t", _wrap_delete_DirInfos_t, METH_VARARGS, NULL}, { (char *)"DirInfos_t_swigregister", DirInfos_t_swigregister, METH_VARARGS, NULL}, + { (char *)"CreateContext_name_set", _wrap_CreateContext_name_set, METH_VARARGS, NULL}, + { (char *)"CreateContext_name_get", _wrap_CreateContext_name_get, METH_VARARGS, NULL}, + { (char *)"CreateContext_length_set", _wrap_CreateContext_length_set, METH_VARARGS, NULL}, + { (char *)"CreateContext_length_get", _wrap_CreateContext_length_get, METH_VARARGS, NULL}, + { (char *)"CreateContext_user_set", _wrap_CreateContext_user_set, METH_VARARGS, NULL}, + { (char *)"CreateContext_user_get", _wrap_CreateContext_user_get, METH_VARARGS, NULL}, + { (char *)"CreateContext_poolset_set", _wrap_CreateContext_poolset_set, METH_VARARGS, NULL}, + { (char *)"CreateContext_poolset_get", _wrap_CreateContext_poolset_get, METH_VARARGS, NULL}, + { (char *)"CreateContext_stripeUnit_set", _wrap_CreateContext_stripeUnit_set, METH_VARARGS, NULL}, + { (char *)"CreateContext_stripeUnit_get", _wrap_CreateContext_stripeUnit_get, METH_VARARGS, NULL}, + { (char 
*)"CreateContext_stripeCount_set", _wrap_CreateContext_stripeCount_set, METH_VARARGS, NULL}, + { (char *)"CreateContext_stripeCount_get", _wrap_CreateContext_stripeCount_get, METH_VARARGS, NULL}, + { (char *)"new_CreateContext", _wrap_new_CreateContext, METH_VARARGS, NULL}, + { (char *)"delete_CreateContext", _wrap_delete_CreateContext, METH_VARARGS, NULL}, + { (char *)"CreateContext_swigregister", CreateContext_swigregister, METH_VARARGS, NULL}, { (char *)"Init", _wrap_Init, METH_VARARGS, NULL}, { (char *)"Open4Qemu", _wrap_Open4Qemu, METH_VARARGS, NULL}, { (char *)"Open", _wrap_Open, METH_VARARGS, NULL}, @@ -7071,8 +7484,8 @@ static PyMethodDef SwigMethods[] = { { (char *)"Rename", _wrap_Rename, METH_VARARGS, NULL}, { (char *)"Extend", _wrap_Extend, METH_VARARGS, NULL}, { (char *)"Unlink", _wrap_Unlink, METH_VARARGS, NULL}, - { (char *)"DeleteForce", _wrap_DeleteForce, METH_VARARGS, NULL}, { (char *)"Recover", _wrap_Recover, METH_VARARGS, NULL}, + { (char *)"DeleteForce", _wrap_DeleteForce, METH_VARARGS, NULL}, { (char *)"OpenDir", _wrap_OpenDir, METH_VARARGS, NULL}, { (char *)"CloseDir", _wrap_CloseDir, METH_VARARGS, NULL}, { (char *)"Listdir", _wrap_Listdir, METH_VARARGS, NULL}, @@ -7105,6 +7518,7 @@ static PyMethodDef SwigMethods[] = { { (char *)"CBDClient_Mkdir", _wrap_CBDClient_Mkdir, METH_VARARGS, NULL}, { (char *)"CBDClient_Rmdir", _wrap_CBDClient_Rmdir, METH_VARARGS, NULL}, { (char *)"CBDClient_GetClusterId", _wrap_CBDClient_GetClusterId, METH_VARARGS, NULL}, + { (char *)"CBDClient_ListPoolset", _wrap_CBDClient_ListPoolset, METH_VARARGS, NULL}, { (char *)"CBDClient_swigregister", CBDClient_swigregister, METH_VARARGS, NULL}, { NULL, NULL, 0, NULL } }; @@ -7114,6 +7528,7 @@ static PyMethodDef SwigMethods[] = { static swig_type_info _swigt__p_AioContext = {"_p_AioContext", "AioContext *|AioContext_t *", 0, 0, (void*)0, 0}; static swig_type_info _swigt__p_CBDClient = {"_p_CBDClient", "CBDClient *", 0, 0, (void*)0, 0}; +static swig_type_info _swigt__p_CreateContext = {"_p_CreateContext", "CreateContext *", 0, 0, (void*)0, 0}; static swig_type_info _swigt__p_DirInfos = {"_p_DirInfos", "DirInfos_t *|DirInfos *", 0, 0, (void*)0, 0}; static swig_type_info _swigt__p_FileInfo = {"_p_FileInfo", "FileInfo *|FileInfo_t *", 0, 0, (void*)0, 0}; static swig_type_info _swigt__p_UserInfo = {"_p_UserInfo", "UserInfo *|UserInfo_t *", 0, 0, (void*)0, 0}; @@ -7123,6 +7538,7 @@ static swig_type_info _swigt__p_int = {"_p_int", "intptr_t *|int *|int_least32_t static swig_type_info _swigt__p_long_long = {"_p_long_long", "int_least64_t *|int_fast64_t *|int64_t *|long long *|intmax_t *", 0, 0, (void*)0, 0}; static swig_type_info _swigt__p_short = {"_p_short", "short *|int_least16_t *|int16_t *", 0, 0, (void*)0, 0}; static swig_type_info _swigt__p_signed_char = {"_p_signed_char", "signed char *|int_least8_t *|int_fast8_t *|int8_t *", 0, 0, (void*)0, 0}; +static swig_type_info _swigt__p_std__vectorT_std__string_t = {"_p_std__vectorT_std__string_t", "std::vector< std::string > *", 0, 0, (void*)0, 0}; static swig_type_info _swigt__p_unsigned_char = {"_p_unsigned_char", "unsigned char *|uint_least8_t *|uint_fast8_t *|uint8_t *", 0, 0, (void*)0, 0}; static swig_type_info _swigt__p_unsigned_int = {"_p_unsigned_int", "uintptr_t *|uint_least32_t *|uint_fast32_t *|uint32_t *|unsigned int *|uint_fast16_t *", 0, 0, (void*)0, 0}; static swig_type_info _swigt__p_unsigned_long_long = {"_p_unsigned_long_long", "uint_least64_t *|uint_fast64_t *|uint64_t *|unsigned long long *|uintmax_t *", 0, 0, (void*)0, 0}; @@ -7132,6 
+7548,7 @@ static swig_type_info _swigt__p_void = {"_p_void", "void *", 0, 0, (void*)0, 0}; static swig_type_info *swig_type_initial[] = { &_swigt__p_AioContext, &_swigt__p_CBDClient, + &_swigt__p_CreateContext, &_swigt__p_DirInfos, &_swigt__p_FileInfo, &_swigt__p_UserInfo, @@ -7141,6 +7558,7 @@ static swig_type_info *swig_type_initial[] = { &_swigt__p_long_long, &_swigt__p_short, &_swigt__p_signed_char, + &_swigt__p_std__vectorT_std__string_t, &_swigt__p_unsigned_char, &_swigt__p_unsigned_int, &_swigt__p_unsigned_long_long, @@ -7150,6 +7568,7 @@ static swig_type_info *swig_type_initial[] = { static swig_cast_info _swigc__p_AioContext[] = { {&_swigt__p_AioContext, 0, 0, 0},{0, 0, 0, 0}}; static swig_cast_info _swigc__p_CBDClient[] = { {&_swigt__p_CBDClient, 0, 0, 0},{0, 0, 0, 0}}; +static swig_cast_info _swigc__p_CreateContext[] = { {&_swigt__p_CreateContext, 0, 0, 0},{0, 0, 0, 0}}; static swig_cast_info _swigc__p_DirInfos[] = { {&_swigt__p_DirInfos, 0, 0, 0},{0, 0, 0, 0}}; static swig_cast_info _swigc__p_FileInfo[] = { {&_swigt__p_FileInfo, 0, 0, 0},{0, 0, 0, 0}}; static swig_cast_info _swigc__p_UserInfo[] = { {&_swigt__p_UserInfo, 0, 0, 0},{0, 0, 0, 0}}; @@ -7159,6 +7578,7 @@ static swig_cast_info _swigc__p_int[] = { {&_swigt__p_int, 0, 0, 0},{0, 0, 0, 0 static swig_cast_info _swigc__p_long_long[] = { {&_swigt__p_long_long, 0, 0, 0},{0, 0, 0, 0}}; static swig_cast_info _swigc__p_short[] = { {&_swigt__p_short, 0, 0, 0},{0, 0, 0, 0}}; static swig_cast_info _swigc__p_signed_char[] = { {&_swigt__p_signed_char, 0, 0, 0},{0, 0, 0, 0}}; +static swig_cast_info _swigc__p_std__vectorT_std__string_t[] = { {&_swigt__p_std__vectorT_std__string_t, 0, 0, 0},{0, 0, 0, 0}}; static swig_cast_info _swigc__p_unsigned_char[] = { {&_swigt__p_unsigned_char, 0, 0, 0},{0, 0, 0, 0}}; static swig_cast_info _swigc__p_unsigned_int[] = { {&_swigt__p_unsigned_int, 0, 0, 0},{0, 0, 0, 0}}; static swig_cast_info _swigc__p_unsigned_long_long[] = { {&_swigt__p_unsigned_long_long, 0, 0, 0},{0, 0, 0, 0}}; @@ -7168,6 +7588,7 @@ static swig_cast_info _swigc__p_void[] = { {&_swigt__p_void, 0, 0, 0},{0, 0, 0, static swig_cast_info *swig_cast_initial[] = { _swigc__p_AioContext, _swigc__p_CBDClient, + _swigc__p_CreateContext, _swigc__p_DirInfos, _swigc__p_FileInfo, _swigc__p_UserInfo, @@ -7177,6 +7598,7 @@ static swig_cast_info *swig_cast_initial[] = { _swigc__p_long_long, _swigc__p_short, _swigc__p_signed_char, + _swigc__p_std__vectorT_std__string_t, _swigc__p_unsigned_char, _swigc__p_unsigned_int, _swigc__p_unsigned_long_long, diff --git a/developers_guide.md b/developers_guide.md index c5acbafaae..a5ece6ba46 100644 --- a/developers_guide.md +++ b/developers_guide.md @@ -18,15 +18,15 @@ The premise of participating in an open source project is to understand it, espe Through the study of the above materials, I believe that you already have an overall understanding of the Curve project, and there may also be some curiosity and doubts. At this time, you can deploy a Curve experience environment, which is conducive to a more intuitive perception of the Curve system. If you encounter a problem or want a new feature, you can track the relevant code. In the process, it is easy to understand the relevant modules. This is lots of contributors completed their first contributions. -The Curve community has multiple [communication channels](#Comminication), and there will be an online community meeting every two weeks. The regular meeting will synchronize the recent progress of the Curve project and answer your questions. 
You are also welcome to participate in the Curve community regular meeting. We will communicate at the meeting, so that we can quickly answer your questions, improve our understanding of the Curve project and synchronize what we are doing at this stage. +The Curve community has multiple [communication channels](#Communication), and there will be an online community meeting every two weeks. The regular meeting will synchronize the recent progress of the Curve project and answer your questions. You are also welcome to participate in the Curve community regular meeting. We will communicate at the meeting, so that we can quickly answer your questions, improve our understanding of the Curve project and synchronize what we are doing at this stage. ## How to participate in Curve After you have a certain understanding of the Curve project, if you are interested, you can choose the starting point to participate in the Curve. You can choose from the following aspects: -- Start by selecting a interested one from the [Issues](https://github.com/opencurve/curve/issues) of the Curve project. You can pay attention to issues with the good_first_issue tag which we have assessed as relatively good starters. +- Start by selecting a interested one from the [issues](https://github.com/opencurve/curve/issues) of the Curve project. You can pay attention to issues with the [good first issue](https://github.com/opencurve/curve/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22good+first+issue%22) tag which we have assessed as relatively good starters. - Based on a certain understanding of the Curve project, you can also choose from [Roadmap](https://github.com/opencurve/curve/wiki/Roadmap). -- Selecting form [Issues](https://github.com/opencurve/curveadm/issues) and [Roadmap](https://github.com/opencurve/curveadm/issues/92) of Curve Operation and maintenance tools [Curveadm](https://github.com/opencurve/curveadm). Here are [quick start guidelines](https://github.com/opencurve/curveadm/wiki/develop)。 +- Selecting form [issues](https://github.com/opencurve/curveadm/issues) and [Roadmap](https://github.com/opencurve/curveadm/issues/92) of Curve Operation and maintenance tools [Curveadm](https://github.com/opencurve/curveadm). Here are [quick start guidelines](https://github.com/opencurve/curveadm/wiki/develop)。 - In addition to the existing issues, you are also welcome to submit issues that you have discovered or new features you hope for and resolve them. - You can pay attention to the *TODO* in the existing Curve code, most of which are code logic optimization and features to be supplemented, choose the ones you are interested in and raise relevant issues to follow up and try to solve. @@ -117,7 +117,7 @@ Curve Ci use ```cpplint``` check what your changed. For PR we have the following requirements: -- The CURVE coding standard strictly follows the [Google C++ Open Source Project Coding Guide](https://google.github.io/styleguide/cppguide.html), but we use 4 spaces to indent, Clang-format will more helpful for you. Of course, CI will check what your changed. +- The Curve coding standard strictly follows the [Google C++ Open Source Project Coding Guide](https://google.github.io/styleguide/cppguide.html), but we use 4 spaces to indent, Clang-format will more helpful for you. Of course, CI will check what your changed. 
- The code must have test cases, excluding documentation, unit tests (incremental lines cover more than 80%, and incremental branches cover more than 70%); integration tests (merge statistics with unit tests, and meet the unit test coverage requirements). - Please fill in the description of the PR as detailed as possible, associate with the relevant issues, and the PR commit message can clearly see the resolved issues. After submitting to the Curve master branch, Curve CI will be triggered automatically. It is necessary to ensure that the CI is passed, and the Jenkins username and password of the CI is netease/netease, if the CI fails to run, you can log in to the Jenkins platform to view the reason for the failure. - After the CI is passed, the review can start, and each PR needs to get at least two LGTMs of Committer/Maintainer before merging. @@ -138,9 +138,9 @@ For PR we have the following requirements: Repush will trigger CI, If github page have no reaction. Please wait. -If CI is not stabled, comment ```recheck``` will trigger CI. +If CI is not stabled, repeatedly comment ```cicheck``` will trigger CI again. -## Comminication +## Communication At present, the Curve community has multiple communication channels, please choose an appropriate and efficient communication method according to your needs: @@ -151,4 +151,4 @@ At present, the Curve community has multiple communication channels, please choo - **slack**: cloud-native.slack.com,channel #project_curve - **Curve User Group**: In order to facilitate instant communication with everyone, the Curve User Group is currently a WeChat group. Due to the large number of people in the group, it is necessary to add OpenCurve_bot WeChat first, and then invite into the group. In the user group, everyone can freely communicate about Curve and storage-related topics, and get immediate feedback on problems. 
- \ No newline at end of file + diff --git a/developers_guide_cn.md b/developers_guide_cn.md index 13f1f754f8..1c36a8c0e3 100644 --- a/developers_guide_cn.md +++ b/developers_guide_cn.md @@ -24,9 +24,9 @@ Curve 社区有多个[沟通渠道](#社区交流),每两周还会有线上的 在对 Curve 项目有一定了解后,如果感兴趣就可以选择入手点进行 Curve 开源项目的参与了,可以从以下几个方面进行选择: -- 从 Curve 项目的 [Issue](https://github.com/opencurve/curve/issues) 中选择感兴趣的问题入手,可以特别关注带有 good_first_issue tag 的 issue,这些是我们经过评估认为是相对好入门的一些问题。 +- 从 Curve 项目的 [issue](https://github.com/opencurve/curve/issues) 中选择感兴趣的问题入手,可以特别关注带有 [good first issue](https://github.com/opencurve/curve/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22good+first+issue%22) tag 的 issue,这些是我们经过评估认为是相对好入门的一些问题。 - 在对 Curve 项目有一定了解的基础上也可以从 [Roadmap](https://github.com/opencurve/curve/wiki/Roadmap_CN) 中进行选择。 -- 从 Curve 项目的运维工具 [Curveadm](https://github.com/opencurve/curveadm) 的 [Issue](https://github.com/opencurve/curveadm/issues) 和 [Roadmap](https://github.com/opencurve/curveadm/issues/92) 中进行选择,开发方式可参见[快速上手 CurveAdm 开发](https://github.com/opencurve/curveadm/wiki/develop)。 +- 从 Curve 项目的运维工具 [Curveadm](https://github.com/opencurve/curveadm) 的 [issue](https://github.com/opencurve/curveadm/issues) 和 [Roadmap](https://github.com/opencurve/curveadm/issues/92) 中进行选择,开发方式可参见 [快速上手 CurveAdm 开发](https://github.com/opencurve/curveadm/wiki/develop)。 - 除了已有的 issue,也欢迎将自己发现的问题或希望的新特性提出 issue 并进行解决。 - 可以关注现有 Curve 代码中的 *TODO* ,大部分为代码逻辑优化和待补充特性,选择感兴趣的提出相关 issue 跟进并尝试解决。 @@ -73,9 +73,9 @@ Curve CI 使用```cpplint```检查更改的代码, 对于 PR 我们有如下要求: -- CURVE编码规范严格按照[Google C++开源项目编码指南](https://zh-google-styleguide.readthedocs.io/en/latest/google-cpp-styleguide/contents/)来进行代码编写,但使用 4 空格进行缩进, 可使用 clang-format 进行格式化, CI 会检查相关更改代码是否符合规则. +- Curve编码规范严格按照[Google C++开源项目编码指南](https://zh-google-styleguide.readthedocs.io/en/latest/google-cpp-styleguide/contents/)来进行代码编写,但使用 4 空格进行缩进, 可使用 clang-format 进行格式化, CI 会检查相关更改代码是否符合规则. - 代码必须有测试,文档除外,单元测试(增量行覆盖80%以上,增量分支覆盖70%以上);集成测试(与单元测试合并统计,满足单元测试覆盖率要求即可) -- 请尽可能详细的填写 PR 的描述,关联相关问题的 issuse,PR commit message 能清晰看出解决的问题,提交到 Curve master 分支之后会自动触发Curve CI,需保证 CI 通过,CI 的 Jenkins 用户名密码为 netease/netease,如遇到 CI 运行失败可以登录 Jenkins 平台查看失败原因。 +- 请尽可能详细的填写 PR 的描述,关联相关问题的 issue,PR commit message 能清晰看出解决的问题,提交到 Curve master 分支之后会自动触发Curve CI,需保证 CI 通过,CI 的 Jenkins 用户名密码为 netease/netease,如遇到 CI 运行失败可以登录 Jenkins 平台查看失败原因。 - CI 通过之后可开始进行 review,每个 PR 在合并之前都需要至少得到两个 Committer/Maintainer 的 LGTM。 - PR 代码需要一定量的注释来使代码容易理解,且所有注释和 review 意见和回复均要求使用英语。 @@ -136,7 +136,7 @@ CI 检查点有: 重新 push 会触发 CI, 若暂时无反应, 请耐心等待, 测试处于排队中. -若 CI 不稳定, 可 comment ```recheck```, 重新触发 CI. +若 CI 不稳定, 可重复 comment ```cicheck``` 以重新触发 CI. 
## 社区交流 @@ -149,4 +149,4 @@ CI 检查点有: - **slack**: cloud-native.slack.com,channel #project_curve - **Curve User Group**: 为了便于大家即时的沟通,Curve User Group 目前为微信群,由于群人数过多,需要先添加OpenCurve_bot微信,再邀请进群。在用户群里大家可以自由的沟通关于 Curve 和存储相关的话题,对于存在问题也可以较为即时的得到反馈。 - \ No newline at end of file + diff --git a/docker/debian9/Dockerfile b/docker/debian9/Dockerfile index 4158cf71d8..86be5ead83 100644 --- a/docker/debian9/Dockerfile +++ b/docker/debian9/Dockerfile @@ -1,9 +1,23 @@ FROM opencurvedocker/curve-base:debian9 +COPY --from=opencurvedocker/curve-base:curve-tgt-debian9 /curve-tgt/ /curve-tgt/ +COPY --from=opencurvedocker/curve-base:curve-tgt-debian9 /curve/curve-sdk /curve-tgt/curve-sdk +RUN apt update \ + && apt install -y kmod \ + && cd /curve-tgt/curve-sdk \ + && cp -f lib/* /usr/lib \ + && cp -f bin/* /usr/bin \ + && mkdir -p /usr/curvefs \ + && cp -f curvefs/* /usr/curvefs \ + && cp -f include/* /usr/include \ + && ldconfig \ + && cd /curve-tgt/ \ + && make install-programs \ + && rm -rf /curve-tgt COPY entrypoint.sh / COPY curvebs /curvebs -RUN mkdir -p /etc/curve /etc/nebd \ +RUN mkdir -p /etc/curve /etc/nebd /curve/init.d/ \ && chmod a+x /entrypoint.sh \ - && cp curvebs/nbd/sbin/curve-nbd /usr/bin/ \ - && cp curvebs/tools/sbin/curve_ops_tool /usr/bin/ \ - && cp curvebs/tools-v2/sbin/curve /usr/bin/ + && cp /curvebs/nbd/sbin/curve-nbd /usr/bin/ \ + && cp /curvebs/tools/sbin/curve_ops_tool /usr/bin/ \ + && cp /curvebs/tools-v2/sbin/curve /usr/bin/ ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/debian9/compile/Dockerfile b/docker/debian9/compile/Dockerfile index cfc68e429b..212e045c45 100644 --- a/docker/debian9/compile/Dockerfile +++ b/docker/debian9/compile/Dockerfile @@ -19,7 +19,7 @@ RUN echo "deb http://mirrors.163.com/debian/ stretch main\n" \ && apt-get -y install \ g++ \ wget \ - clang \ + clang-11 \ git \ libnl-genl-3-dev \ libssl-dev \ @@ -53,6 +53,9 @@ RUN echo "deb http://mirrors.163.com/debian/ stretch main\n" \ gdb \ unzip \ musl-tools \ + libunwind8-dev \ + && ln -s /usr/bin/clang-11 /usr/bin/clang \ + && ln -s /usr/bin/clang++-11 /usr/bin/clang++ \ && mkdir -p /etc/apt/keyrings \ && curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg \ && echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \ @@ -96,10 +99,11 @@ RUN echo "deb http://mirrors.163.com/debian/ stretch main\n" \ && tar -C /usr/local -xzf go${GO_VERSION}.linux-amd64.tar.gz \ && rm go${GO_VERSION}.linux-amd64.tar.gz \ && export PATH=$PATH:/usr/local/go/bin \ + && export GOPATH=/usr/local/go \ && go env -w GO111MODULE=on && go env -w GOPROXY=https://goproxy.cn,direct \ && wget ${GITHUB_PROXY}https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip \ && unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip "bin/protoc" -d /usr/ \ && rm protoc-${PROTOC_VERSION}-linux-x86_64.zip \ && go install google.golang.org/protobuf/cmd/protoc-gen-go@${PROTOC_GEN_GO_VERSION} \ && go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@${PROTOC_GEN_GO_GRPC_VERSION} -ENV PATH=$PATH:/usr/local/go/bin:/root/go/bin +ENV PATH=$PATH:/usr/local/go/bin diff --git a/docker/debian9/curve-tgt/Dockerfile b/docker/debian9/curve-tgt/Dockerfile new file mode 100644 index 0000000000..6f9b3f865f --- /dev/null +++ b/docker/debian9/curve-tgt/Dockerfile @@ -0,0 +1,31 @@ +FROM opencurvedocker/curve-base:build-debian9 AS curve-sdk +ENV GITHUB_PROXY=https://ghproxy.com/ +RUN 
git clone ${GITHUB_PROXY}https://github.com/opencurve/curve \ + && cd /curve \ + && bash replace-curve-repo.sh \ + && make dep stor=fs \ + && bash mk-tar.sh \ + && mv curve_*.tar.gz curve_sdk.tar.gz + +FROM opencurvedocker/curve-base:build-debian9 AS curve-tgt +ENV GITHUB_PROXY=https://ghproxy.com/ +COPY --from=curve-sdk /curve/curve_sdk.tar.gz / +RUN tar -zxvf curve_sdk.tar.gz \ + && rm curve_sdk.tar.gz \ + && cd /curve/curve-sdk \ + && cp -f lib/* /usr/lib \ + && cp -f bin/* /usr/bin \ + && mkdir -p /usr/curvefs \ + && cp -f curvefs/* /usr/curvefs \ + && cp -f include/* /usr/include \ + && ldconfig \ + && apt update \ + && apt install -y devscripts \ + librdmacm-dev \ + libibverbs-dev \ + xsltproc \ + docbook-xsl \ + && cd / \ + && git clone --branch curve ${GITHUB_PROXY}https://github.com/opencurve/curve-tgt \ + && cd curve-tgt \ + && make programs diff --git a/docker/debian9/curve-tgt/Makefile b/docker/debian9/curve-tgt/Makefile new file mode 100644 index 0000000000..36e1ad1f72 --- /dev/null +++ b/docker/debian9/curve-tgt/Makefile @@ -0,0 +1,4 @@ +.PHONY: build + +build: + docker build --target curve-tgt -t opencurvedocker/curve-base:curve-tgt-debian9 . diff --git a/docker/debian9/entrypoint.sh b/docker/debian9/entrypoint.sh index cb150b4b86..16d47fce63 100644 --- a/docker/debian9/entrypoint.sh +++ b/docker/debian9/entrypoint.sh @@ -6,7 +6,7 @@ g_role="" g_args="" g_prefix="" -g_preexec="" +g_preexec="/curvebs/tools-v2/sbin/daemon" g_binary="" g_start_args="" @@ -123,7 +123,7 @@ function main() { prepare create_directory [[ $(command -v crontab) ]] && cron - [[ ! -z $g_preexec ]] && $g_preexec + [[ ! -z $g_preexec ]] && $g_preexec & if [ $g_role == "etcd" ]; then exec $g_binary $g_start_args >>$g_prefix/logs/etcd.log 2>&1 elif [ $g_role == "monitor" ]; then diff --git a/docs/cn/build_and_run.md b/docs/cn/build_and_run.md index db681fd068..128fa6ae90 100644 --- a/docs/cn/build_and_run.md +++ b/docs/cn/build_and_run.md @@ -2,13 +2,14 @@ # 编译环境搭建 -请注意: -1. 如您只是想体验CURVE的部署流程和基本功能,**则不需要编译CURVE**,请参考[部署](https://github.com/opencurve/curveadm/wiki) -2. 本文档仅用来帮助你搭建CURVE代码编译环境,便于您参与CURVE的开发调试 +**请注意:** -**注意:** - -mk-tar.sh 和 mk-deb.sh 用于 curve v2.0 之前版本的编译打包,v2.0 版本之后不再维护。 +1. 如您只是想体验Curve的部署流程和基本功能,**则不需要编译Curve**,请参考 [部署](https://github.com/opencurve/curveadm/wiki) +2. 本文档仅用来帮助你搭建Curve代码编译环境,便于您参与Curve的开发调试 +3. 以下镜像和编译过程目前仅支持 x86 系统 +4. 如要编译[arm分支](https://github.com/opencurve/curve/pull/2408),请根据 [Dockerfile](https://github.com/opencurve/curve/blob/master/docker/debian9/compile/Dockerfile)打包编译镜像 +5. 目前master分支不支持在arm系统上编译运行 +6. 
推荐 debian 10及以上版本的操作系统,其他操作系统未经过全面测试 ## 使用Docker进行编译(推荐方式) @@ -20,10 +21,6 @@ mk-tar.sh 和 mk-deb.sh 用于 curve v2.0 之前版本的编译打包,v2.0 版 docker pull opencurvedocker/curve-base:build-debian9 ``` -```bash -docker pull opencurvedocker/curve-base:build-debian9 -``` - 方法二:手动构建docker镜像 使用工程目录下的 docker/debian9/compile/Dockerfile 进行构建,命令如下: @@ -32,36 +29,48 @@ docker pull opencurvedocker/curve-base:build-debian9 docker build -t opencurvedocker/curve-base:build-debian9 ``` -注意:上述操作不建议在CURVE工程目录执行,否则构建镜像时会把当前目录的文件都复制到docker镜像中,建议把Dockerfile拷贝到新建的干净目录下进行docker镜像的构建。 +**注意:** 上述操作不建议在Curve工程目录执行,否则构建镜像时会把当前目录的文件都复制到docker镜像中,建议把Dockerfile拷贝到新建的干净目录下进行docker镜像的构建。 ### 在docker镜像中编译 ```bash -docker run -it opencurvedocker/curve-base:build-debian9 /bin/bash -cd git clone https://github.com/opencurve/curve.git 或者 git clone https://gitee.com/mirrors/curve.git -# (可选步骤)将外部依赖替换为国内下载点或镜像仓库,可以加快编译速度: bash replace-curve-repo.sh +cd curve +# 如果你想在容器内完成编译+制作+上传镜像的操作,可以添加以下参数 +# -v /var/run/docker.sock:/var/run/docker.sock -v /root/.docker:/root/.docker +# --rm 会在容器退出后自动删除容器,如果你想保留容器,可以去掉该参数 +docker run --rm -v $(pwd):/curve -w /curve -v ${HOME}/.cache:${HOME}/.cache -v ${HOME}/go:${HOME}/go --user $(id -u ${USER}):$(id -g ${USER}) -v /etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro --privileged -it opencurvedocker/curve-base:build-debian9 bash +# (中国大陆可选)将外部依赖替换为国内下载点或镜像仓库,可以加快编译速度: bash replace-curve-repo.sh + # curve v2.0 之前 bash mk-tar.sh (编译 curvebs 并打tar包) bash mk-deb.sh (编译 curvebs 并打debian包) -# curve v2.0 及之后 -编译 curvebs: cd curve && make build stor=bs dep=1 -编译 curvefs: cd curve && make build stor=fs dep=1 + +# (当前)curve v2.0 及之后 +# 编译 curvebs: +make build stor=bs dep=1 +# or +make dep stor=bs && make build stor=bs +# 编译 curvefs: +make build stor=fs dep=1 +# or +make dep stor=fs && make build stor=fs ``` +**注意:** `mk-tar.sh` 和 `mk-deb.sh` 用于 curve v2.0 之前版本的编译打包,v2.0 版本之后不再维护。 ## 在物理机上编译 -CURVE编译依赖的包括: +Curve编译依赖的包括: | 依赖 | 版本 | |:-- |:-- | | bazel | 4.2.2 | | gcc | 支持c++11的兼容版本 | -CURVE的其他依赖项,均由bazel去管理,不可单独安装。 +Curve的其他依赖项,均由bazel去管理,不可单独安装。 -**注意** 4.* 版本的 bazel 均可以成功编译 curve 项目,其他版本不兼容。 +**注意:** 4.* 版本的 bazel 均可以成功编译 Curve 项目,其他版本不兼容。 4.2.2 为推荐版本。 ### 安装依赖 @@ -70,15 +79,41 @@ CURVE的其他依赖项,均由bazel去管理,不可单独安装。 ### 一键编译 -``` +```bash git clone https://github.com/opencurve/curve.git 或者 git clone https://gitee.com/mirrors/curve.git -# (可选步骤)将外部依赖替换为国内下载点或镜像仓库,可以加快编译速度: bash replace-curve-repo.sh +# (中国大陆可选)将外部依赖替换为国内下载点或镜像仓库,可以加快下载速度: bash replace-curve-repo.sh # curve v2.0 之前 bash mk-tar.sh (编译 curvebs 并打tar包) bash mk-deb.sh (编译 curvebs 并打debian包) -# curve v2.0 及之后 -编译 curvebs: cd curve && make build stor=bs dep=1 -编译 curvefs: cd curve && make build stor=fs dep=1 + +# (当前)curve v2.0 及之后 +# 编译 curvebs: +make build stor=bs dep=1 +# or +make dep stor=bs && make build stor=bs +# 编译 curvefs: +make build stor=fs dep=1 +# or +make dep stor=fs && make build stor=fs +``` +### 制作镜像 + +该步骤可以在容器内执行也可以在物理机上执行。 +注意若是在容器内执行,需要在执行 `docker run` 命令时添加 `-v /var/run/docker.sock:/var/run/docker.sock -v /root/.docker:/root/.docker` 参数。 + +```bash +# 编译 curvebs: +# 后面的tag参数可以自定义,用于上传到镜像仓库 +make image stor=bs tag=test +# 编译 curvefs: +make image stor=fs tag=test +``` + +### 上传镜像 + +```bash +# test 为上一步中的tag参数 +docker push test ``` ## 测试用例编译及执行 @@ -86,15 +121,24 @@ bash mk-deb.sh (编译 curvebs 并打debian包) ### 编译全部模块 仅编译全部模块,不进行打包 -``` +```bash bash ./build.sh ``` -### 编译对应模块的代码和运行测试 +### 列出所有测试模块 + +```bash +# curvebs +bazel query '//test/...' +# curvefs +bazel query '//curvefs/test/...' 
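+# (sketch) any target printed by the queries above can be passed straight to
+# `bazel build`, together with the compile flags shown in the next section, e.g.:
+#   bazel build test/common:common-test --copt -DHAVE_ZLIB=1 --define=with_glog=true --compilation_mode=dbg --define=libunwind=true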
+``` + +### 编译对应模块的代码 编译对应模块,例如test/common目录下的common-test测试: -``` +```bash bazel build test/common:common-test --copt -DHAVE_ZLIB=1 --define=with_glog=true --compilation_mode=dbg --define=libunwind=true ``` @@ -171,7 +215,8 @@ cd etcd-v3.4.10-linux-amd64 && cp etcd etcdctl /usr/bin ``` #### 执行单个测试模块 -``` + +```bash ./bazel-bin/test/common/common-test ``` diff --git a/docs/cn/gmock.md b/docs/cn/gmock.md index 8120e8f498..4dde034b85 100644 --- a/docs/cn/gmock.md +++ b/docs/cn/gmock.md @@ -118,7 +118,7 @@ + 第6行WillOnce(action)是定义一次调用时所产生的行为,比如定义该方法返回怎么样的值等等。 + 第7行WillRepeatedly(action)的意思是缺省/重复行为。 -## Machers(匹配器) +## Matchers(匹配器) Matcher用于定义Mock类中的方法的形参的值(当然,如果你的方法不需要形参时,可以保持match为空)。 @@ -179,7 +179,7 @@ Matcher可以在ON_CALL() or EXPECT_CALL()内部使用,也可以直接判断 | ContainerEq(container) | 类型Eq(container),就是输出结果有点不一样,这里输出结果会带上哪些个元素不被包含在另一个容器中 | | Pointwise(m, container) | | -所有这些Machers都在命名空间::testing中,使用时需引入这一命名空间 +所有这些Matchers都在命名空间::testing中,使用时需引入这一命名空间 例: using ::testing::Ge; @@ -197,7 +197,7 @@ Matcher可以在ON_CALL() or EXPECT_CALL()内部使用,也可以直接判断 | Field(&class::field, m) | argument.field (或 argument->field, 当argument是一个指针时)与匹配器m匹配, 这里的argument是一个class类的实例. | |:----------------------|:----------------------| -| Key(e) | 容器的key值符合e,e可以是一个值或者是一个Macher | +| Key(e) | 容器的key值符合e,e可以是一个值或者是一个Matcher | | Pair(m1, m2) | argument是一个std::pair,并且argument.first等于m1,argument.second等于m2. | | Property(&class::property, m) | argument.property()(或argument->property(),当argument是一个指针时)与匹配器m匹配, 这里的argument是一个class类的实例. | @@ -292,7 +292,7 @@ Side Effects 指的是这个action还将对测试起的作用,比如给指针 | Invoke(f) | 使用模拟函数的参数调用f, 这里的f可以是全局/静态函数或函数对象. | |:-------------------------|:----------------------| -| Invoke(object_pointer, &class::method) | 使用模拟函数的参数调用object_pointer对象的mothod方法. | +| Invoke(object_pointer, &class::method) | 使用模拟函数的参数调用object_pointer对象的method方法. | ### 复合动作 diff --git a/docs/cn/mds.md b/docs/cn/mds.md index fce2f1c82c..cd8eaff5a8 100644 --- a/docs/cn/mds.md +++ b/docs/cn/mds.md @@ -8,16 +8,15 @@ MDS是中心节点,负责元数据管理、集群状态收集与调度。MDS包含以下几个部分: -- Topoloy: 管理集群的 **topo 元数据**信息。 -- Nameserver: 管理**文件的元数据**信息。 -- Copyset: 副本放置策略。 - +- Topology: 管理集群的 **topo 元数据**信息。 +- NameServer: 管理**文件的元数据**信息。 +- CopySet: 副本放置策略。 - Heartbeat: 心跳模块。跟chunkserver进行交互,收集chunkserver上的负载信息,copyset信息等。 - Schedule: 调度模块。用于自动容错和负载均衡。 -## Topoloy +## Topology - topology用于管理和组织机器,利用底层机器的放置、网络的规划以面向业务提供如下功能和非功能需求。 +topology用于管理和组织机器,利用底层机器的放置、网络的规划以面向业务提供如下功能和非功能需求。 1. **故障域的隔离**:比如副本的方式分布在不同机器,不同机架,或是不同的交换机下面。 2. 
**隔离和共享**:不同用户的数据可以实现固定物理资源的隔离和共享。 @@ -26,13 +25,13 @@ curve整体的拓扑结构如下图: mds-topology-all.png -**chunkserver**:用于抽象描述物理服务器上的一块物理磁盘(SSD),chunkserver以一块磁盘作为最小的服务单元。 +**chunkserver**: 用于抽象描述物理服务器上的一块物理磁盘(SSD),chunkserver以一块磁盘作为最小的服务单元。 -**server:** 用于抽象描述一台物理服务器,chunkserver必须归属于server。 +**server**: 用于抽象描述一台物理服务器,chunkserver必须归属于server。 -**zone:** 故障隔离的基本单元,一般来说属于不同zone的机器至少是部署在不同的机架,再要求严格一点的话,属于不同zone的机器可以部署在不同机架组下面(一个机架组共享一组堆叠 leaf switch),一个server必须归属于一个zone。 +**zone**: 故障隔离的基本单元,一般来说属于不同zone的机器至少是部署在不同的机架,再要求严格一点的话,属于不同zone的机器可以部署在不同机架组下面(一个机架组共享一组堆叠 leaf switch),一个server必须归属于一个zone。 -**pool:** 用于实现对机器资源进行物理隔离,pool中server之间的交互仅限于pool之内的server。运维上,可以在上架一批新的机器的时候,规划一个全新的pool,以pool为单元进行物理资源的扩容(pool内扩容也可以支持,但是不建议pool内扩容,因为会影响每个chunkserver上的copyset的数量)。 +**pool**: 用于实现对机器资源进行物理隔离,pool中server之间的交互仅限于pool之内的server。运维上,可以在上架一批新的机器的时候,规划一个全新的pool,以pool为单元进行物理资源的扩容(pool内扩容也可以支持,但是不建议pool内扩容,因为会影响每个chunkserver上的copyset的数量)。 借鉴ceph的设计,curve在如上物理pool之上又引入逻辑pool的概念,以实现统一存储系统的需求,即在单个存储系统中多副本PageFile支持块设备、三副本AppendFile(待开发)支持在线对象存储、AppendECFile(待开发)支持近线对象存储可以共存。 @@ -42,7 +41,7 @@ curve整体的拓扑结构如下图: 通过结合curve的用户系统,LogicalPool可以通过配置限定特定user使用的方式,实现多个租户数据物理隔离(待开发)。 -**logicalPool**:用于在逻辑层面建立不同特性的pool,比如如上AppendECFile pool、AppendEC pool 、PageFile pool;实现user级别的数据隔离和共享。 +**logicalPool**: 用于在逻辑层面建立不同特性的pool,比如如上AppendECFile pool、AppendEC pool 、PageFile pool;实现user级别的数据隔离和共享。 ## NameServer @@ -94,7 +93,7 @@ ChunkServer,Copyset和Chunk三者之间的关系如下图: ​ 3. 通过上述信息的定期更新,作为schedule 模块进行均衡及配置变更的依据 -​ 4. 通过chunkserver定期上报copyset的copyset的epoch, 检测chunkserver的copyset与mds差异,同步两者的copyset信息 +​ 4. 通过chunkserver定期上报copyset的epoch,检测chunkserver的copyset与mds差异,同步两者的copyset信息 ​ 5. 支持配置变更功能,在心跳回复报文中下发mds发起的配置变更命令,并在后续心跳中获取配置变更进度。 @@ -106,19 +105,19 @@ ChunkServer,Copyset和Chunk三者之间的关系如下图: mds 端的心跳主要由三个部分组成: -*TopoUpdater:* 根据 chunkserver 上报的 copyset 信息更新拓扑中的信息。 +*TopoUpdater*: 根据 chunkserver 上报的 copyset 信息更新拓扑中的信息。 -*ConfGenerator:* 将当前上报的 copyset 信息提交给调度模块,获取该 copyset 上可能需要执行的任务。 +*ConfGenerator*: 将当前上报的 copyset 信息提交给调度模块,获取该 copyset 上可能需要执行的任务。 -*HealthyChecker:* 检查集群中的 chunkserver 在当前时间点距离上一次心跳的时间,根据这个时间差更新chunkserver状态。 +*HealthyChecker*: 检查集群中的 chunkserver 在当前时间点距离上一次心跳的时间,根据这个时间差更新chunkserver状态。 ##### Chunkserver端 chunkserver 端的心跳由两个部分组成: -*ChunkServerInfo/CopySetInfo:* 获取当前 chunkserver 上的 copyset 信息上报给 MDS。 +*ChunkServerInfo/CopySetInfo*: 获取当前 chunkserver 上的 copyset 信息上报给 MDS。 -*Order ConfigChange:* 将 MDS 下发的任务提交给对应的 copyset 复制组。 +*Order ConfigChange*: 将 MDS 下发的任务提交给对应的 copyset 复制组。 ## Schedule @@ -130,9 +129,9 @@ chunkserver 端的心跳由两个部分组成: -**Coordinator:** 调度模块的对外接口。心跳会将chunkserver上报上来的copyset信息提交给Coordinator,内部根据该信息判断当前copyset是否有配置变更任务执行,如果有任务则下发。 +**Coordinator**: 调度模块的对外接口。心跳会将chunkserver上报上来的copyset信息提交给Coordinator,内部根据该信息判断当前copyset是否有配置变更任务执行,如果有任务则下发。 -**任务计算:** 任务计算模块包含了多个*定时任务* 和 *触发任务*。*定时任务* 中,``CopySetScheduler`` 是copyset均衡调度器,根据集群中copyset的分布情况生成copyset迁移任务;``LeaderScheduler`` 是leader均衡调度器,根据集群中leader的分布情况生成leader变更任务;``ReplicaScheduler`` 是副本数量调度器,根据当前copyset的副本数生成副本增删任务;``RecoverScheduler`` 是恢复调度器,根据当前copyset副本的存活状态生成迁移任务。*触发任务* 中,``RapidLeaderScheduler`` 是快速leader均衡器,由外部触发,一次生成多个leader变更任务,使得集群的leader尽快大达到均衡状态。``TopoAdapter`` 用于获取Topology中调度需要使用的数据。``Common Strategy`` 中是通用的副本添加和移除策略。 +**任务计算**: 任务计算模块包含了多个*定时任务* 和 *触发任务*。*定时任务* 中,``CopySetScheduler`` 是copyset均衡调度器,根据集群中copyset的分布情况生成copyset迁移任务;``LeaderScheduler`` 是leader均衡调度器,根据集群中leader的分布情况生成leader变更任务;``ReplicaScheduler`` 是副本数量调度器,根据当前copyset的副本数生成副本增删任务;``RecoverScheduler`` 是恢复调度器,根据当前copyset副本的存活状态生成迁移任务。*触发任务* 中,``RapidLeaderScheduler`` 
是快速leader均衡器,由外部触发,一次生成多个leader变更任务,使得集群的leader尽快达到均衡状态。``TopoAdapter`` 用于获取Topology中调度需要使用的数据。``Common Strategy`` 中是通用的副本添加和移除策略。 -**任务管理:** 任务管理模块用于管理计算模块产生的任务。``operatorController`` 是任务集合,用于存放和获取任务;``operatorStateUpdate`` 根据上报的copyset信息更新状态;``Metric``用于统计不同任务个数。 +**任务管理**: 任务管理模块用于管理计算模块产生的任务。``operatorController`` 是任务集合,用于存放和获取任务;``operatorStateUpdate`` 根据上报的copyset信息更新状态;``Metric``用于统计不同任务个数。 diff --git a/docs/cn/monitor.md b/docs/cn/monitor.md index bfccbb4a38..9ffd3c6322 100644 --- a/docs/cn/monitor.md +++ b/docs/cn/monitor.md @@ -32,9 +32,9 @@ CURVE 中 bvar 的具体使用方式可以查看: [chunkserver metric](../../src/chunkserver/chunkserver_metrics.h) -[mds topoloy metric](../../src/mds/topology/topology_metric.h) +[mds topology metric](../../src/mds/topology/topology_metric.h) -[mds shedule metric](../../src/mds/schedule/scheduleMetrics.h) +[mds schedule metric](../../src/mds/schedule/scheduleMetrics.h) ## prometheus + grafana diff --git a/docs/en/build_and_run_en.md b/docs/en/build_and_run_en.md index 838a40b46a..c2037fd777 100644 --- a/docs/en/build_and_run_en.md +++ b/docs/en/build_and_run_en.md @@ -3,8 +3,12 @@ # Build compilation environment **Note:** -1. If you just want to experience the deployment and basic functions of CURVE, **you do not need to compile CURVE**, please refer to [deployment](https://github.com/opencurve/curveadm/wiki). -2. This document is only used to help you build the CURVE code compilation environment, which is convenient for you to participate in the development, debugging and run tests of CURVE. +1. If you just want to experience the deployment and basic functions of Curve, **you do not need to compile Curve**, please refer to [deployment](https://github.com/opencurve/curveadm/wiki). +2. This document is only used to help you build the Curve code compilation environment, which is convenient for you to participate in the development, debugging and run tests of Curve. +3. The following image and build procedures are currently only supported on x86 systems. +4. To compile [arm branch](https://github.com/opencurve/curve/pull/2408), please follow [Dockerfile](https://github.com/opencurve/curve/blob/master/docker/debian9/compile/Dockerfile) to package and compile the image. +5. Currently the master branch does not support compiling and running on the arm system +6. Recommend using Debian 10 or later versions of the operating system. Other operating systems have not been thoroughly tested. ## Compile with docker (recommended) @@ -24,35 +28,48 @@ Use the Dockerfile in the project directory to build. The command is as follows: docker build -t opencurvedocker/curve-base:build-debian9 ``` -Note: The above operations are not recommended to be performed in the CURVE project directory, otherwise the files in the current directory will be copied to the docker image when building the image. It is recommended to copy the Dockerfile to the newly created clean directory to build the docker image. +**Note:** The above operations are not recommended to be performed in the Curve project directory, otherwise the files in the current directory will be copied to the docker image when building the image. It is recommended to copy the Dockerfile to the newly created clean directory to build the docker image. 
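+A minimal sketch of the "clean directory" approach described above, run from the root of the Curve source tree (the temporary path is arbitrary):
+
+```bash
+mkdir -p /tmp/curve-build-image
+cp docker/debian9/compile/Dockerfile /tmp/curve-build-image/
+cd /tmp/curve-build-image
+docker build -t opencurvedocker/curve-base:build-debian9 .
+```
+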
### Compile in docker image ```bash -docker run -it opencurvedocker/curve-base:build-debian9 /bin/bash -cd -git clone https://github.com/opencurve/curve.git or git clone https://gitee.com/mirrors/curve.git -# (Optional step) Replace external dependencies with domestic download points or mirror warehouses, which can speed up compilation: bash replace-curve-repo.sh +git clone https://github.com/opencurve/curve.git 或者 git clone https://gitee.com/mirrors/curve.git +cd curve +# If you want to complete the operation of compiling + making + uploading the image in the container, you can add the following parameters +# -v /var/run/docker.sock:/var/run/docker.sock -v /root/.docker:/root/.docker +#--rm will automatically delete the container after the container exits, if you want to keep the container, you can remove this parameter +docker run --rm -v $(pwd):/curve -w /curve -v ${HOME}:${HOME} --user $(id -u ${USER}):$(id -g ${USER}) -v /etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro --privileged -it opencurvedocker/curve-base:build-debian9 bash +# (Optional for Chinese mainland) Replace external dependencies with domestic download points or mirror warehouses, which can speed up compilation: bash replace-curve-repo.sh + # before curve v2.0 bash mk-tar.sh (compile curvebs and make tar package) bash mk-deb.sh (compile curvebs and make debian package) -# after curve v2.0 -compile curvebs: cd curve && make build stor=bs dep=1 -compile curvefs: cd curve && make build stor=fs dep=1 + +# (current) after curve v2.0 +# compile curvebs: +make build stor=bs dep=1 +# or +make dep stor=bs && make build stor=bs +# compile curvefs: +make build stor=fs dep=1 +# or +make dep stor=fs && make build stor=fs ``` +**Note:** `mk-tar.sh` and `mk-deb.sh` are used for compiling and packaging curve v2.0. They are no longer maintained after v2.0. + ## Compile on a physical machine -CURVE compilation depends on: +Curve compilation depends on: | Dependency | Version | |:-- |:-- | | bazel | 4.2.2 | | gcc | Compatible version supporting C++11 | -Other dependencies of CURVE are managed by bazel and do not need to be installed separately. +Other dependencies of Curve are managed by bazel and do not need to be installed separately. -**Note** The 4.* version of bazel can successfully compile the curve project, other versions are not compatible. +**Note:** The 4.* version of bazel can successfully compile the curve project, other versions are not compatible. 4.2.2 is the recommended version. 
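+As a quick sanity check before building on a physical machine, you can verify the toolchain first (a minimal sketch; the exact version strings depend on your installation):
+
+```bash
+bazel --version   # expect a 4.x release, 4.2.2 is recommended
+gcc --version     # any GCC release with full C++11 support
+```
+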
### Installation dependency @@ -63,13 +80,40 @@ For dependencies, you can refer to the installation steps in [dockerfile](../../ ```bash git clone https://github.com/opencurve/curve.git or git clone https://gitee.com/mirrors/curve.git -# (Optional step) Replace external dependencies with domestic download points or mirror warehouses, which can speed up compilation: bash replace-curve-repo.sh +# (Mainland China optional) Replace external dependencies with domestic download points or mirror warehouses, which can speed up compilation: bash replace-curve-repo.sh # before curve v2.0 bash mk-tar.sh (compile curvebs and make tar package) bash mk-deb.sh (compile curvebs and make debian package) -# after curve v2.0 -compile curvebs: cd curve && make build stor=bs dep=1 -compile curvefs: cd curve && make build stor=fs dep=1 + +# (current) after curve v2.0 +# compile curvebs: +make build stor=bs dep=1 +# or +make dep stor=bs && make build stor=bs +# compile curvefs: +make build stor=fs dep=1 +# or +make dep stor=fs && make build stor=fs +``` + +### Make a mirror image + +This step can be performed in a container or on a physical machine. +Note that if it is executed in a container, you need to add `-v /var/run/docker.sock:/var/run/docker.sock -v /root/.docker:/root/.docker when executing the `docker run` command ` parameter. + +```bash +# Compile curvebs: +# The following tag parameter can be customized for uploading to the mirror warehouse +make image stor=bs tag=test +# Compile curvefs: +make image stor=fs tag=test +``` + +### Upload image + +```bash +# test is the tag parameter in the previous step +docker push test ``` ## Test case compilation and execution @@ -78,15 +122,24 @@ compile curvefs: cd curve && make build stor=fs dep=1 Only compile all modules without packaging -``` +```bash $ bash ./build.sh ``` -### Compile the corresponding module code and run the test +### List all test modules + +```bash +# curvebs +bazel query '//test/...' +# curvefs +bazel query '//curvefs/test/...' +``` + +### Compile the corresponding module code Compile corresponding modules, such as common-test in the `test/common` directory -``` +```bash $ bazel build test/common:common-test --copt -DHAVE_ZLIB=1 \ $ --define=with_glog=true --compilation_mode=dbg \ $ --define=libunwind=true @@ -170,7 +223,7 @@ $ cd etcd-v3.4.10-linux-amd64 && cp etcd etcdctl /usr/bin #### Execute a single test module -``` +```bash $ ./bazel-bin/test/common/common-test ``` diff --git a/docs/en/mds_en.md b/docs/en/mds_en.md index db1a7a7417..10e5241d1c 100644 --- a/docs/en/mds_en.md +++ b/docs/en/mds_en.md @@ -7,8 +7,8 @@ MDS is the center node of the system, responsible for managing metadata, collecting cluster status data and scheduling. MDS consists of following components: - Topology: Managing topology metadata of the cluster -- Nameserver: Managing file metadata -- Copyset: Replica placement strategy +- NameServer: Managing file metadata +- CopySet: Replica placement strategy - Heartbeat: Receiving and replying to heartbeat message from chunkserver, collecting load status and copyset info of chunkserver - Schedule: Module for fault tolerance and load balance @@ -26,13 +26,13 @@ Figure 1 shows the topological diagram of CURVE and the explanation of correspon Figure 1: Topological diagram of CURVE

-**chunkserver**:A chunkserver is an abstraction of a physical disk (SSD in our scenario) in a server (physical), and disk is the service unit of chunkserver. +**chunkserver**: A chunkserver is an abstraction of a physical disk (SSD in our scenario) in a server (physical), and disk is the service unit of chunkserver. -**server:** Server represent an actual physical server, to one of which any chunkservers must belong. +**server**: Server represent an actual physical server, to one of which any chunkservers must belong. -**zone:** Zone is the unit of failure isolation. In common cases, servers (a physical machine) of different zones should at least be deployed under different racks. To become stricter for some scenarios, they should be deployed under different groups of racks (racks that share the same set of leaf switches). A server must be owned by a certain zone. +**zone**: Zone is the unit of failure isolation. In common cases, servers (a physical machine) of different zones should at least be deployed under different racks. To become stricter for some scenarios, they should be deployed under different groups of racks (racks that share the same set of leaf switches). A server must be owned by a certain zone. -**pool:** Pool is for implementing physical isolation of resources. Servers are not able to communicate across their pool. In the maintenance of the system, we can arrange a pool for a new set of machines, and extend the storage by pools. Extending storage by adding machines inside a pool is supported, but this is not recommended since it will affect the copyset number of every chunkserver. +**pool**: Pool is for implementing physical isolation of resources. Servers are not able to communicate across their pool. In the maintenance of the system, we can arrange a pool for a new set of machines, and extend the storage by pools. Extending storage by adding machines inside a pool is supported, but this is not recommended since it will affect the copyset number of every chunkserver. Learned from the design of Ceph, CURVE introduced the concept of logical pool on top of a physical pool in order to satisfy the requirement of building a unified storage system. In our design, we support the coexist of block storage (based on multi-replica), online object storage (based on three replicas storage that support appends, to be implemented) and nearline object storage (based on Erasure Code storage that support appends, to be implemented). @@ -100,7 +100,7 @@ Figure 5 demonstrates the relation between ChunkServer, Copyset and Chunk: ## Heartbeat -Heartbeat is for data exchange between center node and data nodes, and it works in following ways: +Heartbeat is for data exchange between center nodes and data nodes, and it works in following ways: 1. Monitor online status(online/offline) of chunkservers by regular heartbeats from chunkserver. 2. Record status information(disk capacity, disk load, copyset load etc.) reported by chunkservers for Ops tools. @@ -123,7 +123,7 @@ On MDS side, heartbeat module consists of three parts: *ConfGenerator*: Forward info reported by copyset to scheduler, and fetch operations for copyset to execute. -*HealthyChecker:* Update chunkserver status by checking the time gap between current time and the last heartbeat of a chunkserver. +*HealthyChecker*: Update chunkserver status by checking the time gap between current time and the last heartbeat of a chunkserver. 
##### Chunkserver side @@ -144,8 +144,8 @@ System scheduling is for implementing auto fault tolerance and load balancing, w Figure 7 shows the structure of the scheduler module. -**Coordinator:** Coordinator serves as the interface of the scheduler module. After receiving copyset info provided by heartbeats from chunkserver, coordinator will decide whether there's any configuration change for current copyset, and will distribute the change if there is. +**Coordinator**: Coordinator serves as the interface of the scheduler module. After receiving copyset info provided by heartbeats from chunkserver, coordinator will decide whether there's any configuration change for current copyset, and will distribute the change if there is. -**Task calculation:**Task calculation module is for generating tasks by calculating data of corresponding status. This module consists of a few regular tasks and a triggerable task. Regular tasks include CopySetScheduler, LeaderScheduler, ReplicaScheduler and RecoverScheduler. CopySetScheduler is the scheduler for copyset balancing, generating copysets immigration tasks according to their distribution. LeaderScheduler is the scheduler for leader balancing, which responsible for changing leader according to leaders' distribution. ReplicaScheduler is for scheduling replica number, managing the generation and deletion of replica by analysing current replica numbers of a copyset, while RecoverScheduler controls the immigration of copysets according to their liveness. For triggerable task, RapidLeaderScheduler is for quick leader balancing, triggered by external events, and generates multiple leader changing task at a time to make leaders of the cluster balance as quick as possible. Another two modules are TopoAdapter and CommonStrategy. The former one is for fetching data required by topology module, while the later one implements general strategies for adding and removing replica. +**Task calculation**: Task calculation module is for generating tasks by calculating data of corresponding status. This module consists of a few regular tasks and a triggerable task. Regular tasks include CopySetScheduler, LeaderScheduler, ReplicaScheduler and RecoverScheduler. CopySetScheduler is the scheduler for copyset balancing, generating copysets immigration tasks according to their distribution. LeaderScheduler is the scheduler for leader balancing, which responsible for changing leader according to leaders' distribution. ReplicaScheduler is for scheduling replica number, managing the generation and deletion of replica by analysing current replica numbers of a copyset, while RecoverScheduler controls the immigration of copysets according to their liveness. For triggerable task, RapidLeaderScheduler is for quick leader balancing, triggered by external events, and generates multiple leader changing task at a time to make leaders of the cluster balance as quick as possible. Another two modules are TopoAdapter and CommonStrategy. The former one is for fetching data required by topology module, while the later one implements general strategies for adding and removing replica. -**Task managing:**Task managing module manages tasks generated by task calculation module. Inside this module we can see components OperatorController, OperatorStateUpdate and Metric, responsible for fetching and storing tasks, updating status according to copyset info reported and measuring tasks number respectively. 
\ No newline at end of file +**Task managing**: Task managing module manages tasks generated by task calculation module. Inside this module we can see components OperatorController, OperatorStateUpdate and Metric, responsible for fetching and storing tasks, updating status according to copyset info reported and measuring tasks number respectively. \ No newline at end of file diff --git a/docs/en/monitor_en.md b/docs/en/monitor_en.md index 75cc5843bc..20c6a760d8 100644 --- a/docs/en/monitor_en.md +++ b/docs/en/monitor_en.md @@ -32,9 +32,9 @@ The specific usage of bvar in CURVE can be viewed: [chunkserver metric](../../src/chunkserver/chunkserver_metrics.h) -[mds topoloy metric](../../src/mds/topology/topology_metric.h) +[mds topology metric](../../src/mds/topology/topology_metric.h) -[mds shedule metric](../../src/mds/schedule/scheduleMetrics.h) +[mds schedule metric](../../src/mds/schedule/scheduleMetrics.h) ## prometheus + grafana diff --git a/docs/images/Curve-arch.odg b/docs/images/Curve-arch.odg new file mode 100644 index 0000000000..b0f2fdf9c7 Binary files /dev/null and b/docs/images/Curve-arch.odg differ diff --git a/docs/images/Curve-arch.png b/docs/images/Curve-arch.png index 100308e877..0f37ec63c7 100644 Binary files a/docs/images/Curve-arch.png and b/docs/images/Curve-arch.png differ diff --git a/docs/images/Curve-csi_Requirements.png b/docs/images/Curve-csi_Requirements.png new file mode 100644 index 0000000000..230f9c4b1c Binary files /dev/null and b/docs/images/Curve-csi_Requirements.png differ diff --git a/docs/practical/curvebs_csi.md b/docs/practical/curvebs_csi.md new file mode 100644 index 0000000000..26029b1caa --- /dev/null +++ b/docs/practical/curvebs_csi.md @@ -0,0 +1,306 @@ +[TOC] + +## Curve-CSI 与 CurveBS v2.5 使用对接 + + 本文的补充[curve-csi使用说明](https://github.com/opencurve/curve-csi/blob/master/docs/README.md)中Requirements部分,即用curveadm容器化部署curvebs服务端,用ansible在物理机上部署curvebs客户端,包括nebd nbd sdk。物理机上部署curvebs客户端后,curve-csi就可以调用物理机上curve命令创建卷,curve-csi通过nebd服务挂载nbd。curvebs版本使用release2.5。 + +### 环境信息 + +| 主机名 | 系统 | IP | +| ------- | ------- | ---------- | +| 主控机 | debian9 | ip | +| host109 | debian9 | ip.*.*.109 | +| host110 | debian9 | ip.*.*.110 | +| host111 | debian9 | ip.*.*.111 | + +### curve_release2.5 源码编译与镜像打包 + +#### 源码编译 + +```shell + # 下载curve release2.5 源码 + $ git clone https://github.com/opencurve/curve.git -b release2.5 + # 编译curve release2.5 + $ cd /编译路径/curve + $ bash replace-curve-repo.sh + $ bash mk-tar.sh + # 编译完成所需的二进制和ansile脚本,用于部署curvebs客户端 + $ cd /编译路径/curve/build + $ ls + curve nbd-package nebd-package +``` + +#### 镜像打包 + +```shell + # 编译curve release2.5镜像,用于部署curvebs服务端 + $ cd /编译路径/curve + $ make image stor=bs tag=wfcurvedocker/curvebs:v2.5 os=debian9 + # 根据需求查看Makefile和docker/debian9/中Dockefile文件 + $ make help + # 将镜像上传到本地docker仓库,需要建本地dokcer仓库 + $ docker tag wfcurvedocker/curvebs:v2.5 ip.*.*.202:5000/wfcurvedocker/curvebs:v2.5 +``` + docker离线仓库配置请参考[Docker入门私有库](https://blog.csdn.net/weixin_37926734/article/details/123279987) + +### curvebs 服务端部署 + +#### curveadm 部署 + + 请参考[安装CurveAdm](https://github.com/opencurve/curveadm/wiki/install-curveadm#%E5%AE%89%E8%A3%85-curveadm) + +```shell + $ bash -c "$(curl -fsSL https://curveadm.nos-eastchina1.126.net/script/install.sh)" +``` + +#### curvebs服务端配置 + + 请参考[yaml配置](https://github.com/opencurve/curveadm/tree/develop/configs/bs/cluster) + +```yaml +hosts.yaml + global: + user: root + ssh_port: 22 + private_key_file: /root/.ssh/id_rsa + + hosts: + - host: host109 + hostname: ip.*.*.109 + - host: host110 + 
hostname: ip.*.*.110 + - host: host111 + hostname: ip.*.*.111 +``` + +```yaml +format.yaml + host: + - host109 + - host110 + - host111 + disk: + - /dev/nvme0n1:/data/chunkserver0:50 # device:mount_path:format_percent% + - /dev/nvme1n1:/data/chunkserver1:50 + - /dev/nvme2n1:/data/chunkserver2:50 +``` + +```yaml +topology.yaml + kind: curvebs + global: + container_image: ip.*.*.202:5000/wfcurvedocker/curvebs:v2.5 #编译的镜像地址 + log_dir: ${home}/logs/${service_role}${service_replicas_sequence} + data_dir: ${home}/data/${service_role}${service_replicas_sequence} + variable: + home: /tmp + machine1: host109 + machine2: host110 + machine3: host111 + + etcd_services: + config: + listen.ip: ${service_host} + listen.port: 2391 + listen.client_port: 2390 + deploy: + - host: ${machine1} + - host: ${machine2} + - host: ${machine3} + + mds_services: + config: + listen.ip: ${service_host} + listen.port: 6666 + listen.dummy_port: 6667 + deploy: + - host: ${machine1} + - host: ${machine2} + - host: ${machine3} + + chunkserver_services: + config: + listen.ip: ${service_host} + listen.port: 82${format_replicas_sequence} # 8200,8201,8202 + data_dir: /data/chunkserver${service_replicas_sequence} # /data/chunkserver0, /data/chunksever1 + copysets: 100 + chunkfilepool.enable_get_chunk_from_pool: false + deploy: + - host: ${machine1} + replicas: 3 + - host: ${machine2} + replicas: 3 + - host: ${machine3} + replicas: 3 + + snapshotclone_services: + config: + listen.ip: ${service_host} + listen.port: 5555 + listen.dummy_port: 8081 + listen.proxy_port: 8080 + deploy: + - host: ${machine1} + - host: ${machine2} + - host: ${machine3} +``` + +#### curvebs 服务端部署步骤 + + 请参考[CurveAdm部署CurveBS集群](https://github.com/opencurve/curveadm/wiki/curvebs-cluster-deployment) + +### curvebs 客户端部署 + +#### 环境准备具体步骤 + + 请参考[Ansile部署Curvebs](https://github.com/opencurve/curve/blob/master/docs/cn/deploy.md) + +1. root用户登录机器,创建curve用户 + +```shell + $ adduser curve +``` + +2. 设置curve用户免密sudo + +```shell + $ su # 进入root用户 + $ 在/etc/sudoers.d下面创建一个新文件curve,里面添加一行:curve ALL=(ALL) NOPASSWD:ALL + $ sudo -iu curve # 切换到curve用户 + $ sudo ls # 测试sudo是否正确配置 +``` + +3. 安装ansible 2.5.9 + +```shell + $ apt install python + $ apt install python-pip + $ pip install ansible==2.5.9 + $ ansible-playbook # 如果没有报错的话说明安装成功,报错的话执行下面两步 + $ pip install --upgrade pip + $ pip install --upgrade setuptools +``` + +4. 配置ssh登陆到所有机器(包括自己),假设三台机器的ip分别为ip.*.*.109,ip.*.*.110,ip.*.*.111 + +```shell + $ ssh-keygen # 生成ssh秘钥 + $ ssh-copy-id root@ip.*.*.109 # 拷贝key到第一个机器 + $ ssh-copy-id root@ip.*.*.110 # 拷贝key到第二个机器 + $ ssh-copy-id root@ip.*.*.111 # 拷贝key到第三个机器 + $ ssh ip.*.*.109 # 挨个验证一下配置是否正确 +``` + +#### curvebs客户端配置 + +1. 拷贝脚本和二进制到ip.*.*.109节点 /home/curve目录 + +```shell + $ cp /编译目录/curve/build/curve /home/curve/ + $ cp /编译目录/curve/build/nebd-package /home/curve/ + $ cp /编译目录/curve/build/nbd-package /home/curve/ +``` + +2. 
修改curve-ansible中client.ini配置文件 + +```shell + $ cd /home/curve/curve/curve-ansible + $ vim client.ini +``` + +```yaml +client.ini + + [client] + #所需部署客户端的地址 + client109 ansible_ssh_host=ip.*.*.109 + # 仅用于生成配置中的mds地址,curvebs服务端mds地址,curveadm status 可以查看 + [mds] + mds109 ansible_ssh_host=ip.*.*.109 + mds110 ansible_ssh_host=ip.*.*.110 + mds111 ansible_ssh_host=ip.*.*.111 + [client:vars] + nebd_package_version="1.0.2+e3fa47f" + nbd_package_version="" + sdk_package_version="0.0.6.1+160be351" + deploy_dir="${HOME}" + nebd_start_port=9000 + nebd_port_max_range=5 + nebd_need_sudo=true + client_config_path=/etc/curve/client.conf + nebd_client_config_path=/etc/nebd/nebd-client.conf + nebd_server_config_path=/etc/nebd/nebd-server.conf + nebd_data_dir=/data/nebd + nebd_log_dir=/data/log/nebd + curve_sdk_log_dir=/data/log/curve + py_client_config_path=/etc/curve/py_client.conf + clean_log_when_clean=true + curvetab_path=/etc/curve + curve_service_path=/etc/systemd/system + + [mds:vars] + mds_port=6666 + + [all:vars] + need_confirm=true + update_config_with_puppet=false + ansible_ssh_port=22 + lib_install_prefix=/usr/local + bin_install_prefix=/usr + #需要改成ssh + ansible_connection=ssh + wait_service_timeout=20 + curve_bin_dir=/usr/bin + start_by_daemon=true + install_with_deb=false + sudo_or_not=True + ansible_become_user=curve + ansible_become_flags=-iu curve +``` + +#### curvebs服务端安装 + +1. 安装 Nebd 服务和 NBD 包 + +```shell + $ cd /home/curve/curve/curve-ansible + $ ansible-playbook -i client.ini deploy_nebd.yml + $ ansible-playbook -i client.ini deploy_nbd.yml + $ ansible-playbook -i client.ini deploy_curve_sdk.yml +``` + +2. nebd配置修改和状态查看 + +```shell + # nebd配置路径 + $ cat /etc/curve/client.conf + # nebd服务查看 + $ systemctl status nebd-daemon.service + # nebd重启服务 + $ systemctl restart nebd-daemon.service +``` + +3. 
验证curvebs集群是否可用 + + 请参考[curve卷命令](https://github.com/opencurve/curve/blob/master/docs/cn/k8s_csi_interface.md) + +```shell + # 创建卷 + $ curve create --filename /test --length 10 --user curve + # 挂载卷 + $ sudo curve-nbd map cbd:pool//test_curve_ + # 查看挂载状态 + $ curve-nbd list-mapped +``` +Curve-csi_Requirements + +### curve-csi 部署 + + 请参考[curve-csi使用说明](https://github.com/opencurve/curve-csi/blob/master/docs/README.md) + +### 总结 + + 本篇文章我们主要解决的问题是curve-csi创建卷,查询卷,挂载卷失败的问题。目前最新curvebs客户端的部署是用curveadm进行容器化部署, + 物理主机上没有curve curve-nebd命令和nebd相关的服务和配置,因此curve-csi部署的时候会产生报错。本文部署curvebs服务端依旧使用curveadm + 进行容器化部署,curvebs客户端使用curve源码中旧的部署方式ansible部署方式,可以解决客户端在物理机上的部署,部署只需修改client.ini配置 + 执行三个部署命令。 diff --git a/docs/practical/curvefs_rainbond.md b/docs/practical/curvefs_rainbond.md new file mode 100644 index 0000000000..873c052de4 --- /dev/null +++ b/docs/practical/curvefs_rainbond.md @@ -0,0 +1,281 @@ +# 在 Rainbond 上使用 Curve 云原生存储 + +本文介绍如何在 Rainbond 云原生应用管理平台上使用 Curve 云原生存储。 + +## 部署 Rainbond + +[Rainbond](https://www.rainbond.com/) 是一个云原生应用管理平台,使用简单,不需要懂容器、Kubernetes和底层复杂技术,支持管理多个Kubernetes集群,和管理企业应用全生命周期。 + +可以通过一条命令快速安装 Rainbond 单机版。 + +```bash +curl -o install.sh https://get.rainbond.com && bash ./install.sh +``` + +执行完上述脚本后,耐心等待 3-5 分钟,可以看到如下日志输出,表示 Rainbond 已启动完成。 + +```bash +INFO: Rainbond started successfully, Please pass http://$EIP:7070 Access Rainbond +``` + +## 使用 CurveAdm 部署 CurveFS + +### 安装 CurveAdm + +```bash +bash -c "$(curl -fsSL https://curveadm.nos-eastchina1.126.net/script/install.sh)" +``` + +### 配置主机列表 + +#### 配置免密登陆 + +生成密钥并配置服务器免密登陆 + +```bash +# 一直回车即可 +ssh-keygen + +# 使用 ssh-copy-id 配置 +ssh-copy-id root@172.31.98.243 + +# 验证免密 +ssh root@172.31.98.243 + +# 无需输入密码登陆成功即可 +``` +#### 导入主机列表 + +准备主机列表文件 `hosts.yaml` + +```yaml +$ vim hosts.yaml + +global: + user: root # ssh 免密登陆用户名 + ssh_port: 22 # ssh 端口 + private_key_file: /root/.ssh/id_rsa # 密钥路径 + +hosts: + - host: curve + hostname: 172.31.98.243 +``` + +导入主机列表 + +```bash +$ curveadm hosts commit hosts.yaml +``` + +查看主机列表 + +```bash +$ curveadm hosts ls +``` + +### 准备集群拓扑文件 + +CurveFS 支持单机部署和高可用部署,这里我们采用单机部署验证。 + +创建 `topology.yaml` 文件,只需修改 `target: curve`,其他都默认即可。 + +```yaml +$ vim topology.yaml + +kind: curvefs +global: + report_usage: true + data_dir: ${home}/curvefs/data/${service_role}${service_host_sequence} + log_dir: ${home}/curvefs/logs/${service_role}${service_host_sequence} + container_image: opencurvedocker/curvefs:v2.4 + variable: + home: /tmp + target: curve + +etcd_services: + config: + listen.ip: ${service_host} + listen.port: 2380${service_host_sequence} # 23800,23801,23802 + listen.client_port: 2379${service_host_sequence} # 23790,23791,23792 + deploy: + - host: ${target} + - host: ${target} + - host: ${target} + +mds_services: + config: + listen.ip: ${service_host} + listen.port: 670${service_host_sequence} # 6700,6701,6702 + listen.dummy_port: 770${service_host_sequence} # 7700,7701,7702 + deploy: + - host: ${target} + - host: ${target} + - host: ${target} + +metaserver_services: + config: + listen.ip: ${service_host} + listen.port: 680${service_host_sequence} # 6800,6801,6802 + listen.external_port: 780${service_host_sequence} # 7800,7801,7802 + global.enable_external_server: true + metaserver.loglevel: 0 + braft.raft_sync: false + deploy: + - host: ${target} + - host: ${target} + - host: ${target} + config: + metaserver.loglevel: 0 +``` + +### 部署集群 + +添加 `my-cluster` 集群,并指定集群拓扑文件 + +```bash +curveadm cluster add my-cluster -f topology.yaml +``` + +切换 `my-cluster` 集群为当前管理集群 + +```bash +curveadm cluster 
checkout my-cluster +``` + +开始部署集群 + +```bash +$ curveadm deploy +...... +Cluster 'my-cluster' successfully deployed ^_^. +``` + +终端出现 `Cluster 'my-cluster' successfully deployed ^_^.` 即部署成功。 + +查看集群运行情况 + +```bash +$ curveadm status +Get Service Status: [OK] + +cluster name : my-cluster +cluster kind : curvefs +cluster mds addr : 192.168.3.81:6700,192.168.3.81:6701,192.168.3.81:6702 +cluster mds leader: 192.168.3.81:6702 / 7f5b7443c563 + +Id Role Host Replicas Container Id Status +-- ---- ---- -------- ------------ ------ +6ae9ac1ae448 etcd curve 1/1 d3ecb4e81318 Up 17 minutes +c45e2f0b9266 etcd curve 1/1 8ce9befa54b8 Up 17 minutes +6c6bde442a04 etcd curve 1/1 cbf093c6605f Up 17 minutes +9516d8f5d9ae mds curve 1/1 f338ec63c493 Up 17 minutes +fe2bf5d8a072 mds curve 1/1 b423c3351256 Up 17 minutes +7f5b7443c563 mds curve 1/1 7ad99cee6b61 Up 17 minutes +e6fe68d23220 metaserver curve 1/1 d4a8662d4ed2 Up 17 minutes +b2b4dbabd7bf metaserver curve 1/1 65d7475e0bc4 Up 17 minutes +426ac76e28f9 metaserver curve 1/1 f413efeeb5c9 Up 17 minutes +``` + +## 通过 Rainbond 部署 MinIO 集群 + +由于目前 CurveFS 只支持 S3 作为后端存储,CurveBS 后端即将支持。 所以我们需要部署一个 MinIO 对象存储。 + +通过 Rainbond 开源应用商店一键部署单机版 MinIO 或者集群版 MinIO。进入到 Rainbond 的 **平台管理 -> 应用市场**,在开源应用商店中搜索 `minio` 进行一键安装。 + +![](https://static.goodrain.com/wechat/curve/2.png) + +部署完成后,通过 Rainbond 提供的域名访问 MinIO 控制台,默认用户密码 `minio/minio123456`。然后需要创建一个 Bucket 供 CurveFS 使用。 + +## 在 Rainbond 上部署 CurveFS-CSI + +* 前提:Rainbond 版本要在 v5.13+ + +通过 Rainbond 开源应用商店一键部署,进入到 Rainbond 的 **平台管理 -> 应用市场**,在开源应用商店中搜索 `curve-csi` 进行一键安装。 + +![](https://static.goodrain.com/wechat/curve/3.png) + +由于 CurveFS-CSI 没有 Rainbond 应用模型类的组件,都属于 k8s 资源类型,可在 **应用视图内 -> k8s资源** 下看到。 + +![](https://static.goodrain.com/wechat/curve/4.png) + +安装完成后,需要修改 `curvefs-csi-cluster-role-binding` 和 `curvefs-csi-role-binding` 的 namespace 为当前团队的 namespace,如当前团队 namespace 为 `dev`,如下: + +```yaml +# curvefs-csi-role-binding +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: curvefs-csi-role-binding +...... +subjects: +- kind: ServiceAccount + name: curvefs-csi-service-account + namespace: dev # changed + +# curvefs-csi-cluster-role-binding +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: curvefs-csi-cluster-role-binding +...... +subjects: +- kind: ServiceAccount + name: curvefs-csi-service-account + namespace: dev # changed +``` + +创建 `storageclass` 资源,同样在 **应用视图内 -> k8s资源 -> 添加**: + +```bash +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: curvefs-sc +provisioner: csi.curvefs.com +allowVolumeExpansion: false +reclaimPolicy: Delete +parameters: + mdsAddr: "172.31.98.243:6700,172.31.98.243:6701,172.31.98.243:6702" + fsType: "s3" + s3Endpoint: "http://9000.grda6567.1frt0lmq.b836cf.grapps.cn" + s3AccessKey: "minio" + s3SecretKey: "minio123456" + s3Bucket: "curve" +``` + +* mdsAddr:通过 `curveadm status` 命令获取。 + + ```bash + $ curveadm status + ...... 
+ cluster mds addr : 172.31.98.243:6700,172.31.98.243:6701,172.31.98.243:6702 + ``` + +* s3Endpoint:填写 MinIO 组件的 9000 端口对外服务域名。 +* s3AccessKey:MinIO 访问 Key,填 root 用户或生成 AccessKey。 +* s3SecretKey:MinIO 密钥 Key,填 root 密码或生成 SecretKey。 +* s3Bucket:MinIO 桶名称。 + +![](https://static.goodrain.com/wechat/curve/5.png) + +## 在 Rainbond 上使用 CurveFS + +通过镜像创建一个 Nginx 组件,在 **组件 -> 其他设置** 修改组件部署类型为 `有状态服务`。在 Rainbond 上只有 有状态服务 可以使用自定义存储,无状态服务使用默认的共享存储。 + +![](https://static.goodrain.com/wechat/curve/6.png) + +进入到 **组件 -> 存储** 添加存储,选择类型为 `curvefs-sc`,保存并重启组件。 + +![](https://static.goodrain.com/wechat/curve/7.png) + +等待组件启动完成后,进入组件的 Web 终端内,测试写入数据。 + +![](https://static.goodrain.com/wechat/curve/8.png) + +然后进入到 MinIO 桶内查看,数据已写入。 + +![](https://static.goodrain.com/wechat/curve/9.png) + +## 未来规划 + +[Rainbond](https://www.rainbond.com/) 社区未来会使用 Curve 云原生存储作为 Rainbond 底层的共享存储,为用户提供更好、更简单的云原生应用管理平台和云原生存储,共同推进开源社区生态以及给用户提供一体化的解决方案。 diff --git a/include/client/libcurve.h b/include/client/libcurve.h index 890e4f6bdb..7431bb7544 100644 --- a/include/client/libcurve.h +++ b/include/client/libcurve.h @@ -126,20 +126,6 @@ int Create(const char* filename, const C_UserInfo_t* userinfo, size_t size); -/** - * create file with stripe - * @param: filename file name - * @param: userinfo user info - * @param: size file size - * @param: stripeUnit block in stripe size - * @param: stripeCount stripe count in one stripe - * - * @return: success return 0, fail return less than 0 - */ -int Create2(const char* filename, - const C_UserInfo_t* userinfo, - size_t size, uint64_t stripeUnit, uint64_t stripeCount); - /** * 同步模式读 * @param: fd为当前open返回的文件描述符 diff --git a/include/client/libcurve_define.h b/include/client/libcurve_define.h index f92d08f223..75dbd7b2a9 100644 --- a/include/client/libcurve_define.h +++ b/include/client/libcurve_define.h @@ -63,7 +63,7 @@ enum LIBCURVE_ERROR { // parameter error PARAM_ERROR = 16, // internal error - INTERNAL_ERROR = 17, + INTERNAL_ERROR = 17, // CRC error CRC_ERROR = 18, // parameter invalid diff --git a/include/curve_compiler_specific.h b/include/curve_compiler_specific.h index aa7219c504..ff45221bbe 100644 --- a/include/curve_compiler_specific.h +++ b/include/curve_compiler_specific.h @@ -26,20 +26,13 @@ #define CURVE_CACHELINE_SIZE 64 #define CURVE_CACHELINE_ALIGNMENT alignas(CURVE_CACHELINE_SIZE) -#if defined(COMPILER_GCC) -#if defined(__cplusplus) -#define CURVE_LIKELY(expr) \ - (__builtin_expect(static_cast(expr), true)) -#define CURVE_UNLIKELY(expr) \ - (__builtin_expect(static_cast(expr), false)) -#else +#if defined(__GNUC__) || defined(__clang__) #define CURVE_LIKELY(expr) (__builtin_expect(!!(expr), 1)) #define CURVE_UNLIKELY(expr) (__builtin_expect(!!(expr), 0)) -#endif #else #define CURVE_LIKELY(expr) (expr) #define CURVE_UNLIKELY(expr) (expr) -#endif // defined(COMPILER_GCC) +#endif // defined(__GNUC__) || defined(__clang__) #ifdef UNIT_TEST #define CURVE_MOCK virtual @@ -55,4 +48,6 @@ #define FALLTHROUGH_INTENDED ((void)0) #endif /* __GNUC__ >= 7 */ +#define CURVE_UNUSED __attribute__((__unused__)) + #endif // INCLUDE_CURVE_COMPILER_SPECIFIC_H_ diff --git a/mk-deb.sh b/mk-deb.sh old mode 100644 new mode 100755 index de0331e23c..9a448a6bce --- a/mk-deb.sh +++ b/mk-deb.sh @@ -16,37 +16,39 @@ # limitations under the License. 
# -dir=`pwd` -#step1 清除生成的目录和文件 +set -o errexit + +dir=$(pwd) + +# step1 清除生成的目录和文件 bazel clean -rm -rf curvefs_python/BUILD -rm -rf curvefs_python/tmplib/ -rm -rf curvesnapshot_python/BUILD -rm -rf curvesnapshot_python/tmplib/ -rm -rf *.deb -rm -rf *.whl -rm -rf build + +cleandir=( + curvefs_python/BUILD + curvefs_python/tmplib/ + curvesnapshot_python/BUILD + curvesnapshot_python/tmplib/ + *.deb + *.whl + *.tar.gz + build +) + +rm -rf "${cleandir[@]}" git submodule update --init -if [ $? -ne 0 ] -then - echo "submodule init failed" - exit -fi -#step2 获取tag版本和git提交版本信息 -#获取tag版本 -tag_version=`git status | grep -w "HEAD detached at" | awk '{print $NF}' | awk -F"v" '{print $2}'` -if [ -z ${tag_version} ] -then +# step2 获取tag版本和git提交版本信息 +# 获取tag版本 +tag_version=$(git status | grep -Ew "HEAD detached at|On branch" | awk '{print $NF}' | awk -F"v" '{print $2}') +if [ -z ${tag_version} ]; then echo "not found version info, set version to 9.9.9" tag_version=9.9.9 fi -#获取git提交版本信息 -commit_id=`git show --abbrev-commit HEAD|head -n 1|awk '{print $2}'` -if [ "$1" = "debug" ] -then +# 获取git提交版本信息 +commit_id=$(git rev-parse --short HEAD) +if [ "$1" = "debug" ]; then debug="+debug" else debug="" @@ -55,9 +57,9 @@ fi curve_version=${tag_version}+${commit_id}${debug} function create_python_wheel() { - PYTHON_VER=$(basename $1) - curdir=$(pwd) - basedir="build/curvefs_${PYTHON_VER}/" + local PYTHON_VER=$(basename $1) + local curdir=$(pwd) + local basedir="build/curvefs_${PYTHON_VER}/" mkdir -p ${basedir}/tmplib mkdir -p ${basedir}/curvefs @@ -93,9 +95,7 @@ function build_curvefs_python() { continue fi - bash ./curvefs_python/configure.sh $(basename ${bin}) - - if [ $? -ne 0 ]; then + if ! bash ./curvefs_python/configure.sh $(basename ${bin}); then echo "configure for ${bin} failed" continue fi @@ -108,13 +108,13 @@ function build_curvefs_python() { rm -rf ./bazel-bin/curvefs_python if [ "$1" = "release" ]; then - bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --copt -O2 -s \ + bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --copt -O2 \ --define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ --copt -Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ -L${dir}/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ ${bazelflags} else - bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --compilation_mode=dbg -s \ + bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --compilation_mode=dbg \ --define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ --copt -Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ -L${dir}/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ @@ -125,373 +125,161 @@ function build_curvefs_python() { done } -#step3 执行编译 -bazel_version=`bazel version | grep "Build label" | awk '{print $3}'` -if [ -z ${bazel_version} ] -then +# step3 执行编译 +bazel_version=$(bazel version | grep "Build label" | awk '{print $3}') +if [ -z ${bazel_version} ]; then echo "please install bazel 4.2.2 first" - exit + exit 1 fi -if [ ${bazel_version} != "4.2.2" ] -then - echo "bazel version must 4.2.2" - echo "now version is ${bazel_version}" - exit +if [ ${bazel_version} != "4.2.2" ]; then + echo "bazel version must be 4.2.2" + echo "current version is ${bazel_version}" + exit 1 fi echo "bazel version : ${bazel_version}" - # check gcc version, gcc version must >= 4.8.5 -gcc_version_major=`gcc -dumpversion | awk -F'.' '{print $1}'` -gcc_version_minor=`gcc -dumpversion | awk -F'.' 
'{print $2}'` -gcc_version_pathlevel=`gcc -dumpversion | awk -F'.' '{print $3}'` -if [ ${gcc_version_major} -lt 4 ] -then - echo "gcc version must >= 4.8.5, current version is "`gcc -dumpversion` - exit +gcc_version_major=$(gcc -dumpversion | awk -F'.' '{print $1}') +gcc_version_minor=$(gcc -dumpversion | awk -F'.' '{print $2}') +gcc_version_pathlevel=$(gcc -dumpversion | awk -F'.' '{print $3}') +if [ ${gcc_version_major} -lt 4 ]; then + echo "gcc version must >= 4.8.5, current version is "$(gcc -dumpversion) + exit 1 fi -if [[ ${gcc_version_major} -eq 4 ]] && [[ ${gcc_version_minor} -lt 8 ]] -then - echo "gcc version must >= 4.8.5, current version is "`gcc -dumpversion` - exit +if [[ ${gcc_version_major} -eq 4 ]] && [[ ${gcc_version_minor} -lt 8 ]]; then + echo "gcc version must >= 4.8.5, current version is "$(gcc -dumpversion) + exit 1 fi -if [[ ${gcc_version_major} -eq 4 ]] && [[ ${gcc_version_minor} -eq 8 ]] && [[ ${gcc_version_pathlevel} -lt 5 ]] -then - echo "gcc version must >= 4.8.5, current version is "`gcc -dumpversion` - exit +if [[ ${gcc_version_major} -eq 4 ]] && [[ ${gcc_version_minor} -eq 8 ]] && [[ ${gcc_version_pathlevel} -lt 5 ]]; then + echo "gcc version must >= 4.8.5, current version is "$(gcc -dumpversion) + exit 1 fi -echo "gcc version : "`gcc -dumpversion` -echo "start compile" +echo "gcc version : "$(gcc -dumpversion) -cd ${dir}/thirdparties/etcdclient -make clean -make all -if [ $? -ne 0 ] -then - echo "make etcd client failed" - exit -fi -cd ${dir} +echo "start compiling" + +cd ${dir}/thirdparties/etcdclient && + make clean && + make all && + cd $OLDPWD cp ${dir}/thirdparties/etcdclient/libetcdclient.h ${dir}/include/etcdclient/etcdclient.h -if [ `gcc -dumpversion | awk -F'.' '{print $1}'` -le 6 ] -then + +if [ $(gcc -dumpversion | awk -F'.' '{print $1}') -le 6 ]; then bazelflags='' else bazelflags='--copt -faligned-new' fi -if [ "$1" = "debug" ] -then -bazel build ... --copt -DHAVE_ZLIB=1 --compilation_mode=dbg -s --define=with_glog=true \ ---define=libunwind=true --copt -DGFLAGS_NS=google --copt \ --Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --copt -DCURVEVERSION=${curve_version} \ ---linkopt -L/usr/local/lib ${bazelflags} -if [ $? -ne 0 ] -then - echo "build phase1 failed" - exit -fi -bash ./curvefs_python/configure.sh python2 -if [ $? -ne 0 ] -then - echo "configure failed" - exit -fi -bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --compilation_mode=dbg -s \ ---define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ ---copt \ --Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ --L${dir}/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ ---linkopt -L/usr/local/lib ${bazelflags} -if [ $? -ne 0 ] -then - echo "build phase2 failed" - exit -fi +if [ "$1" = "debug" ]; then + make build stor=bs release=0 dep=1 only=src/* + + fail_count=0 + for python in "python2" "python3"; do + if ! bash ./curvefs_python/configure.sh ${python}; then + echo "configure ${python} failed" + let fail_count++ + fi + done + + if [[ $fail_count -ge 2 ]]; then + echo "configure python2/3 failed" + exit + fi + + bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --compilation_mode=dbg \ + --define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ + --copt \ + -Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ + -L${dir}/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ + --linkopt -L/usr/local/lib ${bazelflags} else -bazel build ... 
--copt -DHAVE_ZLIB=1 --copt -O2 -s --define=with_glog=true \ ---define=libunwind=true --copt -DGFLAGS_NS=google --copt \ --Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --copt -DCURVEVERSION=${curve_version} \ ---linkopt -L/usr/local/lib ${bazelflags} -if [ $? -ne 0 ] -then - echo "build phase1 failed" - exit -fi -bash ./curvefs_python/configure.sh python2 -if [ $? -ne 0 ] -then - echo "configure failed" - exit -fi -bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --copt -O2 -s \ ---define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ ---copt \ --Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ --L${dir}/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ ---linkopt -L/usr/local/lib ${bazelflags} -if [ $? -ne 0 ] -then - echo "build phase2 failed" - exit -fi + make build stor=bs release=1 dep=1 only=src/* + + fail_count=0 + for python in "python2" "python3"; do + if ! bash ./curvefs_python/configure.sh ${python}; then + echo "configure ${python} failed" + let fail_count++ + fi + done + + if [[ $fail_count -ge 2 ]]; then + echo "configure python2/3 failed" + exit + fi + + bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --copt -O2 \ + --define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ + --copt \ + -Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ + -L${dir}/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ + --linkopt -L/usr/local/lib ${bazelflags} fi echo "end compile" #step4 创建临时目录,拷贝二进制、lib库和配置模板 mkdir build -if [ $? -ne 0 ] -then - exit -fi cp -r curve-mds build/ -if [ $? -ne 0 ] -then - exit -fi cp -r curve-chunkserver build/ -if [ $? -ne 0 ] -then - exit -fi cp -r curve-sdk build/ -if [ $? -ne 0 ] -then - exit -fi cp -r curve-tools build/ -if [ $? -ne 0 ] -then - exit -fi cp -r curve-monitor build/ -if [ $? -ne 0 ] -then - exit -fi cp -r curve-snapshotcloneserver build/ -if [ $? -ne 0 ] -then - exit -fi cp -r curve-nginx build/ -if [ $? -ne 0 ] -then - exit -fi mkdir -p build/curve-mds/usr/bin -if [ $? -ne 0 ] -then - exit -fi + mkdir -p build/curve-mds/etc/curve -if [ $? -ne 0 ] -then - exit -fi mkdir -p build/curve-mds/usr/lib -if [ $? -ne 0 ] -then - exit -fi mkdir -p build/curve-tools/usr/bin -if [ $? -ne 0 ] -then - exit -fi cp ./bazel-bin/src/mds/main/curvemds build/curve-mds/usr/bin/curve-mds -if [ $? -ne 0 ] -then - exit -fi -#cp ./bazel-bin/src/tools/curve_status_tool \ -#build/curve-mds/usr/bin/curve_status_tool -#if [ $? -ne 0 ] -#then -# exit -#fi cp thirdparties/etcdclient/libetcdclient.so \ -build/curve-mds/usr/lib/libetcdclient.so -if [ $? -ne 0 ] -then - exit -fi -#cp ./conf/mds.conf build/curve-mds/etc/curve/mds.conf -#if [ $? -ne 0 ] -#then -# exit -#fi + build/curve-mds/usr/lib/libetcdclient.so cp ./bazel-bin/tools/curvefsTool build/curve-mds/usr/bin/curve-tool -if [ $? -ne 0 ] -then - exit -fi cp -r tools/snaptool build/curve-tools/usr/bin/snaptool-lib cp tools/snaptool/snaptool build/curve-tools/usr/bin/snaptool chmod a+x build/curve-tools/usr/bin/snaptool -if [ $? -ne 0 ] -then - exit -fi cp ./bazel-bin/src/tools/curve_tool \ -build/curve-tools/usr/bin/curve_ops_tool -if [ $? -ne 0 ] -then - exit -fi + build/curve-tools/usr/bin/curve_ops_tool mkdir -p build/curve-chunkserver/usr/bin -if [ $? -ne 0 ] -then - exit -fi mkdir -p build/curve-chunkserver/etc/curve -if [ $? -ne 0 ] -then - exit -fi cp ./bazel-bin/src/chunkserver/chunkserver \ -build/curve-chunkserver/usr/bin/curve-chunkserver -if [ $? 
-ne 0 ] -then - exit -fi + build/curve-chunkserver/usr/bin/curve-chunkserver cp ./bazel-bin/src/tools/curve_chunkserver_tool \ -build/curve-chunkserver/usr/bin/curve_chunkserver_tool -if [ $? -ne 0 ] -then - exit -fi -#cp ./conf/chunkserver.conf.example \ -#build/curve-chunkserver/etc/curve/chunkserver.conf -#if [ $? -ne 0 ] -#then -# exit -#fi -#cp ./conf/s3.conf build/curve-chunkserver/etc/curve/s3.conf -#if [ $? -ne 0 ] -#then -# exit -#fi + build/curve-chunkserver/usr/bin/curve_chunkserver_tool cp ./bazel-bin/src/tools/curve_format \ -build/curve-chunkserver/usr/bin/curve-format -if [ $? -ne 0 ] -then - exit -fi + build/curve-chunkserver/usr/bin/curve-format mkdir -p build/curve-sdk/usr/curvefs -if [ $? -ne 0 ] -then - exit -fi mkdir -p build/curve-sdk/usr/bin -if [ $? -ne 0 ] -then - exit -fi mkdir -p build/curve-sdk/etc/curve -if [ $? -ne 0 ] -then - exit -fi mkdir -p build/curve-sdk/usr/lib -if [ $? -ne 0 ] -then - exit -fi mkdir -p build/curve-sdk/usr/include -if [ $? -ne 0 ] -then - exit -fi cp ./bazel-bin/curvefs_python/libcurvefs.so \ -build/curve-sdk/usr/curvefs/_curvefs.so -if [ $? -ne 0 ] -then - exit -fi + build/curve-sdk/usr/curvefs/_curvefs.so cp curvefs_python/curvefs.py build/curve-sdk/usr/curvefs/curvefs.py -if [ $? -ne 0 ] -then - exit -fi cp curvefs_python/__init__.py build/curve-sdk/usr/curvefs/__init__.py -if [ $? -ne 0 ] -then - exit -fi cp curvefs_python/curvefs_tool.py build/curve-sdk/usr/curvefs/curvefs_tool.py -if [ $? -ne 0 ] -then - exit -fi cp curvefs_python/parser.py build/curve-sdk/usr/curvefs/parser.py -if [ $? -ne 0 ] -then - exit -fi cp curvefs_python/curve build/curve-sdk/usr/bin/curve -if [ $? -ne 0 ] -then - exit -fi chmod a+x build/curve-sdk/usr/bin/curve cp curvefs_python/tmplib/* build/curve-sdk/usr/lib/ -if [ $? -ne 0 ] -then - exit -fi -cp ./bazel-bin/src/client/libcurve.so build/curve-sdk/usr/lib cp include/client/libcurve.h build/curve-sdk/usr/include cp include/client/libcbd.h build/curve-sdk/usr/include cp include/client/libcurve_define.h build/curve-sdk/usr/include -if [ $? -ne 0 ] -then - exit -fi -#cp ./conf/client.conf build/curve-sdk/etc/curve/client.conf -#if [ $? -ne 0 ] -#then -# exit -#fi mkdir -p build/curve-monitor/etc/curve/monitor -if [ $? -ne 0 ] -then - exit -fi cp -r monitor/* build/curve-monitor/etc/curve/monitor -if [ $? -ne 0 ] -then - exit -fi mkdir -p build/curve-snapshotcloneserver/usr/bin -if [ $? -ne 0 ] -then - exit -fi cp ./bazel-bin/src/snapshotcloneserver/snapshotcloneserver \ -build/curve-snapshotcloneserver/usr/bin/curve-snapshotcloneserver -if [ $? -ne 0 ] -then - exit -fi + build/curve-snapshotcloneserver/usr/bin/curve-snapshotcloneserver mkdir -p build/curve-nginx/etc/curve/nginx/app/etc -if [ $? -ne 0 ] -then - exit -fi mkdir -p build/curve-nginx/etc/curve/nginx/conf -if [ $? 
-ne 0 ] -then - exit -fi # step 4.1 prepare for nebd-package cp -r nebd/nebd-package build/ mkdir -p build/nebd-package/usr/include/nebd @@ -505,8 +293,7 @@ cp -r k8s/nebd/nebd-package build/k8s-nebd-package mkdir -p build/k8s-nebd-package/usr/bin mkdir -p build/k8s-nebd-package/usr/lib/nebd -for i in `find bazel-bin/|grep -w so|grep -v solib|grep -v params|grep -v test|grep -v fake` -do +for i in $(find bazel-bin/ | grep -w so | grep -v solib | grep -v params | grep -v test | grep -v fake); do cp -f $i build/nebd-package/usr/lib/nebd cp -f $i build/k8s-nebd-package/usr/lib/nebd done @@ -524,19 +311,19 @@ cp -r k8s/nbd/nbd-package build/k8s-nbd-package mkdir -p build/k8s-nbd-package/usr/bin cp bazel-bin/nbd/src/curve-nbd build/k8s-nbd-package/usr/bin -#step5 记录到debian包的配置文件,打包debian包 +# step5 记录到debian包的配置文件,打包debian包 version="Version: ${curve_version}" -echo ${version} >> build/curve-mds/DEBIAN/control -echo ${version} >> build/curve-sdk/DEBIAN/control -echo ${version} >> build/curve-chunkserver/DEBIAN/control -echo ${version} >> build/curve-tools/DEBIAN/control -echo ${version} >> build/curve-monitor/DEBIAN/control -echo ${version} >> build/curve-snapshotcloneserver/DEBIAN/control -echo ${version} >> build/curve-nginx/DEBIAN/control -echo ${version} >> build/nebd-package/DEBIAN/control -echo ${version} >> build/k8s-nebd-package/DEBIAN/control -echo ${version} >> build/nbd-package/DEBIAN/control -echo ${version} >> build/k8s-nbd-package/DEBIAN/control +echo ${version} >>build/curve-mds/DEBIAN/control +echo ${version} >>build/curve-sdk/DEBIAN/control +echo ${version} >>build/curve-chunkserver/DEBIAN/control +echo ${version} >>build/curve-tools/DEBIAN/control +echo ${version} >>build/curve-monitor/DEBIAN/control +echo ${version} >>build/curve-snapshotcloneserver/DEBIAN/control +echo ${version} >>build/curve-nginx/DEBIAN/control +echo ${version} >>build/nebd-package/DEBIAN/control +echo ${version} >>build/k8s-nebd-package/DEBIAN/control +echo ${version} >>build/nbd-package/DEBIAN/control +echo ${version} >>build/k8s-nbd-package/DEBIAN/control dpkg-deb -b build/curve-mds . dpkg-deb -b build/curve-sdk . @@ -550,7 +337,7 @@ dpkg-deb -b build/k8s-nebd-package . dpkg-deb -b build/nbd-package . dpkg-deb -b build/k8s-nbd-package . -#step6 清理libetcdclient.so编译出现的临时文件 +# step6 清理libetcdclient.so编译出现的临时文件 cd ${dir}/thirdparties/etcdclient make clean cd ${dir} diff --git a/mk-tar.sh b/mk-tar.sh old mode 100644 new mode 100755 index d16aef94ac..0d646683bf --- a/mk-tar.sh +++ b/mk-tar.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # # Copyright (c) 2020 NetEase Inc. @@ -16,37 +16,39 @@ # limitations under the License. # -dir=`pwd` -#step1 清除生成的目录和文件 +set -o errexit + +dir=$(pwd) + +# step1 清除生成的目录和文件 bazel clean -rm -rf curvefs_python/BUILD -rm -rf curvefs_python/tmplib/ -rm -rf curvesnapshot_python/BUILD -rm -rf curvesnapshot_python/tmplib/ -rm -rf *.whl -rm -rf *.tar.gz -rm -rf build + +cleandir=( + curvefs_python/BUILD + curvefs_python/tmplib/ + curvesnapshot_python/BUILD + curvesnapshot_python/tmplib/ + *.deb + *.whl + *.tar.gz + build +) + +rm -rf "${cleandir[@]}" git submodule update --init -if [ $? 
-ne 0 ] -then - echo "submodule init failed" - exit -fi -#step2 获取tag版本和git提交版本信息 -#获取tag版本 -tag_version=`git status | grep -Ew "HEAD detached at|On branch" | awk '{print $NF}' | awk -F"v" '{print $2}'` -if [ -z ${tag_version} ] -then +# step2 获取tag版本和git提交版本信息 +# 获取tag版本 +tag_version=$(git status | grep -Ew "HEAD detached at|On branch" | awk '{print $NF}' | awk -F"v" '{print $2}') +if [ -z ${tag_version} ]; then echo "not found version info, set version to 9.9.9" tag_version=9.9.9 fi -#获取git提交版本信息 -commit_id=`git show --abbrev-commit HEAD|head -n 1|awk '{print $2}'` -if [ "$1" = "debug" ] -then +# 获取git提交版本信息 +commit_id=$(git rev-parse --short HEAD) +if [ "$1" = "debug" ]; then debug="+debug" else debug="" @@ -55,9 +57,9 @@ fi curve_version=${tag_version}+${commit_id}${debug} function create_python_wheel() { - PYTHON_VER=$(basename $1) - curdir=$(pwd) - basedir="build/curvefs_${PYTHON_VER}/" + local PYTHON_VER=$(basename $1) + local curdir=$(pwd) + local basedir="build/curvefs_${PYTHON_VER}/" mkdir -p ${basedir}/tmplib mkdir -p ${basedir}/curvefs @@ -93,9 +95,7 @@ function build_curvefs_python() { continue fi - bash ./curvefs_python/configure.sh $(basename ${bin}) - - if [ $? -ne 0 ]; then + if ! bash ./curvefs_python/configure.sh $(basename ${bin}); then echo "configure for ${bin} failed" continue fi @@ -125,310 +125,160 @@ function build_curvefs_python() { done } -#step3 执行编译 -bazel_version=`bazel version | grep "Build label" | awk '{print $3}'` -if [ -z ${bazel_version} ] -then +# step3 执行编译 +bazel_version=$(bazel version | grep "Build label" | awk '{print $3}') +if [ -z ${bazel_version} ]; then echo "please install bazel 4.2.2 first" - exit + exit 1 fi -if [ ${bazel_version} != "4.2.2" ] -then - echo "bazel version must 4.2.2" - echo "now version is ${bazel_version}" - exit +if [ ${bazel_version} != "4.2.2" ]; then + echo "bazel version must be 4.2.2" + echo "current version is ${bazel_version}" + exit 1 fi echo "bazel version : ${bazel_version}" # check gcc version, gcc version must >= 4.8.5 -gcc_version_major=`gcc -dumpversion | awk -F'.' '{print $1}'` -gcc_version_minor=`gcc -dumpversion | awk -F'.' '{print $2}'` -gcc_version_pathlevel=`gcc -dumpversion | awk -F'.' '{print $3}'` -if [ ${gcc_version_major} -lt 4 ] -then - echo "gcc version must >= 4.8.5, current version is "`gcc -dumpversion` - exit +gcc_version_major=$(gcc -dumpversion | awk -F'.' '{print $1}') +gcc_version_minor=$(gcc -dumpversion | awk -F'.' '{print $2}') +gcc_version_pathlevel=$(gcc -dumpversion | awk -F'.' 
'{print $3}') +if [ ${gcc_version_major} -lt 4 ]; then + echo "gcc version must >= 4.8.5, current version is "$(gcc -dumpversion) + exit 1 fi -if [[ ${gcc_version_major} -eq 4 ]] && [[ ${gcc_version_minor} -lt 8 ]] -then - echo "gcc version must >= 4.8.5, current version is "`gcc -dumpversion` - exit +if [[ ${gcc_version_major} -eq 4 ]] && [[ ${gcc_version_minor} -lt 8 ]]; then + echo "gcc version must >= 4.8.5, current version is "$(gcc -dumpversion) + exit 1 fi -if [[ ${gcc_version_major} -eq 4 ]] && [[ ${gcc_version_minor} -eq 8 ]] && [[ ${gcc_version_pathlevel} -lt 5 ]] -then - echo "gcc version must >= 4.8.5, current version is "`gcc -dumpversion` - exit +if [[ ${gcc_version_major} -eq 4 ]] && [[ ${gcc_version_minor} -eq 8 ]] && [[ ${gcc_version_pathlevel} -lt 5 ]]; then + echo "gcc version must >= 4.8.5, current version is "$(gcc -dumpversion) + exit 1 fi -echo "gcc version : "`gcc -dumpversion` +echo "gcc version : "$(gcc -dumpversion) -echo "start compile" -cd ${dir}/thirdparties/etcdclient -make clean -make all -if [ $? -ne 0 ] -then - echo "make etcd client failed" - exit -fi -cd ${dir} +echo "start compiling" + +cd ${dir}/thirdparties/etcdclient && + make clean && + make all && + cd $OLDPWD cp ${dir}/thirdparties/etcdclient/libetcdclient.h ${dir}/include/etcdclient/etcdclient.h -if [ `gcc -dumpversion | awk -F'.' '{print $1}'` -le 6 ] -then +if [ $(gcc -dumpversion | awk -F'.' '{print $1}') -le 6 ]; then bazelflags='' else bazelflags='--copt -faligned-new' fi -if [ "$1" = "debug" ] -then -bazel build ... --copt -DHAVE_ZLIB=1 --compilation_mode=dbg -s --define=with_glog=true \ ---define=libunwind=true --copt -DGFLAGS_NS=google --copt \ --Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --copt -DCURVEVERSION=${curve_version} \ ---linkopt -L/usr/local/lib ${bazelflags} -if [ $? -ne 0 ] -then - echo "build phase1 failed" - exit -fi -bash ./curvefs_python/configure.sh python2 -if [ $? -ne 0 ] -then - echo "configure failed" - exit -fi -bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --compilation_mode=dbg -s \ ---define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ ---copt \ --Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ --L${dir}/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ ---linkopt -L/usr/local/lib ${bazelflags} -if [ $? -ne 0 ] -then - echo "build phase2 failed" - exit -fi +if [ "$1" = "debug" ]; then + make build stor=bs release=0 dep=1 only=src/* + + fail_count=0 + for python in "python2" "python3"; do + if ! bash ./curvefs_python/configure.sh ${python}; then + echo "configure ${python} failed" + let fail_count++ + fi + done + + if [[ $fail_count -ge 2 ]]; then + echo "configure python2/3 failed" + exit + fi + + bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --compilation_mode=dbg -s \ + --define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ + --copt \ + -Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ + -L${dir}/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ + --linkopt -L/usr/local/lib ${bazelflags} else -bazel build ... --copt -DHAVE_ZLIB=1 --copt -O2 -s --define=with_glog=true \ ---define=libunwind=true --copt -DGFLAGS_NS=google --copt \ --Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --copt -DCURVEVERSION=${curve_version} \ ---linkopt -L/usr/local/lib ${bazelflags} -if [ $? -ne 0 ] -then - echo "build phase1 failed" - exit -fi -bash ./curvefs_python/configure.sh python2 -if [ $? 
-ne 0 ] -then - echo "configure failed" - exit -fi -bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --copt -O2 -s \ ---define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ ---copt \ --Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ --L${dir}/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ ---linkopt -L/usr/local/lib ${bazelflags} -if [ $? -ne 0 ] -then - echo "build phase2 failed" - exit -fi + make build stor=bs release=1 dep=1 only=src/* + + fail_count=0 + for python in "python2" "python3"; do + if ! bash ./curvefs_python/configure.sh ${python}; then + echo "configure ${python} failed" + let fail_count++ + fi + done + + if [[ $fail_count -ge 2 ]]; then + echo "configure python2/3 failed" + exit + fi + + bazel build curvefs_python:curvefs --copt -DHAVE_ZLIB=1 --copt -O2 -s \ + --define=with_glog=true --define=libunwind=true --copt -DGFLAGS_NS=google \ + --copt \ + -Wno-error=format-security --copt -DUSE_BTHREAD_MUTEX --linkopt \ + -L${dir}/curvefs_python/tmplib/ --copt -DCURVEVERSION=${curve_version} \ + --linkopt -L/usr/local/lib ${bazelflags} fi echo "end compile" #step4 创建临时目录,拷贝二进制、lib库和配置模板 echo "start copy" mkdir -p build/curve/ -if [ $? -ne 0 ] -then - exit -fi # curve-mds mkdir -p build/curve/curve-mds/bin -if [ $? -ne 0 ] -then - exit -fi mkdir -p build/curve/curve-mds/lib -if [ $? -ne 0 ] -then - exit -fi cp ./bazel-bin/src/mds/main/curvemds build/curve/curve-mds/bin/curve-mds -if [ $? -ne 0 ] -then - exit -fi cp thirdparties/etcdclient/libetcdclient.so \ -build/curve/curve-mds/lib/libetcdclient.so -if [ $? -ne 0 ] -then - exit -fi + build/curve/curve-mds/lib/libetcdclient.so cp ./bazel-bin/tools/curvefsTool build/curve/curve-mds/bin/curve-tool -if [ $? -ne 0 ] -then - exit -fi # curve-tools mkdir -p build/curve/curve-tools/bin -if [ $? -ne 0 ] -then - exit -fi cp ./bazel-bin/src/tools/curve_tool \ -build/curve/curve-tools/bin/curve_ops_tool -if [ $? -ne 0 ] -then - exit -fi + build/curve/curve-tools/bin/curve_ops_tool cp -r tools/snaptool build/curve/curve-tools/bin/snaptool-lib cp tools/snaptool/snaptool build/curve/curve-tools/bin/snaptool chmod a+x build/curve/curve-tools/bin/snaptool -if [ $? -ne 0 ] -then - exit -fi # curve-chunkserver mkdir -p build/curve/curve-chunkserver/bin -if [ $? -ne 0 ] -then - exit -fi cp ./bazel-bin/src/chunkserver/chunkserver \ -build/curve/curve-chunkserver/bin/curve-chunkserver -if [ $? -ne 0 ] -then - exit -fi + build/curve/curve-chunkserver/bin/curve-chunkserver cp ./bazel-bin/src/tools/curve_chunkserver_tool \ -build/curve/curve-chunkserver/bin/curve_chunkserver_tool -if [ $? -ne 0 ] -then - exit -fi + build/curve/curve-chunkserver/bin/curve_chunkserver_tool cp ./bazel-bin/src/tools/curve_format \ -build/curve/curve-chunkserver/bin/curve-format -if [ $? -ne 0 ] -then - exit -fi + build/curve/curve-chunkserver/bin/curve-format # curve-sdk mkdir -p build/curve/curve-sdk/curvefs -if [ $? -ne 0 ] -then - exit -fi mkdir -p build/curve/curve-sdk/bin -if [ $? -ne 0 ] -then - exit -fi mkdir -p build/curve/curve-sdk/lib -if [ $? -ne 0 ] -then - exit -fi mkdir -p build/curve/curve-sdk/include -if [ $? -ne 0 ] -then - exit -fi cp ./bazel-bin/curvefs_python/libcurvefs.so \ -build/curve/curve-sdk/curvefs/_curvefs.so -if [ $? -ne 0 ] -then - exit -fi + build/curve/curve-sdk/curvefs/_curvefs.so cp curvefs_python/curvefs.py build/curve/curve-sdk/curvefs/curvefs.py -if [ $? -ne 0 ] -then - exit -fi cp curvefs_python/__init__.py build/curve/curve-sdk/curvefs/__init__.py -if [ $? 
-ne 0 ] -then - exit -fi cp curvefs_python/curvefs_tool.py build/curve/curve-sdk/curvefs/curvefs_tool.py -if [ $? -ne 0 ] -then - exit -fi cp curvefs_python/parser.py build/curve/curve-sdk/curvefs/parser.py -if [ $? -ne 0 ] -then - exit -fi cp curvefs_python/curve build/curve/curve-sdk/bin/curve -if [ $? -ne 0 ] -then - exit -fi chmod a+x build/curve/curve-sdk/bin/curve cp curvefs_python/tmplib/* build/curve/curve-sdk/lib/ -if [ $? -ne 0 ] -then - exit -fi -cp ./bazel-bin/src/client/libcurve.so build/curve/curve-sdk/lib/ cp include/client/libcurve.h build/curve/curve-sdk/include cp include/client/libcbd.h build/curve/curve-sdk/include cp include/client/libcurve_define.h build/curve/curve-sdk/include -if [ $? -ne 0 ] -then - exit -fi # curve-snapshotcloneserver mkdir -p build/curve/curve-snapshotcloneserver/bin -if [ $? -ne 0 ] -then - exit -fi cp ./bazel-bin/src/snapshotcloneserver/snapshotcloneserver \ -build/curve/curve-snapshotcloneserver/bin/curve-snapshotcloneserver -if [ $? -ne 0 ] -then - exit -fi + build/curve/curve-snapshotcloneserver/bin/curve-snapshotcloneserver mkdir -p build/curve/curve-snapshotcloneserver/lib cp thirdparties/etcdclient/libetcdclient.so \ -build/curve/curve-snapshotcloneserver/lib/libetcdclient.so -if [ $? -ne 0 ] -then - exit -fi + build/curve/curve-snapshotcloneserver/lib/libetcdclient.so # curve-nginx mkdir -p build/curve/curve-nginx/app/etc -if [ $? -ne 0 ] -then - exit -fi mkdir -p build/curve/curve-nginx/conf -if [ $? -ne 0 ] -then - exit -fi # ansible cp -r curve-ansible build/curve/ -if [ $? -ne 0 ] -then - exit -fi # README # curve-monitor mkdir -p build/curve-monitor cp -r monitor/* build/curve-monitor/ -if [ $? -ne 0 ] -then - exit -fi echo "end copy" # step 4.1 prepare for nebd-package @@ -436,8 +286,7 @@ mkdir -p build/nebd-package/include/nebd mkdir -p build/nebd-package/bin mkdir -p build/nebd-package/lib/nebd -for i in `find bazel-bin/|grep -w so|grep -v solib|grep -v params|grep -v test|grep -v fake` -do +for i in $(find bazel-bin/ | grep -w so | grep -v solib | grep -v params | grep -v test | grep -v fake); do cp -f $i build/nebd-package/lib/nebd done @@ -452,35 +301,35 @@ cp nbd/nbd-package/usr/bin/map_curve_disk.sh build/nbd-package/bin cp nbd/nbd-package/etc/curve/curvetab build/nbd-package/etc cp nbd/nbd-package/etc/systemd/system/map_curve_disk.service build/nbd-package/etc -#step5 打包tar包 +# step5 打包tar包 echo "start make tarball" cd ${dir}/build curve_name="curve_${curve_version}.tar.gz" echo "curve_name: ${curve_name}" -tar zcvf ${curve_name} curve +tar zcf ${curve_name} curve cp ${curve_name} $dir monitor_name="curve-monitor_${curve_version}.tar.gz" -echo "curve_name: ${curve_name}" -tar zcvf ${monitor_name} curve-monitor +echo "monitor_name: ${monitor_name}" +tar zcf ${monitor_name} curve-monitor cp ${monitor_name} $dir nebd_name="nebd_${curve_version}.tar.gz" echo "nebd_name: ${nebd_name}" -tar zcvf ${nebd_name} nebd-package +tar zcf ${nebd_name} nebd-package cp ${nebd_name} $dir nbd_name="nbd_${curve_version}.tar.gz" echo "nbd_name: ${nbd_name}" -tar zcvf ${nbd_name} nbd-package +tar zcf ${nbd_name} nbd-package cp ${nbd_name} $dir echo "end make tarball" -#step6 清理libetcdclient.so编译出现的临时文件 +# step6 清理libetcdclient.so编译出现的临时文件 echo "start clean etcd" cd ${dir}/thirdparties/etcdclient make clean cd ${dir} echo "end clean etcd" -# step7 打包python whell -echo "start make python whell" +# step7 打包python wheel +echo "start make python wheel" build_curvefs_python $1 -echo "end make python whell" +echo "end make python wheel" diff --git 
a/nebd/src/common/stringstatus.cpp b/nebd/src/common/stringstatus.cpp index ab18f72c20..12e9806708 100644 --- a/nebd/src/common/stringstatus.cpp +++ b/nebd/src/common/stringstatus.cpp @@ -42,7 +42,7 @@ void StringStatus::Update() { int count = 0; for (auto &item : kvs_) { count += 1; - if (count == kvs_.size()) { + if (count == static_cast(kvs_.size())) { jsonStr += "\"" + item.first + "\"" + ":" + "\"" + item.second + "\""; } else { diff --git a/nebd/src/part1/libnebd.cpp b/nebd/src/part1/libnebd.cpp index 06898883cc..d9cb15d071 100644 --- a/nebd/src/part1/libnebd.cpp +++ b/nebd/src/part1/libnebd.cpp @@ -80,11 +80,19 @@ int nebd_lib_close(int fd) { } int nebd_lib_pread(int fd, void* buf, off_t offset, size_t length) { + (void)fd; + (void)buf; + (void)offset; + (void)length; // not support sync read return -1; } int nebd_lib_pwrite(int fd, const void* buf, off_t offset, size_t length) { + (void)fd; + (void)buf; + (void)offset; + (void)length; // not support sync write return -1; } @@ -102,6 +110,7 @@ int nebd_lib_aio_pwrite(int fd, NebdClientAioContext* context) { } int nebd_lib_sync(int fd) { + (void)fd; return 0; } diff --git a/nebd/src/part1/nebd_client.cpp b/nebd/src/part1/nebd_client.cpp index 734fee9480..a8d942a5b7 100644 --- a/nebd/src/part1/nebd_client.cpp +++ b/nebd/src/part1/nebd_client.cpp @@ -243,6 +243,7 @@ int NebdClient::Extend(int fd, int64_t newsize) { auto task = [&](brpc::Controller* cntl, brpc::Channel* channel, bool* rpcFailed) -> int64_t { + (void)channel; nebd::client::NebdFileService_Stub stub(&channel_); nebd::client::ResizeRequest request; nebd::client::ResizeResponse response; @@ -359,7 +360,9 @@ int NebdClient::AioRead(int fd, NebdClientAioContext* aioctx) { return 0; } -static void EmptyDeleter(void* m) {} +static void EmptyDeleter(void* m) { + (void)m; +} int NebdClient::AioWrite(int fd, NebdClientAioContext* aioctx) { auto task = [this, fd, aioctx]() { @@ -622,6 +625,7 @@ void NebdClient::InitLogger(const LogOption& logOption) { int NebdClient::ExecAsyncRpcTask(void* meta, bthread::TaskIterator& iter) { // NOLINT + (void)meta; if (iter.is_queue_stopped()) { return 0; } diff --git a/nebd/src/part2/file_entity.cpp b/nebd/src/part2/file_entity.cpp index 6bd655e18b..0899472c72 100644 --- a/nebd/src/part2/file_entity.cpp +++ b/nebd/src/part2/file_entity.cpp @@ -33,6 +33,8 @@ namespace nebd { namespace server { +extern const char* kOpenFlagsAttrKey; + bool IsOpenFlagsExactlySame(const OpenFlags* lhs, const OpenFlags* rhs) { if (lhs == nullptr && rhs == nullptr) { return true; @@ -145,6 +147,15 @@ int NebdFileEntity::Reopen(const ExtendAttribute& xattr) { << "filename: " << fileName_; return -1; } + + OpenFlags flags; + bool hasOpenFlags = + fileInstance->xattr.count(kOpenFlagsAttrKey) && + flags.ParseFromString(fileInstance->xattr.at(kOpenFlagsAttrKey)); + if (hasOpenFlags) { + openFlags_.reset(new OpenFlags{flags}); + } + LOG(INFO) << "Reopen file success. 
" << "fd: " << fd_ << ", filename: " << fileName_; @@ -361,6 +372,11 @@ int NebdFileEntity::UpdateFileStatus(NebdFileInstancePtr fileInstance) { fileInstance_ = fileInstance; status_ = NebdFileStatus::OPENED; timeStamp_ = TimeUtility::GetTimeofDayMs(); + OpenFlags flags; + if (fileInstance->xattr.count(kOpenFlagsAttrKey) && + flags.ParseFromString(fileInstance->xattr.at(kOpenFlagsAttrKey))) { + openFlags_.reset(new OpenFlags{flags}); + } return 0; } diff --git a/nebd/src/part2/file_service.cpp b/nebd/src/part2/file_service.cpp index 5a7f24bee1..984a638bfc 100644 --- a/nebd/src/part2/file_service.cpp +++ b/nebd/src/part2/file_service.cpp @@ -118,6 +118,7 @@ void NebdFileServiceImpl::OpenFile( const nebd::client::OpenFileRequest* request, nebd::client::OpenFileResponse* response, google::protobuf::Closure* done) { + (void)cntl_base; brpc::ClosureGuard doneGuard(done); response->set_retcode(RetCode::kNoOK); @@ -282,6 +283,7 @@ void NebdFileServiceImpl::GetInfo( const nebd::client::GetInfoRequest* request, nebd::client::GetInfoResponse* response, google::protobuf::Closure* done) { + (void)cntl_base; brpc::ClosureGuard doneGuard(done); response->set_retcode(RetCode::kNoOK); @@ -306,6 +308,7 @@ void NebdFileServiceImpl::CloseFile( const nebd::client::CloseFileRequest* request, nebd::client::CloseFileResponse* response, google::protobuf::Closure* done) { + (void)cntl_base; brpc::ClosureGuard doneGuard(done); response->set_retcode(RetCode::kNoOK); @@ -326,6 +329,7 @@ void NebdFileServiceImpl::ResizeFile( const nebd::client::ResizeRequest* request, nebd::client::ResizeResponse* response, google::protobuf::Closure* done) { + (void)cntl_base; brpc::ClosureGuard doneGuard(done); response->set_retcode(RetCode::kNoOK); @@ -345,6 +349,7 @@ void NebdFileServiceImpl::InvalidateCache( const nebd::client::InvalidateCacheRequest* request, nebd::client::InvalidateCacheResponse* response, google::protobuf::Closure* done) { + (void)cntl_base; brpc::ClosureGuard doneGuard(done); response->set_retcode(RetCode::kNoOK); diff --git a/nebd/src/part2/heartbeat_service.cpp b/nebd/src/part2/heartbeat_service.cpp index 3b564bd80b..00e897975c 100644 --- a/nebd/src/part2/heartbeat_service.cpp +++ b/nebd/src/part2/heartbeat_service.cpp @@ -34,6 +34,7 @@ void NebdHeartbeatServiceImpl::KeepAlive( const nebd::client::HeartbeatRequest* request, nebd::client::HeartbeatResponse* response, google::protobuf::Closure* done) { + (void)cntl_base; brpc::ClosureGuard doneGuard(done); bool ok = true; uint64_t curTime = TimeUtility::GetTimeofDayMs(); diff --git a/nebd/src/part2/metafile_manager.cpp b/nebd/src/part2/metafile_manager.cpp index 0dd133b78f..288fde68ce 100644 --- a/nebd/src/part2/metafile_manager.cpp +++ b/nebd/src/part2/metafile_manager.cpp @@ -118,7 +118,7 @@ int NebdMetaFileManager::AtomicWriteFile(const Json::Value& root) { int writeSize = wrapper_->pwrite(fd, jsonString.c_str(), jsonString.size(), 0); wrapper_->close(fd); - if (writeSize != jsonString.size()) { + if (writeSize != static_cast(jsonString.size())) { LOG(ERROR) << "Write tmp file " << tmpFilePath << " fail"; return -1; } @@ -206,7 +206,6 @@ int NebdMetaFileParser::Parse(Json::Value root, for (const auto& volume : volumes) { std::string fileName; - int fd; NebdFileMeta meta; if (volume[kFileName].isNull()) { diff --git a/nebd/src/part2/request_executor_curve.cpp b/nebd/src/part2/request_executor_curve.cpp index 37f1afae37..670d05879c 100644 --- a/nebd/src/part2/request_executor_curve.cpp +++ b/nebd/src/part2/request_executor_curve.cpp @@ -270,6 +270,7 @@ int 
CurveRequestExecutor::AioWrite( int CurveRequestExecutor::Flush( NebdFileInstance* fd, NebdServerAioContext* aioctx) { + (void)fd; aioctx->ret = 0; aioctx->cb(aioctx); diff --git a/nebd/test/common/rw_lock_test.cpp b/nebd/test/common/rw_lock_test.cpp index aca4c3f84e..59c5b22787 100644 --- a/nebd/test/common/rw_lock_test.cpp +++ b/nebd/test/common/rw_lock_test.cpp @@ -78,7 +78,6 @@ TEST(RWLockTest, basic_test) { auto readFunc = [&] { for (uint64_t i = 0; i < 10000; ++i) { ReadLockGuard readLockGuard(rwlock); - auto j = writeCnt + i; } }; { @@ -147,7 +146,6 @@ TEST(BthreadRWLockTest, basic_test) { auto readFunc = [&] { for (uint64_t i = 0; i < 10000; ++i) { ReadLockGuard readLockGuard(rwlock); - auto j = writeCnt + i; } }; { diff --git a/nebd/test/utils/config_generator.h b/nebd/test/utils/config_generator.h index 4b9cef788e..450e78ffb9 100644 --- a/nebd/test/utils/config_generator.h +++ b/nebd/test/utils/config_generator.h @@ -33,7 +33,6 @@ namespace nebd { namespace common { static const char* kNebdClientConfigPath = "nebd/etc/nebd/nebd-client.conf"; -static const char* kNebdServerConfigPath = "nebd/etc/nebd/nebd-server.conf"; class NebdClientConfigGenerator { public: diff --git a/proto/heartbeat.proto b/proto/heartbeat.proto index 3802c39422..d54723dfb8 100644 --- a/proto/heartbeat.proto +++ b/proto/heartbeat.proto @@ -108,6 +108,7 @@ message ChunkServerHeartbeatRequest { required uint32 copysetCount = 11; // chunkServer相关的统计信息 optional ChunkServerStatisticInfo stats = 12; + optional string version = 13; }; enum ConfigChangeType { diff --git a/proto/nameserver2.proto b/proto/nameserver2.proto index 0fa83edd29..af17afe99a 100644 --- a/proto/nameserver2.proto +++ b/proto/nameserver2.proto @@ -93,6 +93,7 @@ message FileInfo { optional FileThrottleParams throttleParams = 17; optional uint64 epoch = 18; + optional string poolset = 19; } // status code @@ -166,7 +167,8 @@ enum StatusCode { kRecoverFileError = 139; // epoch too old kEpochTooOld = 140; - + // poolset doesn't exist + kPoolsetNotExist = 141; // 元数据存储错误 kStorageError = 501; // 内部错误 @@ -202,6 +204,7 @@ message CreateFileRequest { required uint64 date = 6; optional uint64 stripeUnit = 7; optional uint64 stripeCount = 8; + optional string poolset = 9; }; message CreateFileResponse { @@ -499,6 +502,7 @@ message CreateCloneFileRequest { required string cloneSource = 9; optional uint64 stripeUnit = 10; optional uint64 stripeCount = 11; + optional string poolset = 12; } message CreateCloneFileResponse { diff --git a/proto/snapshotcloneserver.proto b/proto/snapshotcloneserver.proto index e8c0c70005..d245a0c805 100644 --- a/proto/snapshotcloneserver.proto +++ b/proto/snapshotcloneserver.proto @@ -51,6 +51,7 @@ message SnapshotInfoData { required int32 status = 10; optional uint64 stripeUnit = 11; optional uint64 stripeCount = 12; + optional string poolset = 13; }; message CloneInfoData { @@ -66,6 +67,7 @@ message CloneInfoData { required bool isLazy = 10; required int32 nextStep = 11; required int32 status = 12; + optional string poolset = 13; }; message HttpRequest {}; @@ -75,4 +77,3 @@ message HttpResponse {}; service SnapshotCloneService{ rpc default_method(HttpRequest) returns (HttpResponse); }; - diff --git a/proto/topology.proto b/proto/topology.proto index 499018a7b9..c260a60aa1 100644 --- a/proto/topology.proto +++ b/proto/topology.proto @@ -61,6 +61,13 @@ message ClusterInfoData { required string clusterId = 1; } +message PoolsetData { + required uint32 poolsetId = 1; + required string poolsetName = 2; + required string type = 3; + 
required string desc = 4; +} + message LogicalPoolData { required uint32 logicalPoolId = 1; required string logicalPoolName = 2; @@ -79,6 +86,7 @@ message PhysicalPoolData { required uint32 physicalPoolId = 1; required string physicalPoolName = 2; required string desc = 3; + optional uint32 poolsetId = 4; } message ZoneData { @@ -113,6 +121,7 @@ message ChunkServerData { required string mountPoint = 10; required int64 diskCapacity = 11; required int64 diskUsed = 12; + optional string version = 13; } message CopysetData { @@ -141,6 +150,7 @@ message ChunkServerInfo { required uint64 diskCapacity = 9; required uint64 diskUsed = 10; optional string externalIp = 11; + optional string version = 12; } //chunkserver message @@ -244,6 +254,7 @@ message ServerRegistRequest { optional uint32 physicalPoolID = 8; optional string physicalPoolName = 9; //physicalPoolName is unique required string desc = 10; + required string poolsetName = 11; } message ServerRegistResponse { @@ -325,6 +336,8 @@ message PhysicalPoolInfo { required uint32 physicalPoolID = 1; required string physicalPoolName = 2; optional string desc = 3; + required uint32 poolsetId = 4; + required string poolsetName = 5; } message PhysicalPoolRequest { @@ -332,6 +345,7 @@ message PhysicalPoolRequest { optional uint32 physicalPoolID = 1; optional string physicalPoolName = 2; optional string desc = 3; + optional string poolsetName = 4; } message PhysicalPoolResponse { @@ -342,11 +356,43 @@ message PhysicalPoolResponse { message ListPhysicalPoolRequest { } +message ListPhysicalPoolsInPoolsetRequest { + repeated uint32 poolsetId = 1; +} + message ListPhysicalPoolResponse { required sint32 statusCode = 1; repeated PhysicalPoolInfo physicalPoolInfos = 2; } +//poolset message +message PoolsetRequest { + //use either poolsetID or poolsetName + optional uint32 poolsetID = 1; + optional string poolsetName = 2; + optional string type = 3; + optional string desc = 4; +} + +message PoolsetInfo { + required uint32 poolsetID = 1; + required string poolsetName = 2; + required string type = 3; + optional string desc = 4; +} + +message PoolsetResponse { + required sint32 statusCode = 1; + optional PoolsetInfo poolsetInfo = 2; +} + +message ListPoolsetRequest {} + +message ListPoolsetResponse { + required sint32 statusCode = 1; + repeated PoolsetInfo poolsetInfos = 2; +} + //logicalpool message message LogicalPoolInfo { required uint32 logicalPoolID = 1; @@ -512,7 +558,7 @@ service TopologyService { rpc DeleteChunkServer(DeleteChunkServerRequest) returns (DeleteChunkServerResponse); rpc SetChunkServer(SetChunkServerStatusRequest) returns (SetChunkServerStatusResponse); - rpc RegistServer(ServerRegistRequest) returns (ServerRegistResponse); + rpc RegistServer(ServerRegistRequest) returns (ServerRegistResponse); rpc GetServer(GetServerRequest) returns (GetServerResponse); rpc DeleteServer(DeleteServerRequest) returns (DeleteServerResponse); rpc ListZoneServer(ListZoneServerRequest) returns (ListZoneServerResponse); @@ -522,11 +568,16 @@ service TopologyService { rpc GetZone(ZoneRequest) returns (ZoneResponse); rpc ListPoolZone(ListPoolZoneRequest) returns (ListPoolZoneResponse); + rpc CreatePhysicalPool(PhysicalPoolRequest) returns (PhysicalPoolResponse); + rpc DeletePhysicalPool(PhysicalPoolRequest) returns (PhysicalPoolResponse); + rpc GetPhysicalPool(PhysicalPoolRequest) returns (PhysicalPoolResponse); + rpc ListPhysicalPool(ListPhysicalPoolRequest) returns (ListPhysicalPoolResponse); + rpc ListPhysicalPoolsInPoolset(ListPhysicalPoolsInPoolsetRequest) 
returns (ListPhysicalPoolResponse); - rpc CreatePhysicalPool(PhysicalPoolRequest) returns (PhysicalPoolResponse); - rpc DeletePhysicalPool(PhysicalPoolRequest) returns (PhysicalPoolResponse); - rpc GetPhysicalPool(PhysicalPoolRequest) returns (PhysicalPoolResponse); - rpc ListPhysicalPool(ListPhysicalPoolRequest) returns (ListPhysicalPoolResponse); + rpc CreatePoolset(PoolsetRequest) returns (PoolsetResponse); + rpc DeletePoolset(PoolsetRequest) returns (PoolsetResponse); + rpc GetPoolset(PoolsetRequest) returns (PoolsetResponse); + rpc ListPoolset(ListPoolsetRequest) returns (ListPoolsetResponse); rpc CreateLogicalPool(CreateLogicalPoolRequest) returns (CreateLogicalPoolResponse); rpc DeleteLogicalPool(DeleteLogicalPoolRequest) returns (DeleteLogicalPoolResponse); @@ -543,4 +594,3 @@ service TopologyService { rpc SetCopysetsAvailFlag(SetCopysetsAvailFlagRequest) returns (SetCopysetsAvailFlagResponse); rpc ListUnAvailCopySets(ListUnAvailCopySetsRequest) returns (ListUnAvailCopySetsResponse); } - diff --git a/src/chunkserver/braft_cli_service.cpp b/src/chunkserver/braft_cli_service.cpp index 8c7d9ec7dc..6b21d87c44 100755 --- a/src/chunkserver/braft_cli_service.cpp +++ b/src/chunkserver/braft_cli_service.cpp @@ -207,6 +207,7 @@ void BRaftCliServiceImpl::transfer_leader( const TransferLeaderRequest *request, TransferLeaderResponse *response, ::google::protobuf::Closure *done) { + (void)response; brpc::Controller *cntl = (brpc::Controller *) controller; brpc::ClosureGuard done_guard(done); scoped_refptr node; diff --git a/src/chunkserver/braft_cli_service2.cpp b/src/chunkserver/braft_cli_service2.cpp index 3fb8f569fc..0d13ac3176 100755 --- a/src/chunkserver/braft_cli_service2.cpp +++ b/src/chunkserver/braft_cli_service2.cpp @@ -161,6 +161,7 @@ static void change_peers_returned(brpc::Controller* cntl, scoped_refptr /*node*/, ::google::protobuf::Closure* done, const butil::Status& st) { + (void)request; brpc::ClosureGuard done_guard(done); if (!st.ok()) { cntl->SetFailed(st.error_code(), "%s", st.error_cstr()); @@ -275,6 +276,7 @@ void BRaftCliServiceImpl2::TransferLeader( const TransferLeaderRequest2 *request, TransferLeaderResponse2 *response, ::google::protobuf::Closure *done) { + (void)response; brpc::Controller *cntl = (brpc::Controller *) controller; brpc::ClosureGuard done_guard(done); scoped_refptr node; @@ -305,6 +307,7 @@ void BRaftCliServiceImpl2::ResetPeer(RpcController* controller, const ResetPeerRequest2* request, ResetPeerResponse2* response, Closure* done) { + (void)response; brpc::Controller* cntl = (brpc::Controller*)controller; brpc::ClosureGuard done_guard(done); scoped_refptr node; @@ -342,6 +345,7 @@ static void snapshot_returned(brpc::Controller* cntl, scoped_refptr node, ::google::protobuf::Closure* done, const butil::Status& st) { + (void)node; brpc::ClosureGuard done_guard(done); if (!st.ok()) { cntl->SetFailed(st.error_code(), "%s", st.error_cstr()); @@ -352,6 +356,7 @@ void BRaftCliServiceImpl2::Snapshot(RpcController* controller, const SnapshotRequest2* request, SnapshotResponse2* response, Closure* done) { + (void)response; brpc::Controller* cntl = (brpc::Controller*)controller; brpc::ClosureGuard done_guard(done); scoped_refptr node; @@ -374,6 +379,8 @@ void BRaftCliServiceImpl2::SnapshotAll(RpcController* controller, const SnapshotAllRequest* request, SnapshotAllResponse* response, Closure* done) { + (void)request; + (void)response; brpc::Controller* cntl = (brpc::Controller*)controller; brpc::ClosureGuard done_guard(done); braft::NodeManager *const nm = 
braft::NodeManager::GetInstance(); diff --git a/src/chunkserver/chunk_service.cpp b/src/chunkserver/chunk_service.cpp index fd8a33a6ca..aaa7e721ec 100755 --- a/src/chunkserver/chunk_service.cpp +++ b/src/chunkserver/chunk_service.cpp @@ -211,6 +211,8 @@ void ChunkServiceImpl::CreateS3CloneChunk(RpcController* controller, const CreateS3CloneChunkRequest* request, CreateS3CloneChunkResponse* response, Closure* done) { + (void)controller; + (void)request; brpc::ClosureGuard doneGuard(done); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(INFO) << "Invalid request, serverSide Not implement yet"; @@ -238,7 +240,6 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, } // 判断request参数是否合法 - auto maxSize = copysetNodeManager_->GetCopysetNodeOptions().maxChunkSize; if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "I/O request, op: " << request->optype() @@ -425,6 +426,7 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller, const GetChunkInfoRequest *request, GetChunkInfoResponse *response, Closure *done) { + (void)controller; ChunkServiceClosure* closure = new (std::nothrow) ChunkServiceClosure(inflightThrottle_, nullptr, @@ -494,6 +496,7 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, const GetChunkHashRequest *request, GetChunkHashResponse *response, Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); // 判断request参数是否合法 @@ -553,6 +556,7 @@ void ChunkServiceImpl::UpdateEpoch(RpcController *controller, const UpdateEpochRequest *request, UpdateEpochResponse *response, Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); bool success = epochMap_->UpdateEpoch(request->fileid(), request->epoch()); if (success) { diff --git a/src/chunkserver/chunkserver_metrics.cpp b/src/chunkserver/chunkserver_metrics.cpp index f47ee9978b..339ecbbe66 100644 --- a/src/chunkserver/chunkserver_metrics.cpp +++ b/src/chunkserver/chunkserver_metrics.cpp @@ -31,14 +31,12 @@ namespace curve { namespace chunkserver { IOMetric::IOMetric() - : rps_(&reqNum_, 1) - , iops_(&ioNum_, 1) - , eps_(&errorNum_, 1) - , bps_(&ioBytes_, 1) {} + : rps_(&reqNum_, 1), iops_(&ioNum_, 1), eps_(&errorNum_, 1), + bps_(&ioBytes_, 1) {} IOMetric::~IOMetric() {} -int IOMetric::Init(const std::string& prefix) { +int IOMetric::Init(const std::string &prefix) { // 暴露所有的metric if (reqNum_.expose_as(prefix, "request_num") != 0) { LOG(ERROR) << "expose request num failed."; @@ -83,9 +81,7 @@ int IOMetric::Init(const std::string& prefix) { return 0; } -void IOMetric::OnRequest() { - reqNum_ << 1; -} +void IOMetric::OnRequest() { reqNum_ << 1; } void IOMetric::OnResponse(size_t size, int64_t latUs, bool hasError) { if (!hasError) { @@ -99,7 +95,7 @@ void IOMetric::OnResponse(size_t size, int64_t latUs, bool hasError) { } -int CSIOMetric::Init(const std::string& prefix) { +int CSIOMetric::Init(const std::string &prefix) { // 初始化io统计项metric std::string readPrefix = prefix + "_read"; std::string writePrefix = prefix + "_write"; @@ -154,10 +150,8 @@ void CSIOMetric::OnRequest(CSIOMetricType type) { } } -void CSIOMetric::OnResponse(CSIOMetricType type, - size_t size, - int64_t latUs, - bool hasError) { +void CSIOMetric::OnResponse(CSIOMetricType type, size_t size, int64_t latUs, + bool hasError) { IOMetricPtr ioMetric = GetIOMetric(type); if (ioMetric != nullptr) { ioMetric->OnResponse(size, latUs, hasError); @@ -167,43 +161,42 @@ void 
CSIOMetric::OnResponse(CSIOMetricType type, IOMetricPtr CSIOMetric::GetIOMetric(CSIOMetricType type) { IOMetricPtr result = nullptr; switch (type) { - case CSIOMetricType::READ_CHUNK: - result = readMetric_; - break; - case CSIOMetricType::WRITE_CHUNK: - result = writeMetric_; - break; - case CSIOMetricType::RECOVER_CHUNK: - result = recoverMetric_; - break; - case CSIOMetricType::PASTE_CHUNK: - result = pasteMetric_; - break; - case CSIOMetricType::DOWNLOAD: - result = downloadMetric_; - break; - default: - result = nullptr; - break; + case CSIOMetricType::READ_CHUNK: + result = readMetric_; + break; + case CSIOMetricType::WRITE_CHUNK: + result = writeMetric_; + break; + case CSIOMetricType::RECOVER_CHUNK: + result = recoverMetric_; + break; + case CSIOMetricType::PASTE_CHUNK: + result = pasteMetric_; + break; + case CSIOMetricType::DOWNLOAD: + result = downloadMetric_; + break; + default: + result = nullptr; + break; } return result; } -int CSCopysetMetric::Init(const LogicPoolID& logicPoolId, - const CopysetID& copysetId) { +int CSCopysetMetric::Init(const LogicPoolID &logicPoolId, + const CopysetID ©setId) { logicPoolId_ = logicPoolId; copysetId_ = copysetId; int ret = ioMetrics_.Init(Prefix()); if (ret < 0) { - LOG(ERROR) << "Init Copyset (" - << logicPoolId << "," << copysetId << ")" + LOG(ERROR) << "Init Copyset (" << logicPoolId << "," << copysetId << ")" << " metric failed."; return -1; } return 0; } -void CSCopysetMetric::MonitorDataStore(CSDataStore* datastore) { +void CSCopysetMetric::MonitorDataStore(CSDataStore *datastore) { std::string chunkCountPrefix = Prefix() + "_chunk_count"; std::string snapshotCountPrefix = Prefix() + "snapshot_count"; std::string cloneChunkCountPrefix = Prefix() + "_clonechunk_count"; @@ -216,26 +209,21 @@ void CSCopysetMetric::MonitorDataStore(CSDataStore* datastore) { } void CSCopysetMetric::MonitorCurveSegmentLogStorage( - CurveSegmentLogStorage* logStorage) { + CurveSegmentLogStorage *logStorage) { std::string walSegmentCountPrefix = Prefix() + "_walsegment_count"; walSegmentCount_ = std::make_shared>( walSegmentCountPrefix, GetLogStorageWalSegmentCountFunc, logStorage); } ChunkServerMetric::ChunkServerMetric() - : hasInited_(false) - , leaderCount_(nullptr) - , chunkLeft_(nullptr) - , walSegmentLeft_(nullptr) - , chunkTrashed_(nullptr) - , chunkCount_(nullptr) - , snapshotCount_(nullptr) - , cloneChunkCount_(nullptr) - , walSegmentCount_(nullptr) {} - -ChunkServerMetric* ChunkServerMetric::self_ = nullptr; - -ChunkServerMetric* ChunkServerMetric::GetInstance() { + : hasInited_(false), leaderCount_(nullptr), chunkLeft_(nullptr), + walSegmentLeft_(nullptr), chunkTrashed_(nullptr), chunkCount_(nullptr), + walSegmentCount_(nullptr), snapshotCount_(nullptr), + cloneChunkCount_(nullptr) {} + +ChunkServerMetric *ChunkServerMetric::self_ = nullptr; + +ChunkServerMetric *ChunkServerMetric::GetInstance() { // chunkserver metric 在chunkserver启动时初始化创建 // 因此创建的时候不会存在竞争,不需要锁保护 if (self_ == nullptr) { @@ -244,7 +232,7 @@ ChunkServerMetric* ChunkServerMetric::GetInstance() { return self_; } -int ChunkServerMetric::Init(const ChunkServerMetricOptions& option) { +int ChunkServerMetric::Init(const ChunkServerMetricOptions &option) { if (hasInited_) { LOG(WARNING) << "chunkserver metric has inited."; return 0; @@ -305,8 +293,8 @@ int ChunkServerMetric::Fini() { return 0; } -int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID& logicPoolId, - const CopysetID& copysetId) { +int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID &logicPoolId, + const 
CopysetID ©setId) { if (!option_.collectMetric) { return 0; } @@ -314,8 +302,8 @@ int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID& logicPoolId, GroupId groupId = ToGroupId(logicPoolId, copysetId); bool exist = copysetMetricMap_.Exist(groupId); if (exist) { - LOG(ERROR) << "Create Copyset (" - << logicPoolId << "," << copysetId << ")" + LOG(ERROR) << "Create Copyset (" << logicPoolId << "," << copysetId + << ")" << " metric failed : is already exists."; return -1; } @@ -323,8 +311,8 @@ int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID& logicPoolId, CopysetMetricPtr copysetMetric = std::make_shared(); int ret = copysetMetric->Init(logicPoolId, copysetId); if (ret < 0) { - LOG(ERROR) << "Create Copyset (" - << logicPoolId << "," << copysetId << ")" + LOG(ERROR) << "Create Copyset (" << logicPoolId << "," << copysetId + << ")" << " metric failed : init failed."; return -1; } @@ -333,8 +321,9 @@ int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID& logicPoolId, return 0; } -CopysetMetricPtr ChunkServerMetric::GetCopysetMetric( - const LogicPoolID& logicPoolId, const CopysetID& copysetId) { +CopysetMetricPtr +ChunkServerMetric::GetCopysetMetric(const LogicPoolID &logicPoolId, + const CopysetID ©setId) { if (!option_.collectMetric) { return nullptr; } @@ -343,8 +332,8 @@ CopysetMetricPtr ChunkServerMetric::GetCopysetMetric( return copysetMetricMap_.Get(groupId); } -int ChunkServerMetric::RemoveCopysetMetric(const LogicPoolID& logicPoolId, - const CopysetID& copysetId) { +int ChunkServerMetric::RemoveCopysetMetric(const LogicPoolID &logicPoolId, + const CopysetID ©setId) { GroupId groupId = ToGroupId(logicPoolId, copysetId); // 这里先保存copyset metric,等remove后再去释放 // 防止在读写锁里面去操作metric,导致死锁 @@ -353,8 +342,8 @@ int ChunkServerMetric::RemoveCopysetMetric(const LogicPoolID& logicPoolId, return 0; } -void ChunkServerMetric::OnRequest(const LogicPoolID& logicPoolId, - const CopysetID& copysetId, +void ChunkServerMetric::OnRequest(const LogicPoolID &logicPoolId, + const CopysetID ©setId, CSIOMetricType type) { if (!option_.collectMetric) { return; @@ -367,12 +356,10 @@ void ChunkServerMetric::OnRequest(const LogicPoolID& logicPoolId, ioMetrics_.OnRequest(type); } -void ChunkServerMetric::OnResponse(const LogicPoolID& logicPoolId, - const CopysetID& copysetId, - CSIOMetricType type, - size_t size, - int64_t latUs, - bool hasError) { +void ChunkServerMetric::OnResponse(const LogicPoolID &logicPoolId, + const CopysetID ©setId, + CSIOMetricType type, size_t size, + int64_t latUs, bool hasError) { if (!option_.collectMetric) { return; } @@ -384,7 +371,7 @@ void ChunkServerMetric::OnResponse(const LogicPoolID& logicPoolId, ioMetrics_.OnResponse(type, size, latUs, hasError); } -void ChunkServerMetric::MonitorChunkFilePool(FilePool* chunkFilePool) { +void ChunkServerMetric::MonitorChunkFilePool(FilePool *chunkFilePool) { if (!option_.collectMetric) { return; } @@ -394,7 +381,7 @@ void ChunkServerMetric::MonitorChunkFilePool(FilePool* chunkFilePool) { chunkLeftPrefix, GetChunkLeftFunc, chunkFilePool); } -void ChunkServerMetric::MonitorWalFilePool(FilePool* walFilePool) { +void ChunkServerMetric::MonitorWalFilePool(FilePool *walFilePool) { if (!option_.collectMetric) { return; } @@ -404,7 +391,7 @@ void ChunkServerMetric::MonitorWalFilePool(FilePool* walFilePool) { walSegmentLeftPrefix, GetWalSegmentLeftFunc, walFilePool); } -void ChunkServerMetric::MonitorTrash(Trash* trash) { +void ChunkServerMetric::MonitorTrash(Trash *trash) { if (!option_.collectMetric) { return; } @@ -430,7 +417,7 
@@ void ChunkServerMetric::DecreaseLeaderCount() { *leaderCount_ << -1; } -void ChunkServerMetric::ExposeConfigMetric(common::Configuration* conf) { +void ChunkServerMetric::ExposeConfigMetric(common::Configuration *conf) { if (!option_.collectMetric) { return; } @@ -441,4 +428,3 @@ void ChunkServerMetric::ExposeConfigMetric(common::Configuration* conf) { } // namespace chunkserver } // namespace curve - diff --git a/src/chunkserver/chunkserver_metrics.h b/src/chunkserver/chunkserver_metrics.h index 097700103b..d4354d196f 100644 --- a/src/chunkserver/chunkserver_metrics.h +++ b/src/chunkserver/chunkserver_metrics.h @@ -36,11 +36,11 @@ #include "src/common/configuration.h" #include "src/chunkserver/datastore/file_pool.h" -using curve::common::Uncopyable; -using curve::common::RWLock; +using curve::common::Configuration; using curve::common::ReadLockGuard; +using curve::common::RWLock; +using curve::common::Uncopyable; using curve::common::WriteLockGuard; -using curve::common::Configuration; namespace curve { namespace chunkserver { @@ -54,8 +54,7 @@ class Trash; template using PassiveStatusPtr = std::shared_ptr>; -template -using AdderPtr = std::shared_ptr>; +template using AdderPtr = std::shared_ptr>; // 使用LatencyRecorder的实现来统计读写请求的size情况 // 可以统计分位值、最大值、中位数、平均值等情况 @@ -72,7 +71,7 @@ class IOMetric { * @param prefix: 用于bvar曝光时使用的前缀 * @return 成功返回0,失败返回-1 */ - int Init(const std::string& prefix); + int Init(const std::string &prefix); /** * IO请求到来时统计requestNum */ @@ -88,25 +87,25 @@ class IOMetric { public: // io请求的数量 - bvar::Adder reqNum_; + bvar::Adder reqNum_; // 成功io的数量 - bvar::Adder ioNum_; + bvar::Adder ioNum_; // 失败的io个数 - bvar::Adder errorNum_; + bvar::Adder errorNum_; // 所有io的数据量 - bvar::Adder ioBytes_; + bvar::Adder ioBytes_; // io的延时情况(分位值、最大值、中位数、平均值) - bvar::LatencyRecorder latencyRecorder_; + bvar::LatencyRecorder latencyRecorder_; // io大小的情况(分位值、最大值、中位数、平均值) - IOSizeRecorder sizeRecorder_; + IOSizeRecorder sizeRecorder_; // 最近1秒请求的IO数量 - bvar::PerSecond> rps_; + bvar::PerSecond> rps_; // 最近1秒的iops - bvar::PerSecond> iops_; + bvar::PerSecond> iops_; // 最近1秒的出错IO数量 - bvar::PerSecond> eps_; + bvar::PerSecond> eps_; // 最近1秒的数据量 - bvar::PerSecond> bps_; + bvar::PerSecond> bps_; }; using IOMetricPtr = std::shared_ptr; @@ -121,11 +120,8 @@ enum class CSIOMetricType { class CSIOMetric { public: CSIOMetric() - : readMetric_(nullptr) - , writeMetric_(nullptr) - , recoverMetric_(nullptr) - , pasteMetric_(nullptr) - , downloadMetric_(nullptr) {} + : readMetric_(nullptr), writeMetric_(nullptr), recoverMetric_(nullptr), + pasteMetric_(nullptr), downloadMetric_(nullptr) {} ~CSIOMetric() {} @@ -143,9 +139,7 @@ class CSIOMetric { * @param latUS: 此次io的延时 * @param hasError: 此次io是否有错误产生 */ - void OnResponse(CSIOMetricType type, - size_t size, - int64_t latUs, + void OnResponse(CSIOMetricType type, size_t size, int64_t latUs, bool hasError); /** @@ -159,7 +153,7 @@ class CSIOMetric { * 初始化各项op的metric统计项 * @return 成功返回0,失败返回-1 */ - int Init(const std::string& prefix); + int Init(const std::string &prefix); /** * 释放各项op的metric资源 */ @@ -181,12 +175,9 @@ class CSIOMetric { class CSCopysetMetric { public: CSCopysetMetric() - : logicPoolId_(0) - , copysetId_(0) - , chunkCount_(nullptr) - , snapshotCount_(nullptr) - , cloneChunkCount_(nullptr) - , walSegmentCount_(nullptr) {} + : logicPoolId_(0), copysetId_(0), chunkCount_(nullptr), + walSegmentCount_(nullptr), snapshotCount_(nullptr), + cloneChunkCount_(nullptr) {} ~CSCopysetMetric() {} @@ -196,27 +187,25 @@ class CSCopysetMetric { * @param copysetId: 
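// Illustrative sketch (not from this patch): the IOMetric members above are
// brpc bvar counters; an Adder accumulates events, PerSecond derives a
// windowed rate from it, and expose_as() publishes the value under a prefix.
// The template arguments were stripped during extraction, so the ones used
// here are assumptions.
#include <bvar/bvar.h>
#include <cstdint>
#include <string>

struct SimpleIoMetric {
    bvar::Adder<uint64_t> reqNum;
    bvar::PerSecond<bvar::Adder<uint64_t>> rps;   // requests per second
    bvar::LatencyRecorder latency;                // quantiles, max, mean

    SimpleIoMetric() : rps(&reqNum, 1) {}         // 1-second window, as in IOMetric

    int Init(const std::string& prefix) {
        // expose_as() returns 0 on success, mirroring IOMetric::Init above.
        return reqNum.expose_as(prefix, "request_num");
    }

    void OnRequest() { reqNum << 1; }
    void OnResponse(int64_t latUs) { latency << latUs; }
};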
copyset的id * @return 成功返回0,失败返回-1 */ - int Init(const LogicPoolID& logicPoolId, const CopysetID& copysetId); + int Init(const LogicPoolID &logicPoolId, const CopysetID ©setId); /** * 监控DataStore指标,主要包括chunk的数量、快照的数量等 * @param datastore: 该copyset下的datastore指针 */ - void MonitorDataStore(CSDataStore* datastore); + void MonitorDataStore(CSDataStore *datastore); /** * @brief: Monitor log storage's metric, like the number of WAL segment file * @param logStorage: The pointer to CurveSegmentLogStorage */ - void MonitorCurveSegmentLogStorage(CurveSegmentLogStorage* logStorage); + void MonitorCurveSegmentLogStorage(CurveSegmentLogStorage *logStorage); /** * 执行请求前记录metric * @param type: 请求对应的metric类型 */ - void OnRequest(CSIOMetricType type) { - ioMetrics_.OnRequest(type); - } + void OnRequest(CSIOMetricType type) { ioMetrics_.OnRequest(type); } /** * 执行请求后记录metric @@ -226,9 +215,7 @@ class CSCopysetMetric { * @param latUS: 此次io的延时 * @param hasError: 此次io是否有错误产生 */ - void OnResponse(CSIOMetricType type, - size_t size, - int64_t latUs, + void OnResponse(CSIOMetricType type, size_t size, int64_t latUs, bool hasError) { ioMetrics_.OnResponse(type, size, latUs, hasError); } @@ -272,10 +259,8 @@ class CSCopysetMetric { private: inline std::string Prefix() { - return "copyset_" - + std::to_string(logicPoolId_) - + "_" - + std::to_string(copysetId_); + return "copyset_" + std::to_string(logicPoolId_) + "_" + + std::to_string(copysetId_); } private: @@ -375,7 +360,7 @@ class ChunkServerMetric : public Uncopyable { * @pa)ram option: 初始化配置项 * @return 成功返回0,失败返回-1 */ - int Init(const ChunkServerMetricOptions& option); + int Init(const ChunkServerMetricOptions &option); /** * 释放metric资源 @@ -389,8 +374,7 @@ class ChunkServerMetric : public Uncopyable { * @param copysetId: 此次io操作所在的copysetid * @param type: 请求类型 */ - void OnRequest(const LogicPoolID& logicPoolId, - const CopysetID& copysetId, + void OnRequest(const LogicPoolID &logicPoolId, const CopysetID ©setId, CSIOMetricType type); /** @@ -403,11 +387,8 @@ class ChunkServerMetric : public Uncopyable { * @param latUS: 此次io的延时 * @param hasError: 此次io是否有错误产生 */ - void OnResponse(const LogicPoolID& logicPoolId, - const CopysetID& copysetId, - CSIOMetricType type, - size_t size, - int64_t latUs, + void OnResponse(const LogicPoolID &logicPoolId, const CopysetID ©setId, + CSIOMetricType type, size_t size, int64_t latUs, bool hasError); /** @@ -417,8 +398,8 @@ class ChunkServerMetric : public Uncopyable { * @param copysetId: copyset的id * @return 成功返回0,失败返回-1,如果指定metric已存在返回失败 */ - int CreateCopysetMetric(const LogicPoolID& logicPoolId, - const CopysetID& copysetId); + int CreateCopysetMetric(const LogicPoolID &logicPoolId, + const CopysetID ©setId); /** * 获取指定copyset的metric @@ -426,8 +407,8 @@ class ChunkServerMetric : public Uncopyable { * @param copysetId: copyset的id * @return 成功返回指定的copyset metric,失败返回nullptr */ - CopysetMetricPtr GetCopysetMetric(const LogicPoolID& logicPoolId, - const CopysetID& copysetId); + CopysetMetricPtr GetCopysetMetric(const LogicPoolID &logicPoolId, + const CopysetID ©setId); /** * 删除指定copyset的metric @@ -435,26 +416,26 @@ class ChunkServerMetric : public Uncopyable { * @param copysetId: copyset的id * @return 成功返回0,失败返回-1 */ - int RemoveCopysetMetric(const LogicPoolID& logicPoolId, - const CopysetID& copysetId); + int RemoveCopysetMetric(const LogicPoolID &logicPoolId, + const CopysetID ©setId); /** * 监视chunk分配池,主要监视池中chunk的数量 * @param chunkFilePool: chunkfilePool的对象指针 */ - void MonitorChunkFilePool(FilePool* chunkFilePool); + void 
MonitorChunkFilePool(FilePool *chunkFilePool); /** * 监视wal segment分配池,主要监视池中segment的数量 * @param walFilePool: walfilePool的对象指针 */ - void MonitorWalFilePool(FilePool* walFilePool); + void MonitorWalFilePool(FilePool *walFilePool); /** * 监视回收站 * @param trash: trash的对象指针 */ - void MonitorTrash(Trash* trash); + void MonitorTrash(Trash *trash); /** * 增加 leader count 计数 @@ -470,7 +451,7 @@ class ChunkServerMetric : public Uncopyable { * 更新配置项数据 * @param conf: 配置内容 */ - void ExposeConfigMetric(common::Configuration* conf); + void ExposeConfigMetric(common::Configuration *conf); /** * 获取指定类型的IOMetric @@ -481,13 +462,9 @@ class ChunkServerMetric : public Uncopyable { return ioMetrics_.GetIOMetric(type); } - CopysetMetricMap* GetCopysetMetricMap() { - return ©setMetricMap_; - } + CopysetMetricMap *GetCopysetMetricMap() { return ©setMetricMap_; } - uint32_t GetCopysetCount() { - return copysetMetricMap_.Size(); - } + uint32_t GetCopysetCount() { return copysetMetricMap_.Size(); } uint32_t GetLeaderCount() const { if (leaderCount_ == nullptr) @@ -570,7 +547,7 @@ class ChunkServerMetric : public Uncopyable { // chunkserver上的IO类型的metric统计 CSIOMetric ioMetrics_; // 用于单例模式的自指指针 - static ChunkServerMetric* self_; + static ChunkServerMetric *self_; }; } // namespace chunkserver diff --git a/src/chunkserver/cli.cpp b/src/chunkserver/cli.cpp index 6e7056c4e8..71c7baea02 100644 --- a/src/chunkserver/cli.cpp +++ b/src/chunkserver/cli.cpp @@ -206,6 +206,8 @@ butil::Status Snapshot(const LogicPoolID &logicPoolId, const CopysetID ©setId, const PeerId &peer, const braft::cli::CliOptions &options) { + (void)logicPoolId; + (void)copysetId; brpc::Channel channel; if (channel.Init(peer.addr, NULL) != 0) { return butil::Status(-1, "Fail to init channel to %s", diff --git a/src/chunkserver/clone_copyer.cpp b/src/chunkserver/clone_copyer.cpp index 858a257939..bc449fd2c8 100644 --- a/src/chunkserver/clone_copyer.cpp +++ b/src/chunkserver/clone_copyer.cpp @@ -62,7 +62,7 @@ void OriginCopyer::DeleteExpiredCurveCache(void* arg) { while (taskCopyer->curveOpenTime_.size() > 0) { CurveOpenTimestamp oldestCache = *taskCopyer->curveOpenTime_.begin(); if (now.tv_sec - oldestCache.lastUsedSec < - taskCopyer->curveFileTimeoutSec_) { + static_cast(taskCopyer->curveFileTimeoutSec_)) { break; } @@ -186,6 +186,7 @@ void OriginCopyer::DownloadFromS3(const string& objectName, GetObjectAsyncCallBack cb = [=] (const S3Adapter* adapter, const std::shared_ptr& context) { + (void)adapter; brpc::ClosureGuard doneGuard(done); if (context->retCode != 0) { done->SetFailed(); diff --git a/src/chunkserver/clone_core.cpp b/src/chunkserver/clone_core.cpp index 0058f449a6..a9bedbae7b 100644 --- a/src/chunkserver/clone_core.cpp +++ b/src/chunkserver/clone_core.cpp @@ -47,9 +47,9 @@ DownloadClosure::DownloadClosure(std::shared_ptr readRequest, Closure* done) : isFailed_(false) , beginTime_(TimeUtility::GetTimeofDayUs()) - , readRequest_(readRequest) - , cloneCore_(cloneCore) , downloadCtx_(downloadCtx) + , cloneCore_(cloneCore) + , readRequest_(readRequest) , done_(done) { // 记录初始metric if (readRequest_ != nullptr) { @@ -354,7 +354,6 @@ int CloneCore::ReadThenMerge(std::shared_ptr readRequest, const butil::IOBuf* cloneData, char* chunkData) { const ChunkRequest* request = readRequest->request_; - ChunkID id = readRequest->ChunkId(); std::shared_ptr dataStore = readRequest->datastore_; off_t offset = request->offset(); diff --git a/src/chunkserver/concurrent_apply/concurrent_apply.h b/src/chunkserver/concurrent_apply/concurrent_apply.h index 
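// Illustrative sketch (not from this patch): several constructors in these
// hunks reorder their initializer lists. Members are always initialized in
// declaration order, so a mismatched list triggers -Wreorder and can hide
// real initialization-dependency bugs. Hypothetical example:
class Tracker {
 public:
    explicit Tracker(int cap)
        : capacity_(cap),   // listed first because it is declared first
          used_(0) {}

 private:
    int capacity_;
    int used_;
};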
c11e3f76a4..af167c3e9a 100644 --- a/src/chunkserver/concurrent_apply/concurrent_apply.h +++ b/src/chunkserver/concurrent_apply/concurrent_apply.h @@ -61,8 +61,8 @@ class CURVE_CACHELINE_ALIGNMENT ConcurrentApplyModule { public: ConcurrentApplyModule(): start_(false), rconcurrentsize_(0), - wconcurrentsize_(0), rqueuedepth_(0), + wconcurrentsize_(0), wqueuedepth_(0), cond_(0) {} diff --git a/src/chunkserver/conf_epoch_file.cpp b/src/chunkserver/conf_epoch_file.cpp index 237668378f..6a39c6ce3e 100644 --- a/src/chunkserver/conf_epoch_file.cpp +++ b/src/chunkserver/conf_epoch_file.cpp @@ -34,10 +34,8 @@ namespace chunkserver { const uint32_t kConfEpochFileMaxSize = 4096; const uint64_t kConfEpochFileMagic = 0x6225929368674119; -int ConfEpochFile::Load(const std::string &path, - LogicPoolID *logicPoolID, - CopysetID *copysetID, - uint64_t *epoch) { +int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, + CopysetID *copysetID, uint64_t *epoch) { int fd = fs_->Open(path.c_str(), O_RDWR); if (0 > fd) { LOG(ERROR) << "LoadConfEpoch failed open file " << path @@ -86,16 +84,13 @@ int ConfEpochFile::Load(const std::string &path, LOG(INFO) << "Load conf epoch " << path << " success. " << "logicPoolID: " << *logicPoolID - << ", copysetID: " << *copysetID - << ", epoch: " << *epoch; + << ", copysetID: " << *copysetID << ", epoch: " << *epoch; return 0; } -int ConfEpochFile::Save(const std::string &path, - const LogicPoolID logicPoolID, - const CopysetID copysetID, - const uint64_t epoch) { +int ConfEpochFile::Save(const std::string &path, const LogicPoolID logicPoolID, + const CopysetID copysetID, const uint64_t epoch) { // 1. 转换成conf message ConfEpoch confEpoch; confEpoch.set_logicpoolid(logicPoolID); @@ -113,7 +108,8 @@ int ConfEpochFile::Save(const std::string &path, opt.enum_option = json2pb::OUTPUT_ENUM_BY_NUMBER; if (!json2pb::ProtoMessageToJson(confEpoch, &out, opt, &err)) { - LOG(ERROR) << "Failed to encode conf epoch," << " error: " << err; + LOG(ERROR) << "Failed to encode conf epoch," + << " error: " << err; return -1; } @@ -127,7 +123,8 @@ int ConfEpochFile::Save(const std::string &path, } // 3. 
write文件 - if (out.size() != fs_->Write(fd, out.c_str(), 0, out.size())) { + if (static_cast(out.size()) != + fs_->Write(fd, out.c_str(), 0, out.size())) { LOG(ERROR) << "SaveConfEpoch write failed, path: " << path << ", errno: " << errno << ", error message: " << strerror(errno); @@ -150,22 +147,18 @@ int ConfEpochFile::Save(const std::string &path, uint32_t ConfEpochFile::ConfEpochCrc(const ConfEpoch &confEpoch) { uint32_t crc32c = 0; - uint32_t logicPoolId = confEpoch.logicpoolid(); - uint32_t copysetId = confEpoch.copysetid(); - uint64_t epoch = confEpoch.epoch(); - uint64_t magic = kConfEpochFileMagic; - - crc32c = curve::common::CRC32(crc32c, - reinterpret_cast(&logicPoolId), - sizeof(logicPoolId)); - crc32c = curve::common::CRC32(crc32c, - reinterpret_cast(©setId), + uint32_t logicPoolId = confEpoch.logicpoolid(); + uint32_t copysetId = confEpoch.copysetid(); + uint64_t epoch = confEpoch.epoch(); + uint64_t magic = kConfEpochFileMagic; + + crc32c = curve::common::CRC32( + crc32c, reinterpret_cast(&logicPoolId), sizeof(logicPoolId)); + crc32c = curve::common::CRC32(crc32c, reinterpret_cast(©setId), sizeof(copysetId)); - crc32c = curve::common::CRC32(crc32c, - reinterpret_cast(&epoch), + crc32c = curve::common::CRC32(crc32c, reinterpret_cast(&epoch), sizeof(epoch)); - crc32c = curve::common::CRC32(crc32c, - reinterpret_cast(&magic), + crc32c = curve::common::CRC32(crc32c, reinterpret_cast(&magic), sizeof(magic)); return crc32c; diff --git a/src/chunkserver/copyset_node.cpp b/src/chunkserver/copyset_node.cpp index 1945ed9ba4..c4dee64f0c 100755 --- a/src/chunkserver/copyset_node.cpp +++ b/src/chunkserver/copyset_node.cpp @@ -75,10 +75,10 @@ CopysetNode::CopysetNode(const LogicPoolID &logicPoolId, chunkDataRpath_(), appliedIndex_(0), leaderTerm_(-1), + configChange_(std::make_shared()), + lastSnapshotIndex_(0), scaning_(false), lastScanSec_(0), - lastSnapshotIndex_(0), - configChange_(std::make_shared()), enableOdsyncWhenOpenChunkFile_(false), isSyncing_(false), checkSyncingIntervalMs_(500) { @@ -208,7 +208,10 @@ int CopysetNode::Run() { << "Copyset: " << GroupIdString(); return -1; } - syncThread_.Run(); + + if (!enableOdsyncWhenOpenChunkFile_) { + syncThread_.Run(); + } LOG(INFO) << "Run copyset success." 
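// Illustrative sketch (not from this patch): the CopysetNode::Run() hunk above
// now starts the background sync thread only when chunk files are NOT opened
// with O_DSYNC, presumably because synchronous writes leave nothing for a
// periodic flusher to do. Simplified, hypothetical shape of that gating:
#include <thread>

class ChunkSyncer {
 public:
    explicit ChunkSyncer(bool enableOdsyncWhenOpenChunkFile)
        : enableOdsync_(enableOdsyncWhenOpenChunkFile) {}

    void Run() {
        if (enableOdsync_) {
            return;                              // every write already hits disk
        }
        flusher_ = std::thread([this] { FlushLoop(); });
    }

    void Stop() {
        if (flusher_.joinable()) flusher_.join();
    }

 private:
    void FlushLoop() { /* periodically fsync dirty chunk files */ }

    bool enableOdsync_;
    std::thread flusher_;
};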
<< "Copyset: " << GroupIdString(); @@ -495,15 +498,16 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { } void CopysetNode::on_leader_start(int64_t term) { - leaderTerm_.store(term, std::memory_order_release); ChunkServerMetric::GetInstance()->IncreaseLeaderCount(); concurrentapply_->Flush(); + leaderTerm_.store(term, std::memory_order_release); LOG(INFO) << "Copyset: " << GroupIdString() << ", peer id: " << peerId_.to_string() << " become leader, term is: " << leaderTerm_; } void CopysetNode::on_leader_stop(const butil::Status &status) { + (void)status; leaderTerm_.store(-1, std::memory_order_release); ChunkServerMetric::GetInstance()->DecreaseLeaderCount(); LOG(INFO) << "Copyset: " << GroupIdString() diff --git a/src/chunkserver/copyset_service.cpp b/src/chunkserver/copyset_service.cpp index e29f8c8ab8..e09516c0ad 100755 --- a/src/chunkserver/copyset_service.cpp +++ b/src/chunkserver/copyset_service.cpp @@ -84,6 +84,7 @@ void CopysetServiceImpl::CreateCopysetNode2(RpcController *controller, const CopysetRequest2 *request, CopysetResponse2 *response, Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); Copyset copyset; @@ -138,6 +139,7 @@ void CopysetServiceImpl::DeleteBrokenCopyset(RpcController* controller, const CopysetRequest* request, CopysetResponse* response, Closure* done) { + (void)controller; LOG(INFO) << "Receive delete broken copyset request"; brpc::ClosureGuard doneGuard(done); @@ -163,6 +165,7 @@ void CopysetServiceImpl::GetCopysetStatus(RpcController *controller, const CopysetStatusRequest *request, CopysetStatusResponse *response, Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); LOG(INFO) << "Received GetCopysetStatus request: " diff --git a/src/chunkserver/datastore/chunkserver_chunkfile.cpp b/src/chunkserver/datastore/chunkserver_chunkfile.cpp index 78d0ae2119..b4d21cdb7f 100644 --- a/src/chunkserver/datastore/chunkserver_chunkfile.cpp +++ b/src/chunkserver/datastore/chunkserver_chunkfile.cpp @@ -300,6 +300,7 @@ CSErrorCode CSChunkFile::Write(SequenceNum sn, off_t offset, size_t length, uint32_t* cost) { + (void)cost; WriteLockGuard writeGuard(rwLock_); if (!CheckOffsetAndLength( offset, length, isCloneChunk_ ? 
pageSize_ : FLAGS_minIoAlignment)) { diff --git a/src/chunkserver/datastore/chunkserver_datastore.cpp b/src/chunkserver/datastore/chunkserver_datastore.cpp index 3eed89d747..6d624e6a87 100644 --- a/src/chunkserver/datastore/chunkserver_datastore.cpp +++ b/src/chunkserver/datastore/chunkserver_datastore.cpp @@ -39,8 +39,8 @@ CSDataStore::CSDataStore(std::shared_ptr lfs, const DataStoreOptions& options) : chunkSize_(options.chunkSize), pageSize_(options.pageSize), - baseDir_(options.baseDir), locationLimit_(options.locationLimit), + baseDir_(options.baseDir), chunkFilePool_(chunkFilePool), lfs_(lfs), enableOdsyncWhenOpenChunkFile_(options.enableOdsyncWhenOpenChunkFile) { @@ -147,6 +147,7 @@ CSErrorCode CSDataStore::ReadChunk(ChunkID id, char * buf, off_t offset, size_t length) { + (void)sn; auto chunkFile = metaCache_.Get(id); if (chunkFile == nullptr) { return CSErrorCode::ChunkNotExistError; @@ -163,6 +164,7 @@ CSErrorCode CSDataStore::ReadChunk(ChunkID id, CSErrorCode CSDataStore::ReadChunkMetaPage(ChunkID id, SequenceNum sn, char * buf) { + (void)sn; auto chunkFile = metaCache_.Get(id); if (chunkFile == nullptr) { return CSErrorCode::ChunkNotExistError; diff --git a/src/chunkserver/datastore/file_pool.cpp b/src/chunkserver/datastore/file_pool.cpp index be56d87125..d5d5e78d8e 100644 --- a/src/chunkserver/datastore/file_pool.cpp +++ b/src/chunkserver/datastore/file_pool.cpp @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -44,10 +45,10 @@ using curve::common::kFilePoolMaigic; namespace curve { namespace chunkserver { -const char* FilePoolHelper::kFileSize = "chunkSize"; -const char* FilePoolHelper::kMetaPageSize = "metaPageSize"; -const char* FilePoolHelper::kFilePoolPath = "chunkfilepool_path"; -const char* FilePoolHelper::kCRC = "crc"; +const char *FilePoolHelper::kFileSize = "chunkSize"; +const char *FilePoolHelper::kMetaPageSize = "metaPageSize"; +const char *FilePoolHelper::kFilePoolPath = "chunkfilepool_path"; +const char *FilePoolHelper::kCRC = "crc"; const uint32_t FilePoolHelper::kPersistSize = 4096; const std::string FilePool::kCleanChunkSuffix_ = ".clean"; // NOLINT const std::chrono::milliseconds FilePool::kSuccessSleepMsec_(10); @@ -55,8 +56,8 @@ const std::chrono::milliseconds FilePool::kFailSleepMsec_(500); int FilePoolHelper::PersistEnCodeMetaInfo( std::shared_ptr fsptr, uint32_t chunkSize, - uint32_t metaPageSize, const std::string& filePoolPath, - const std::string& persistPath) { + uint32_t metaPageSize, const std::string &filePoolPath, + const std::string &persistPath) { Json::Value root; root[kFileSize] = chunkSize; root[kMetaPageSize] = metaPageSize; @@ -64,7 +65,7 @@ int FilePoolHelper::PersistEnCodeMetaInfo( uint32_t crcsize = sizeof(kFilePoolMaigic) + sizeof(chunkSize) + sizeof(metaPageSize) + filePoolPath.size(); - char* crcbuf = new char[crcsize]; + char *crcbuf = new char[crcsize]; ::memcpy(crcbuf, kFilePoolMaigic, sizeof(kFilePoolMaigic)); ::memcpy(crcbuf + sizeof(kFilePoolMaigic), &chunkSize, sizeof(uint32_t)); @@ -85,7 +86,7 @@ int FilePoolHelper::PersistEnCodeMetaInfo( LOG(INFO) << root.toStyledString().c_str(); - char* writeBuffer = new char[kPersistSize]; + char *writeBuffer = new char[kPersistSize]; memset(writeBuffer, 0, kPersistSize); memcpy(writeBuffer, root.toStyledString().c_str(), root.toStyledString().size()); @@ -105,9 +106,9 @@ int FilePoolHelper::PersistEnCodeMetaInfo( } int FilePoolHelper::DecodeMetaInfoFromMetaFile( - std::shared_ptr fsptr, const std::string& metaFilePath, - uint32_t metaFileSize, uint32_t* 
chunksize, uint32_t* metapagesize, - std::string* chunkfilePath) { + std::shared_ptr fsptr, const std::string &metaFilePath, + uint32_t metaFileSize, uint32_t *chunksize, uint32_t *metapagesize, + std::string *chunkfilePath) { int fd = fsptr->Open(metaFilePath, O_RDWR); if (fd < 0) { LOG(ERROR) << "meta file open failed, " << metaFilePath; @@ -117,7 +118,7 @@ int FilePoolHelper::DecodeMetaInfoFromMetaFile( std::unique_ptr readvalid(new char[metaFileSize]); memset(readvalid.get(), 0, metaFileSize); int ret = fsptr->Read(fd, readvalid.get(), 0, metaFileSize); - if (ret != metaFileSize) { + if (ret != static_cast(metaFileSize)) { fsptr->Close(fd); LOG(ERROR) << "meta file read failed, " << metaFilePath; return -1; @@ -128,10 +129,14 @@ int FilePoolHelper::DecodeMetaInfoFromMetaFile( uint32_t crcvalue = 0; bool parse = false; do { - Json::Reader reader; + Json::CharReaderBuilder builder; + std::unique_ptr reader(builder.newCharReader()); Json::Value value; - if (!reader.parse(readvalid.get(), value)) { - LOG(ERROR) << "chunkfile meta file got error!"; + char *str = readvalid.get(); + JSONCPP_STRING errormsg; + if (!reader->parse(str, str + strlen(str), &value, &errormsg)) { + LOG(ERROR) << "chunkfile meta file got error!" + << " error: " << errormsg; break; } @@ -210,7 +215,7 @@ FilePool::FilePool(std::shared_ptr fsptr) memset(writeBuffer_.get(), 0, poolOpt_.bytesPerWrite); } -bool FilePool::Initialize(const FilePoolOptions& cfopt) { +bool FilePool::Initialize(const FilePoolOptions &cfopt) { poolOpt_ = cfopt; if (poolOpt_.getFileFromPool) { if (!CheckValid()) { @@ -261,7 +266,7 @@ bool FilePool::CleanChunk(uint64_t chunkid, bool onlyMarked) { } int fd = ret; - auto defer = [&](...){ fsptr_->Close(fd); }; + auto defer = [&](...) { fsptr_->Close(fd); }; std::shared_ptr _(nullptr, defer); uint64_t chunklen = poolOpt_.fileSize + poolOpt_.metaPageSize; @@ -276,10 +281,11 @@ bool FilePool::CleanChunk(uint64_t chunkid, bool onlyMarked) { uint64_t nwrite = 0; uint64_t ntotal = chunklen; uint32_t bytesPerWrite = poolOpt_.bytesPerWrite; - char* buffer = writeBuffer_.get(); + char *buffer = writeBuffer_.get(); while (nwrite < ntotal) { - nbytes = fsptr_->Write(fd, buffer, nwrite, + nbytes = fsptr_->Write( + fd, buffer, nwrite, std::min(ntotal - nwrite, (uint64_t)bytesPerWrite)); if (nbytes < 0) { LOG(ERROR) << "Write file failed: " << chunkpath; @@ -305,8 +311,8 @@ bool FilePool::CleanChunk(uint64_t chunkid, bool onlyMarked) { } bool FilePool::CleaningChunk() { - auto popBack = [this](std::vector* chunks, - uint64_t* chunksLeft) -> uint64_t { + auto popBack = [this](std::vector *chunks, + uint64_t *chunksLeft) -> uint64_t { std::unique_lock lk(mtx_); if (chunks->empty()) { return 0; @@ -319,8 +325,8 @@ bool FilePool::CleaningChunk() { return chunkid; }; - auto pushBack = [this](std::vector* chunks, - uint64_t chunkid, uint64_t* chunksLeft) { + auto pushBack = [this](std::vector *chunks, uint64_t chunkid, + uint64_t *chunksLeft) { std::unique_lock lk(mtx_); chunks->push_back(chunkid); (*chunksLeft)++; @@ -374,9 +380,9 @@ bool FilePool::StopCleaning() { return true; } -bool FilePool::GetChunk(bool needClean, uint64_t* chunkid, bool* isCleaned) { - auto pop = [&](std::vector* chunks, - uint64_t* chunksLeft, bool isCleanChunks) -> bool { +bool FilePool::GetChunk(bool needClean, uint64_t *chunkid, bool *isCleaned) { + auto pop = [&](std::vector *chunks, uint64_t *chunksLeft, + bool isCleanChunks) -> bool { std::unique_lock lk(mtx_); if (chunks->empty()) { return false; @@ -391,14 +397,14 @@ bool 
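// Illustrative sketch (not from this patch): the hunk above migrates from the
// deprecated Json::Reader to Json::CharReaderBuilder, which reports parse
// errors through an output string. Minimal standalone usage:
#include <json/json.h>
#include <cstring>
#include <iostream>
#include <memory>

bool ParseJson(const char* text, Json::Value* out) {
    Json::CharReaderBuilder builder;
    std::unique_ptr<Json::CharReader> reader(builder.newCharReader());
    JSONCPP_STRING errs;
    if (!reader->parse(text, text + std::strlen(text), out, &errs)) {
        std::cerr << "json parse failed: " << errs << std::endl;
        return false;
    }
    return true;
}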
FilePool::GetChunk(bool needClean, uint64_t* chunkid, bool* isCleaned) { }; if (!needClean) { - return pop(&dirtyChunks_, ¤tState_.dirtyChunksLeft, false) - || pop(&cleanChunks_, ¤tState_.cleanChunksLeft, true); + return pop(&dirtyChunks_, ¤tState_.dirtyChunksLeft, false) || + pop(&cleanChunks_, ¤tState_.cleanChunksLeft, true); } // Need clean chunk *isCleaned = false; - bool ret = pop(&cleanChunks_, ¤tState_.cleanChunksLeft, true) - || pop(&dirtyChunks_, ¤tState_.dirtyChunksLeft, false); + bool ret = pop(&cleanChunks_, ¤tState_.cleanChunksLeft, true) || + pop(&dirtyChunks_, ¤tState_.dirtyChunksLeft, false); if (true == ret && false == *isCleaned && CleanChunk(*chunkid, true)) { *isCleaned = true; @@ -407,8 +413,7 @@ bool FilePool::GetChunk(bool needClean, uint64_t* chunkid, bool* isCleaned) { return *isCleaned; } -int FilePool::GetFile(const std::string& targetpath, - const char* metapage, +int FilePool::GetFile(const std::string &targetpath, const char *metapage, bool needClean) { int ret = -1; int retry = 0; @@ -470,7 +475,7 @@ int FilePool::GetFile(const std::string& targetpath, return ret; } -int FilePool::AllocateChunk(const std::string& chunkpath) { +int FilePool::AllocateChunk(const std::string &chunkpath) { uint64_t chunklen = poolOpt_.fileSize + poolOpt_.metaPageSize; int ret = fsptr_->Open(chunkpath.c_str(), O_RDWR | O_CREAT); @@ -487,7 +492,7 @@ int FilePool::AllocateChunk(const std::string& chunkpath) { return -1; } - char* data = new (std::nothrow) char[chunklen]; + char *data = new (std::nothrow) char[chunklen]; memset(data, 0, chunklen); ret = fsptr_->Write(fd, data, 0, chunklen); @@ -513,7 +518,7 @@ int FilePool::AllocateChunk(const std::string& chunkpath) { return ret; } -bool FilePool::WriteMetaPage(const std::string& sourcepath, const char* page) { +bool FilePool::WriteMetaPage(const std::string &sourcepath, const char *page) { int fd = -1; int ret = -1; @@ -526,7 +531,7 @@ bool FilePool::WriteMetaPage(const std::string& sourcepath, const char* page) { fd = ret; ret = fsptr_->Write(fd, page, 0, poolOpt_.metaPageSize); - if (ret != poolOpt_.metaPageSize) { + if (ret != static_cast(poolOpt_.metaPageSize)) { fsptr_->Close(fd); LOG(ERROR) << "write metapage failed, " << sourcepath.c_str(); return false; @@ -547,7 +552,7 @@ bool FilePool::WriteMetaPage(const std::string& sourcepath, const char* page) { return true; } -int FilePool::RecycleFile(const std::string& chunkpath) { +int FilePool::RecycleFile(const std::string &chunkpath) { if (!poolOpt_.getFileFromPool) { int ret = fsptr_->Delete(chunkpath.c_str()); if (ret < 0) { @@ -574,7 +579,7 @@ int FilePool::RecycleFile(const std::string& chunkpath) { return fsptr_->Delete(chunkpath.c_str()); } - if (info.st_size != chunklen) { + if (info.st_size != static_cast(chunklen)) { LOG(ERROR) << "file size illegal, " << chunkpath.c_str() << ", delete file dirctly" << ", standard size = " << chunklen @@ -634,7 +639,7 @@ bool FilePool::ScanInternal() { size_t suffixLen = kCleanChunkSuffix_.size(); uint64_t chunklen = poolOpt_.fileSize + poolOpt_.metaPageSize; - for (auto& iter : tmpvec) { + for (auto &iter : tmpvec) { bool isCleaned = false; std::string chunkNum = iter; if (::curve::common::StringEndsWith(iter, kCleanChunkSuffix_)) { @@ -642,10 +647,9 @@ bool FilePool::ScanInternal() { chunkNum = iter.substr(0, iter.size() - suffixLen); } - auto it = std::find_if(chunkNum.begin(), chunkNum.end(), - [](unsigned char c) { - return !std::isdigit(c); - }); + auto it = + std::find_if(chunkNum.begin(), chunkNum.end(), + [](unsigned char c) { 
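// Illustrative sketch (not from this patch): FilePool::GetChunk above tries
// two free lists in order, popping under a mutex and falling back to the
// second list when the first is empty. A stripped-down version of that shape:
#include <cstdint>
#include <mutex>
#include <vector>

class ChunkFreeList {
 public:
    bool Pop(uint64_t* chunkid) {
        auto pop = [&](std::vector<uint64_t>* chunks) -> bool {
            std::unique_lock<std::mutex> lk(mtx_);
            if (chunks->empty()) {
                return false;
            }
            *chunkid = chunks->back();
            chunks->pop_back();
            return true;
        };
        // Prefer dirty chunks, fall back to clean ones (the !needClean path).
        return pop(&dirtyChunks_) || pop(&cleanChunks_);
    }

 private:
    std::mutex mtx_;
    std::vector<uint64_t> dirtyChunks_;
    std::vector<uint64_t> cleanChunks_;
};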
return !std::isdigit(c); }); if (it != chunkNum.end()) { LOG(ERROR) << "file name illegal! [" << iter << "]"; return false; @@ -664,7 +668,7 @@ bool FilePool::ScanInternal() { struct stat info; int ret = fsptr_->Fstat(fd, &info); - if (ret != 0 || info.st_size != chunklen) { + if (ret != 0 || info.st_size != static_cast(chunklen)) { LOG(ERROR) << "file size illegal, " << filepath.c_str() << ", standard size = " << chunklen << ", current size = " << info.st_size; @@ -690,8 +694,8 @@ bool FilePool::ScanInternal() { currentmaxfilenum_.store(maxnum + 1); currentState_.dirtyChunksLeft = dirtyChunks_.size(); currentState_.cleanChunksLeft = cleanChunks_.size(); - currentState_.preallocatedChunksLeft = currentState_.dirtyChunksLeft - + currentState_.cleanChunksLeft; + currentState_.preallocatedChunksLeft = + currentState_.dirtyChunksLeft + currentState_.cleanChunksLeft; LOG(INFO) << "scan done, pool size = " << currentState_.preallocatedChunksLeft; @@ -703,9 +707,7 @@ size_t FilePool::Size() { return currentState_.preallocatedChunksLeft; } -FilePoolState_t FilePool::GetState() { - return currentState_; -} +FilePoolState_t FilePool::GetState() { return currentState_; } } // namespace chunkserver } // namespace curve diff --git a/src/chunkserver/heartbeat.cpp b/src/chunkserver/heartbeat.cpp index 6db69b46a3..0e756b29c6 100644 --- a/src/chunkserver/heartbeat.cpp +++ b/src/chunkserver/heartbeat.cpp @@ -35,6 +35,7 @@ #include "src/chunkserver/heartbeat.h" #include "src/common/uri_parser.h" #include "src/chunkserver/heartbeat_helper.h" +#include "src/common/curve_version.h" using curve::fs::FileSystemInfo; @@ -304,6 +305,7 @@ int Heartbeat::BuildRequest(HeartbeatRequest* req) { } } req->set_leadercount(leaders); + req->set_version(curve::common::CurveVersion()); return 0; } diff --git a/src/chunkserver/heartbeat_helper.cpp b/src/chunkserver/heartbeat_helper.cpp index f960578ca5..02a2fc65c9 100644 --- a/src/chunkserver/heartbeat_helper.cpp +++ b/src/chunkserver/heartbeat_helper.cpp @@ -89,6 +89,7 @@ bool HeartbeatHelper::CopySetConfValid( bool HeartbeatHelper::NeedPurge(const butil::EndPoint &csEp, const CopySetConf &conf, const CopysetNodePtr ©set) { + (void)copyset; // CLDCFS-1004 bug-fix: mds下发epoch为0, 配置为空的copyset if (0 == conf.epoch() && conf.peers().empty()) { LOG(INFO) << "Clean copyset " diff --git a/src/chunkserver/op_request.cpp b/src/chunkserver/op_request.cpp index 90734d0937..5409092020 100755 --- a/src/chunkserver/op_request.cpp +++ b/src/chunkserver/op_request.cpp @@ -208,6 +208,7 @@ void DeleteChunkRequest::OnApply(uint64_t index, void DeleteChunkRequest::OnApplyFromLog(std::shared_ptr datastore, const ChunkRequest &request, const butil::IOBuf &data) { + (void)data; // NOTE: 处理过程中优先使用参数传入的datastore/request auto ret = datastore->DeleteChunk(request.chunkid(), request.sn()); @@ -363,6 +364,9 @@ void ReadChunkRequest::OnApply(uint64_t index, void ReadChunkRequest::OnApplyFromLog(std::shared_ptr datastore, const ChunkRequest &request, const butil::IOBuf &data) { + (void)datastore; + (void)request; + (void)data; // NOTE: 处理过程中优先使用参数传入的datastore/request // read什么都不用做 } @@ -571,6 +575,9 @@ void ReadSnapshotRequest::OnApply(uint64_t index, void ReadSnapshotRequest::OnApplyFromLog(std::shared_ptr datastore, const ChunkRequest &request, const butil::IOBuf &data) { + (void)datastore; + (void)request; + (void)data; // NOTE: 处理过程中优先使用参数传入的datastore/request // read什么都不用做 } @@ -607,6 +614,7 @@ void DeleteSnapshotRequest::OnApply(uint64_t index, void DeleteSnapshotRequest::OnApplyFromLog(std::shared_ptr 
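// Illustrative sketch (not from this patch): the find_if lambda above takes
// its argument as unsigned char before calling std::isdigit, because passing
// a plain char with a negative value to the <cctype> functions is undefined
// behaviour. Standalone form of the same check:
#include <algorithm>
#include <cctype>
#include <string>

bool IsAllDigits(const std::string& s) {
    return !s.empty() &&
           std::all_of(s.begin(), s.end(),
                       [](unsigned char c) { return std::isdigit(c) != 0; });
}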
datastore, //NOLINT const ChunkRequest &request, const butil::IOBuf &data) { + (void)data; // NOTE: 处理过程中优先使用参数传入的datastore/request auto ret = datastore->DeleteSnapshotChunkOrCorrectSn( request.chunkid(), request.correctedsn()); @@ -669,6 +677,7 @@ void CreateCloneChunkRequest::OnApply(uint64_t index, void CreateCloneChunkRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT const ChunkRequest &request, const butil::IOBuf &data) { + (void)data; // NOTE: 处理过程中优先使用参数传入的datastore/request auto ret = datastore->CreateCloneChunk(request.chunkid(), request.sn(), @@ -806,6 +815,7 @@ void ScanChunkRequest::OnApply(uint64_t index, void ScanChunkRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT const ChunkRequest &request, const butil::IOBuf &data) { + (void)data; uint32_t crc = 0; size_t size = request.size(); std::unique_ptr readBuffer(new(std::nothrow)char[size]); diff --git a/src/chunkserver/raftlog/curve_segment.cpp b/src/chunkserver/raftlog/curve_segment.cpp index 59a7c65e85..0e144971b8 100644 --- a/src/chunkserver/raftlog/curve_segment.cpp +++ b/src/chunkserver/raftlog/curve_segment.cpp @@ -80,7 +80,7 @@ int CurveSegment::create() { return -1; } res = ::lseek(_fd, _meta_page_size, SEEK_SET); - if (res != _meta_page_size) { + if (res != static_cast(_meta_page_size)) { LOG(ERROR) << "lseek fail! error: " << strerror(errno); return -1; } @@ -231,7 +231,7 @@ int CurveSegment::load(braft::ConfigurationManager* configuration_manager) { int CurveSegment::_load_meta() { char* metaPage = new char[_meta_page_size]; int res = ::pread(_fd, metaPage, _meta_page_size, 0); - if (res != _meta_page_size) { + if (res != static_cast(_meta_page_size)) { delete metaPage; return -1; } @@ -437,7 +437,7 @@ int CurveSegment::append(const braft::LogEntry* entry) { data.copy_to(write_buf + kEntryHeaderSize, real_length); int ret = ::pwrite(_direct_fd, write_buf, to_write, _meta.bytes); free(write_buf); - if (ret != to_write) { + if (ret != static_cast(to_write)) { LOG(ERROR) << "Fail to write directly to fd=" << _direct_fd << ", buf=" << write_buf << ", size=" << to_write << ", offset=" << _meta.bytes << ", error=" << berror(); @@ -486,7 +486,7 @@ int CurveSegment::_update_meta_page() { ret = ::pwrite(_fd, metaPage, _meta_page_size, 0); } free(metaPage); - if (ret != _meta_page_size) { + if (ret != static_cast(_meta_page_size)) { LOG(ERROR) << "Fail to write meta page into fd=" << (FLAGS_enableWalDirectWrite ? 
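// Illustrative sketch (not from this patch): the curve_segment.cpp hunks above
// cast the unsigned expected size before comparing it with the signed return
// value of pread()/pwrite(), removing -Wsign-compare warnings. The casts'
// template arguments were lost in extraction; one common shape of the fix:
#include <unistd.h>
#include <cstddef>

bool PwriteAll(int fd, const char* buf, size_t len, off_t offset) {
    ssize_t n = ::pwrite(fd, buf, len, offset);
    if (n < 0) {
        return false;                        // I/O error, inspect errno
    }
    return static_cast<size_t>(n) == len;    // both operands now unsigned
}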
_direct_fd : _fd) << ", path: " << _path << berror(); @@ -642,7 +642,6 @@ int CurveSegment::sync(bool will_sync) { } int CurveSegment::unlink() { - int ret = 0; std::string path(_path); if (_is_open) { butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, diff --git a/src/chunkserver/raftlog/curve_segment_log_storage.cpp b/src/chunkserver/raftlog/curve_segment_log_storage.cpp index c9552e5c3f..6182577e6e 100644 --- a/src/chunkserver/raftlog/curve_segment_log_storage.cpp +++ b/src/chunkserver/raftlog/curve_segment_log_storage.cpp @@ -424,6 +424,7 @@ int CurveSegmentLogStorage::append_entry(const braft::LogEntry* entry) { int CurveSegmentLogStorage::append_entries( const std::vector& entries, braft::IOMetric* metric) { + (void)metric; if (entries.empty()) { return 0; } diff --git a/src/chunkserver/raftlog/curve_segment_log_storage.h b/src/chunkserver/raftlog/curve_segment_log_storage.h index 0d970577a6..bb7ff46839 100644 --- a/src/chunkserver/raftlog/curve_segment_log_storage.h +++ b/src/chunkserver/raftlog/curve_segment_log_storage.h @@ -38,8 +38,8 @@ // Zhangyi Chen(chenzhangyi01@baidu.com) // Xiong,Kai(xiongkai@baidu.com) -#ifndef SRC_CHUNKSERVER_RAFTLOG_CURVE_SEGMENT_LOG_STORAGE_H_ -#define SRC_CHUNKSERVER_RAFTLOG_CURVE_SEGMENT_LOG_STORAGE_H_ +#ifndef SRC_CHUNKSERVER_RAFTLOG_CURVE_SEGMENT_LOG_STORAGE_H_ +#define SRC_CHUNKSERVER_RAFTLOG_CURVE_SEGMENT_LOG_STORAGE_H_ #include #include @@ -64,25 +64,23 @@ class CurveSegmentLogStorage; struct LogStorageOptions { std::shared_ptr walFilePool; - std::function monitorMetricCb; + std::function monitorMetricCb; LogStorageOptions() = default; - LogStorageOptions(std::shared_ptr walFilePool, - std::function monitorMetricCb) - : walFilePool(walFilePool), monitorMetricCb(monitorMetricCb) { - } + LogStorageOptions( + std::shared_ptr walFilePool, + std::function monitorMetricCb) + : walFilePool(walFilePool), monitorMetricCb(monitorMetricCb) {} }; struct LogStorageStatus { explicit LogStorageStatus(uint32_t walSegmentFileCount) - : walSegmentFileCount(walSegmentFileCount) { - } + : walSegmentFileCount(walSegmentFileCount) {} uint32_t walSegmentFileCount; }; -LogStorageOptions StoreOptForCurveSegmentLogStorage( - LogStorageOptions options); +LogStorageOptions StoreOptForCurveSegmentLogStorage(LogStorageOptions options); void RegisterCurveSegmentLogStorageOrDie(); @@ -96,31 +94,23 @@ void RegisterCurveSegmentLogStorageOrDie(); // log_inprogress_0001001: open segment class CurveSegmentLogStorage : public braft::LogStorage { public: - typedef std::map > SegmentMap; + typedef std::map> SegmentMap; - explicit CurveSegmentLogStorage(const std::string& path, - bool enable_sync = true, + explicit CurveSegmentLogStorage( + const std::string &path, bool enable_sync = true, std::shared_ptr walFilePool = nullptr) - : _path(path) - , _first_log_index(1) - , _last_log_index(0) - , _checksum_type(0) - , _enable_sync(enable_sync) - , _walFilePool(walFilePool) - {} + : _path(path), _first_log_index(1), _last_log_index(0), + _walFilePool(walFilePool), _checksum_type(0), + _enable_sync(enable_sync) {} CurveSegmentLogStorage() - : _first_log_index(1) - , _last_log_index(0) - , _checksum_type(0) - , _enable_sync(true) - , _walFilePool(nullptr) - {} + : _first_log_index(1), _last_log_index(0), _walFilePool(nullptr), + _checksum_type(0), _enable_sync(true) {} virtual ~CurveSegmentLogStorage() {} // init logstorage, check consistency and integrity - virtual int init(braft::ConfigurationManager* configuration_manager); + virtual int init(braft::ConfigurationManager 
*configuration_manager); // first log index in log virtual int64_t first_log_index() { @@ -131,18 +121,17 @@ class CurveSegmentLogStorage : public braft::LogStorage { virtual int64_t last_log_index(); // get logentry by index - virtual braft::LogEntry* get_entry(const int64_t index); + virtual braft::LogEntry *get_entry(const int64_t index); // get logentry's term by index virtual int64_t get_term(const int64_t index); // append entry to log - int append_entry(const braft::LogEntry* entry); + int append_entry(const braft::LogEntry *entry); // append entries to log and update IOMetric, return success append number - virtual int append_entries( - const std::vector& entries, - braft::IOMetric* metric); + virtual int append_entries(const std::vector &entries, + braft::IOMetric *metric); // delete logs from storage's head, [1, first_index_kept) will be discarded virtual int truncate_prefix(const int64_t first_index_kept); @@ -153,13 +142,11 @@ class CurveSegmentLogStorage : public braft::LogStorage { virtual int reset(const int64_t next_log_index); - LogStorage* new_instance(const std::string& uri) const; + LogStorage *new_instance(const std::string &uri) const; - SegmentMap& segments() { - return _segments; - } + SegmentMap &segments() { return _segments; } - void list_files(std::vector* seg_files); + void list_files(std::vector *seg_files); void sync(); @@ -170,15 +157,13 @@ class CurveSegmentLogStorage : public braft::LogStorage { int save_meta(const int64_t log_index); int load_meta(); int list_segments(bool is_empty); - int load_segments(braft::ConfigurationManager* configuration_manager); - int get_segment(int64_t log_index, scoped_refptr* ptr); - void pop_segments( - int64_t first_index_kept, - std::vector >* poped); - void pop_segments_from_back( - const int64_t first_index_kept, - std::vector >* popped, - scoped_refptr* last_segment); + int load_segments(braft::ConfigurationManager *configuration_manager); + int get_segment(int64_t log_index, scoped_refptr *ptr); + void pop_segments(int64_t first_index_kept, + std::vector> *poped); + void pop_segments_from_back(const int64_t first_index_kept, + std::vector> *popped, + scoped_refptr *last_segment); std::string _path; diff --git a/src/chunkserver/raftsnapshot/curve_file_service.h b/src/chunkserver/raftsnapshot/curve_file_service.h index 6f88bb6adb..3c8e503e8f 100644 --- a/src/chunkserver/raftsnapshot/curve_file_service.h +++ b/src/chunkserver/raftsnapshot/curve_file_service.h @@ -70,7 +70,7 @@ class BAIDU_CACHELINE_ALIGNMENT CurveFileService : public braft::FileService { void set_snapshot_attachment(SnapshotAttachment *snapshot_attachment); void clear_snapshot_attachment() { BAIDU_SCOPED_LOCK(_mutex); - auto ret = _snapshot_attachment.release(); + (void)_snapshot_attachment.release(); } private: diff --git a/src/chunkserver/register.cpp b/src/chunkserver/register.cpp index 616ef0ac95..b764878db5 100644 --- a/src/chunkserver/register.cpp +++ b/src/chunkserver/register.cpp @@ -53,8 +53,8 @@ Register::Register(const RegisterOptions &ops) { } int Register::RegisterToMDS(const ChunkServerMetadata *localMetadata, - ChunkServerMetadata *metadata, - const std::shared_ptr &epochMap) { + ChunkServerMetadata *metadata, + const std::shared_ptr &epochMap) { ::curve::mds::topology::ChunkServerRegistRequest req; ::curve::mds::topology::ChunkServerRegistResponse resp; req.set_disktype(ops_.chunkserverDiskType); @@ -65,7 +65,7 @@ int Register::RegisterToMDS(const ChunkServerMetadata *localMetadata, } req.set_port(ops_.chunkserverPort); uint64_t 
chunkPoolSize = ops_.chunkFilepool->Size() * - ops_.chunkFilepool->GetFilePoolOpt().fileSize; + ops_.chunkFilepool->GetFilePoolOpt().fileSize; req.set_chunkfilepoolsize(chunkPoolSize); if (ops_.chunkFilepool->GetFilePoolOpt().getFileFromPool) { req.set_usechunkfilepoolaswalpool(ops_.useChunkFilePoolAsWalPool); @@ -107,8 +107,7 @@ int Register::RegisterToMDS(const ChunkServerMetadata *localMetadata, break; } else { LOG(ERROR) << ops_.chunkserverInternalIp << ":" - << ops_.chunkserverPort - << " Fail to register to MDS " + << ops_.chunkserverPort << " Fail to register to MDS " << mdsEps_[inServiceIndex_] << ", cntl errorCode: " << cntl.ErrorCode() << "," << " cntl error: " << cntl.ErrorText() << "," @@ -131,8 +130,8 @@ int Register::RegisterToMDS(const ChunkServerMetadata *localMetadata, } if (resp.epochmap_size() != 0) { - for (auto it = resp.epochmap().begin(); - it != resp.epochmap().end(); it++) { + for (auto it = resp.epochmap().begin(); it != resp.epochmap().end(); + it++) { epochMap->UpdateEpoch(it->first, it->second); } } @@ -173,8 +172,8 @@ int Register::PersistChunkServerMeta(const ChunkServerMetadata &metadata) { return -1; } - if (ops_.fs->Write( - fd, metaStr.c_str(), 0, metaStr.size()) < metaStr.size()) { + if (ops_.fs->Write(fd, metaStr.c_str(), 0, metaStr.size()) < + static_cast(metaStr.size())) { LOG(ERROR) << "Failed to write chunkserver metadata file"; return -1; } diff --git a/src/chunkserver/scan_service.cpp b/src/chunkserver/scan_service.cpp index 89a876643e..bc3d2789ce 100644 --- a/src/chunkserver/scan_service.cpp +++ b/src/chunkserver/scan_service.cpp @@ -29,6 +29,7 @@ void ScanServiceImpl::FollowScanMap(RpcController *controller, const FollowScanMapRequest *request, FollowScanMapResponse *response, Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); scanManager_->DealFollowerScanMap(*request, response); } diff --git a/src/chunkserver/trash.cpp b/src/chunkserver/trash.cpp index 678ac212a4..2941261240 100644 --- a/src/chunkserver/trash.cpp +++ b/src/chunkserver/trash.cpp @@ -185,7 +185,7 @@ bool Trash::IsCopysetInTrash(const std::string &dirName) { // 目录是十进制形式 // 例如:2860448220024 (poolId: 666, copysetId: 888) uint64_t groupId; - int n = dirName.find("."); + auto n = dirName.find("."); if (n == std::string::npos) { return false; } @@ -259,6 +259,7 @@ bool Trash::RecycleChunksAndWALInDir( bool Trash::RecycleChunkfile( const std::string &filepath, const std::string &filename) { + (void)filename; LockGuard lg(mtx_); if (0 != chunkFilePool_->RecycleFile(filepath)) { LOG(ERROR) << "Trash failed recycle chunk " << filepath @@ -272,6 +273,7 @@ bool Trash::RecycleChunkfile( bool Trash::RecycleWAL( const std::string &filepath, const std::string &filename) { + (void)filename; LockGuard lg(mtx_); if (walPool_ != nullptr && 0 != walPool_->RecycleFile(filepath)) { LOG(ERROR) << "Trash failed recycle WAL " << filepath diff --git a/src/client/chunkserver_broadcaster.h b/src/client/chunkserver_broadcaster.h index d0f4969261..e7813754fa 100644 --- a/src/client/chunkserver_broadcaster.h +++ b/src/client/chunkserver_broadcaster.h @@ -24,6 +24,7 @@ #define SRC_CLIENT_CHUNKSERVER_BROADCASTER_H_ #include +#include #include "include/client/libcurve_define.h" #include "src/client/client_common.h" diff --git a/src/client/chunkserver_client.cpp b/src/client/chunkserver_client.cpp index 77d72a09bd..052c592746 100644 --- a/src/client/chunkserver_client.cpp +++ b/src/client/chunkserver_client.cpp @@ -25,6 +25,7 @@ #include #include #include +#include using 
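// Illustrative sketch (not from this patch): the trash.cpp hunk above stores
// the result of std::string::find() in auto rather than int, since find()
// returns std::string::size_type and npos does not fit in an int. Typical use:
#include <string>

bool HasDotSeparator(const std::string& dirName) {
    auto n = dirName.find('.');          // std::string::size_type
    return n != std::string::npos;       // compare against npos, never -1
}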
curve::chunkserver::ChunkService_Stub; using curve::chunkserver::CHUNK_OP_STATUS; diff --git a/src/client/client_common.h b/src/client/client_common.h index 7f609c610c..1b94f7244a 100644 --- a/src/client/client_common.h +++ b/src/client/client_common.h @@ -159,6 +159,7 @@ typedef struct FInfo { uint64_t cloneLength{0}; uint64_t stripeUnit; uint64_t stripeCount; + std::string poolset; OpenFlags openflags; common::ReadWriteThrottleParams throttleParams; @@ -344,6 +345,19 @@ inline std::ostream& operator<<(std::ostream& os, const OpenFlags& flags) { // default flags for readonly open OpenFlags DefaultReadonlyOpenFlags(); +struct CreateFileContext { + // pagefile or directory + bool pagefile; + std::string name; + UserInfo user; + + // used for creating pagefile + size_t length; + uint64_t stripeUnit = 0; + uint64_t stripeCount = 0; + std::string poolset; +}; + } // namespace client } // namespace curve diff --git a/src/client/client_config.cpp b/src/client/client_config.cpp index e8c0e3427a..00123b204e 100644 --- a/src/client/client_config.cpp +++ b/src/client/client_config.cpp @@ -184,6 +184,12 @@ int ClientConfig::Init(const std::string& configpath) { std::vector mdsAddr; common::SplitString(metaAddr, ",", &mdsAddr); + if (mdsAddr.empty()) { + LOG(ERROR) << "mds.listen.addr seems invalid or empty, `" << metaAddr + << "`'"; + return -1; + } + fileServiceOption_.metaServerOpt.rpcRetryOpt.addrs.assign(mdsAddr.begin(), mdsAddr.end()); for (auto& addr : fileServiceOption_.metaServerOpt.rpcRetryOpt.addrs) { diff --git a/src/client/file_instance.cpp b/src/client/file_instance.cpp index 882287e619..b49bf3ed31 100644 --- a/src/client/file_instance.cpp +++ b/src/client/file_instance.cpp @@ -39,18 +39,14 @@ using curve::common::TimeUtility; using curve::mds::SessionStatus; FileInstance::FileInstance() - : finfo_(), - fileopt_(), - mdsclient_(nullptr), - leaseExecutor_(), - iomanager4file_(), - readonly_(false) {} - -bool FileInstance::Initialize(const std::string& filename, + : finfo_(), fileopt_(), mdsclient_(nullptr), leaseExecutor_(), + iomanager4file_(), readonly_(false) {} + +bool FileInstance::Initialize(const std::string &filename, std::shared_ptr mdsclient, - const UserInfo_t& userinfo, - const OpenFlags& openflags, - const FileServiceOption& fileservicopt, + const UserInfo_t &userinfo, + const OpenFlags &openflags, + const FileServiceOption &fileservicopt, bool readonly) { readonly_ = readonly; fileopt_ = fileservicopt; @@ -105,39 +101,37 @@ void FileInstance::UnInitialize() { mdsclient_.reset(); } -int FileInstance::Read(char* buf, off_t offset, size_t length) { - DLOG_EVERY_SECOND(INFO) << "begin Read "<< finfo_.fullPathName - << ", offset = " << offset - << ", len = " << length; +int FileInstance::Read(char *buf, off_t offset, size_t length) { + DLOG_EVERY_SECOND(INFO) << "begin Read " << finfo_.fullPathName + << ", offset = " << offset << ", len = " << length; return iomanager4file_.Read(buf, offset, length, mdsclient_.get()); } -int FileInstance::Write(const char* buf, off_t offset, size_t len) { +int FileInstance::Write(const char *buf, off_t offset, size_t len) { if (readonly_) { DVLOG(9) << "open with read only, do not support write!"; return -1; } DLOG_EVERY_SECOND(INFO) << "begin write " << finfo_.fullPathName - << ", offset = " << offset - << ", len = " << len; + << ", offset = " << offset << ", len = " << len; return iomanager4file_.Write(buf, offset, len, mdsclient_.get()); } -int FileInstance::AioRead(CurveAioContext* aioctx, UserDataType dataType) { - DLOG_EVERY_SECOND(INFO) << 
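// Illustrative sketch (not from this patch): the client_config.cpp hunk above
// fails fast when "mds.listen.addr" splits into an empty address list. The
// splitting helper below is a hypothetical stand-in for curve's SplitString.
#include <sstream>
#include <string>
#include <vector>

int ParseMdsAddrs(const std::string& metaAddr, std::vector<std::string>* out) {
    std::stringstream ss(metaAddr);
    std::string item;
    while (std::getline(ss, item, ',')) {
        if (!item.empty()) {
            out->push_back(item);
        }
    }
    if (out->empty()) {
        return -1;      // mirror the patch: reject an empty/malformed list early
    }
    return 0;
}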
"begin AioRead " << finfo_.fullPathName - << ", offset = " << aioctx->offset - << ", len = " << aioctx->length; +int FileInstance::AioRead(CurveAioContext *aioctx, UserDataType dataType) { + DLOG_EVERY_SECOND(INFO) + << "begin AioRead " << finfo_.fullPathName + << ", offset = " << aioctx->offset << ", len = " << aioctx->length; return iomanager4file_.AioRead(aioctx, mdsclient_.get(), dataType); } -int FileInstance::AioWrite(CurveAioContext* aioctx, UserDataType dataType) { +int FileInstance::AioWrite(CurveAioContext *aioctx, UserDataType dataType) { if (readonly_) { DVLOG(9) << "open with read only, do not support write!"; return -1; } - DLOG_EVERY_SECOND(INFO) << "begin AioWrite " << finfo_.fullPathName - << ", offset = " << aioctx->offset - << ", len = " << aioctx->length; + DLOG_EVERY_SECOND(INFO) + << "begin AioWrite " << finfo_.fullPathName + << ", offset = " << aioctx->offset << ", len = " << aioctx->length; return iomanager4file_.AioWrite(aioctx, mdsclient_.get(), dataType); } @@ -150,7 +144,7 @@ int FileInstance::Discard(off_t offset, size_t length) { return -1; } -int FileInstance::AioDiscard(CurveAioContext* aioctx) { +int FileInstance::AioDiscard(CurveAioContext *aioctx) { if (!readonly_) { return iomanager4file_.AioDiscard(aioctx, mdsclient_.get()); } @@ -167,15 +161,16 @@ int FileInstance::AioDiscard(CurveAioContext* aioctx) { // 这时候当前还没有成功打开,所以还没有存储该session信息,所以无法通过refresh // 再去打开,所以这时候需要获取mds一侧session lease时长,然后在client这一侧 // 等待一段时间再去Open,如果依然失败,就向上层返回失败。 -int FileInstance::Open(const std::string& filename, - const UserInfo& userinfo, - std::string* sessionId) { - LeaseSession_t lease; +int FileInstance::Open(const std::string &filename, const UserInfo &userinfo, + std::string *sessionId) { + (void)userinfo; + + LeaseSession_t lease; int ret = LIBCURVE_ERROR::FAILED; FileEpoch_t fEpoch; - ret = mdsclient_->OpenFile(filename, finfo_.userinfo, - &finfo_, &fEpoch, &lease); + ret = mdsclient_->OpenFile(filename, finfo_.userinfo, &finfo_, &fEpoch, + &lease); if (ret == LIBCURVE_ERROR::OK) { iomanager4file_.UpdateFileThrottleParams(finfo_.throttleParams); ret = leaseExecutor_->Start(finfo_, lease) ? 
LIBCURVE_ERROR::OK @@ -188,17 +183,17 @@ int FileInstance::Open(const std::string& filename, return -ret; } -int FileInstance::ReOpen(const std::string& filename, - const std::string& sessionId, - const UserInfo& userInfo, - std::string* newSessionId) { +int FileInstance::ReOpen(const std::string &filename, + const std::string &sessionId, const UserInfo &userInfo, + std::string *newSessionId) { + (void)sessionId; return Open(filename, userInfo, newSessionId); } -int FileInstance::GetFileInfo(const std::string& filename, - FInfo_t* fi, FileEpoch_t *fEpoch) { - LIBCURVE_ERROR ret = mdsclient_->GetFileInfo(filename, finfo_.userinfo, - fi, fEpoch); +int FileInstance::GetFileInfo(const std::string &filename, FInfo_t *fi, + FileEpoch_t *fEpoch) { + LIBCURVE_ERROR ret = + mdsclient_->GetFileInfo(filename, finfo_.userinfo, fi, fEpoch); return -ret; } @@ -215,14 +210,14 @@ int FileInstance::Close() { return -ret; } -FileInstance* FileInstance::NewInitedFileInstance( - const FileServiceOption& fileServiceOption, - std::shared_ptr mdsClient, - const std::string& filename, - const UserInfo& userInfo, - const OpenFlags& openflags, // TODO(all): maybe we can put userinfo and readonly into openflags // NOLINT +FileInstance *FileInstance::NewInitedFileInstance( + const FileServiceOption &fileServiceOption, + std::shared_ptr mdsClient, const std::string &filename, + const UserInfo &userInfo, + const OpenFlags &openflags, // TODO(all): maybe we can put userinfo and + // readonly into openflags // NOLINT bool readonly) { - FileInstance* instance = new (std::nothrow) FileInstance(); + FileInstance *instance = new (std::nothrow) FileInstance(); if (instance == nullptr) { LOG(ERROR) << "Create FileInstance failed, filename: " << filename; return nullptr; @@ -242,12 +237,12 @@ FileInstance* FileInstance::NewInitedFileInstance( return instance; } -FileInstance* FileInstance::Open4Readonly(const FileServiceOption& opt, +FileInstance *FileInstance::Open4Readonly(const FileServiceOption &opt, std::shared_ptr mdsclient, - const std::string& filename, - const UserInfo& userInfo, - const OpenFlags& openflags) { - FileInstance* instance = FileInstance::NewInitedFileInstance( + const std::string &filename, + const UserInfo &userInfo, + const OpenFlags &openflags) { + FileInstance *instance = FileInstance::NewInitedFileInstance( opt, std::move(mdsclient), filename, userInfo, openflags, true); if (instance == nullptr) { LOG(ERROR) << "NewInitedFileInstance failed, filename = " << filename; @@ -280,5 +275,5 @@ void FileInstance::StopLease() { } } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/src/client/io_tracker.cpp b/src/client/io_tracker.cpp index 238be74d96..85d6dae911 100644 --- a/src/client/io_tracker.cpp +++ b/src/client/io_tracker.cpp @@ -50,8 +50,8 @@ IOTracker::IOTracker(IOManager* iomanager, FileMetric* clientMetric, bool disableStripe) : mc_(mc), - iomanager_(iomanager), scheduler_(scheduler), + iomanager_(iomanager), fileMetric_(clientMetric), disableStripe_(disableStripe) { id_ = tracekerID_.fetch_add(1, std::memory_order_relaxed); diff --git a/src/client/io_tracker.h b/src/client/io_tracker.h index 6c6ae27df6..6369410ae3 100644 --- a/src/client/io_tracker.h +++ b/src/client/io_tracker.h @@ -336,13 +336,13 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { // store segment indices that can be discarded std::unordered_set discardSegments_; + // metacache为当前fileinstance的元数据信息 + MetaCache* mc_; + // scheduler用来将用户线程与client自己的线程切分 // 
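// Illustrative sketch (not from this patch): NewInitedFileInstance above is a
// factory that allocates with new(std::nothrow), initializes, and hands back
// nullptr on any failure so callers have a single error path. Generic shape
// (hypothetical class name):
#include <new>
#include <string>

class Instance {
 public:
    bool Initialize(const std::string& name) { return !name.empty(); }
};

Instance* NewInitedInstance(const std::string& name) {
    Instance* instance = new (std::nothrow) Instance();
    if (instance == nullptr) {
        return nullptr;              // allocation failed
    }
    if (!instance->Initialize(name)) {
        delete instance;             // avoid leaking on init failure
        return nullptr;
    }
    return instance;
}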
大IO被切分之后,将切分的reqlist传给scheduler向下发送 RequestScheduler* scheduler_; - // metacache为当前fileinstance的元数据信息 - MetaCache* mc_; - // 对于异步IO,Tracker需要向上层通知当前IO已经处理结束 // iomanager可以将该tracker释放 IOManager* iomanager_; diff --git a/src/client/libcbd_ext4.cpp b/src/client/libcbd_ext4.cpp index ae48dd530b..6bbdda0fef 100644 --- a/src/client/libcbd_ext4.cpp +++ b/src/client/libcbd_ext4.cpp @@ -28,7 +28,7 @@ extern "C" { CurveOptions g_cbd_ext4_options = {false, 0}; -int cbd_ext4_init(const CurveOptions* options) { +int cbd_ext4_init(const CurveOptions *options) { if (g_cbd_ext4_options.inited) { return 0; } @@ -45,47 +45,47 @@ int cbd_ext4_init(const CurveOptions* options) { return 0; } -int cbd_ext4_fini() { - return 0; -} +int cbd_ext4_fini() { return 0; } -int cbd_ext4_open(const char* filename) { +int cbd_ext4_open(const char *filename) { int fd = -1; char path[CBD_MAX_FILE_PATH_LEN] = {0}; #ifdef CBD_BACKEND_EXT4 - strcat(path, g_cbd_ext4_options.datahome); //NOLINT - strcat(path, "/"); //NOLINT + strcat(path, g_cbd_ext4_options.datahome); // NOLINT + strcat(path, "/"); // NOLINT #endif - strcat(path, filename); //NOLINT + strcat(path, filename); // NOLINT fd = open(path, O_RDWR | O_CREAT, 0660); return fd; } -int cbd_ext4_close(int fd) { - return close(fd); -} +int cbd_ext4_close(int fd) { return close(fd); } -int cbd_ext4_pread(int fd, void* buf, off_t offset, size_t length) { +int cbd_ext4_pread(int fd, void *buf, off_t offset, size_t length) { return pread(fd, buf, length, offset); } -int cbd_ext4_pwrite(int fd, const void* buf, off_t offset, size_t length) { +int cbd_ext4_pwrite(int fd, const void *buf, off_t offset, size_t length) { return pwrite(fd, buf, length, offset); } int cbd_ext4_pdiscard(int fd, off_t offset, size_t length) { + (void)fd; + (void)offset; + (void)length; return 0; } void cbd_ext4_aio_callback(union sigval sigev_value) { - CurveAioContext* context = (CurveAioContext *)sigev_value.sival_ptr; //NOLINT + CurveAioContext *context = + (CurveAioContext *)sigev_value.sival_ptr; // NOLINT context->cb(context); } -int cbd_ext4_aio_pread(int fd, CurveAioContext* context) { - struct aiocb* cb; +int cbd_ext4_aio_pread(int fd, CurveAioContext *context) { + struct aiocb *cb; cb = (struct aiocb *)malloc(sizeof(struct aiocb)); if (!cb) { @@ -98,14 +98,14 @@ int cbd_ext4_aio_pread(int fd, CurveAioContext* context) { cb->aio_nbytes = context->length; cb->aio_buf = context->buf; cb->aio_sigevent.sigev_notify = SIGEV_THREAD; - cb->aio_sigevent.sigev_value.sival_ptr = (void*)context; //NOLINT + cb->aio_sigevent.sigev_value.sival_ptr = (void *)context; // NOLINT cb->aio_sigevent.sigev_notify_function = cbd_ext4_aio_callback; return aio_read(cb); } -int cbd_ext4_aio_pwrite(int fd, CurveAioContext* context) { - struct aiocb* cb; +int cbd_ext4_aio_pwrite(int fd, CurveAioContext *context) { + struct aiocb *cb; cb = (struct aiocb *)malloc(sizeof(struct aiocb)); if (!cb) { @@ -118,32 +118,31 @@ int cbd_ext4_aio_pwrite(int fd, CurveAioContext* context) { cb->aio_nbytes = context->length; cb->aio_buf = context->buf; cb->aio_sigevent.sigev_notify = SIGEV_THREAD; - cb->aio_sigevent.sigev_value.sival_ptr = (void*)context; //NOLINT + cb->aio_sigevent.sigev_value.sival_ptr = (void *)context; // NOLINT cb->aio_sigevent.sigev_notify_function = cbd_ext4_aio_callback; return aio_write(cb); } -int cbd_ext4_aio_pdiscard(int fd, CurveAioContext* aioctx) { +int cbd_ext4_aio_pdiscard(int fd, CurveAioContext *aioctx) { + (void)fd; aioctx->ret = aioctx->length; aioctx->cb(aioctx); return 0; } -int cbd_ext4_sync(int 
fd) { - return fsync(fd); -} +int cbd_ext4_sync(int fd) { return fsync(fd); } -int64_t cbd_ext4_filesize(const char* filename) { +int64_t cbd_ext4_filesize(const char *filename) { struct stat st; int ret; char path[CBD_MAX_FILE_PATH_LEN] = {0}; #ifdef CBD_BACKEND_EXT4 - strcat(path, g_cbd_ext4_options.datahome); //NOLINT - strcat(path, "/"); //NOLINT + strcat(path, g_cbd_ext4_options.datahome); // NOLINT + strcat(path, "/"); // NOLINT #endif - strcat(path, filename); //NOLINT + strcat(path, filename); // NOLINT ret = stat(path, &st); if (ret) { @@ -153,9 +152,9 @@ int64_t cbd_ext4_filesize(const char* filename) { } } -int cbd_ext4_increase_epoch(const char* filename) { +int cbd_ext4_increase_epoch(const char *filename) { + (void)filename; return 0; } } // extern "C" - diff --git a/src/client/libcbd_libcurve.cpp b/src/client/libcbd_libcurve.cpp index 62fa3afc7d..2e29ef6f3c 100644 --- a/src/client/libcbd_libcurve.cpp +++ b/src/client/libcbd_libcurve.cpp @@ -91,6 +91,7 @@ int cbd_libcurve_aio_pdiscard(int fd, CurveAioContext* context) { int cbd_libcurve_sync(int fd) { // Ignored as it always sync writes to chunkserver currently + (void)fd; return 0; } diff --git a/src/client/libcurve_file.cpp b/src/client/libcurve_file.cpp index c03d723a69..9cd42f0e2c 100644 --- a/src/client/libcurve_file.cpp +++ b/src/client/libcurve_file.cpp @@ -48,7 +48,7 @@ #include "src/common/fast_align.h" bool globalclientinited_ = false; -curve::client::FileClient* globalclient = nullptr; +curve::client::FileClient *globalclient = nullptr; using curve::client::UserInfo; @@ -72,9 +72,9 @@ char g_processname[kProcessNameMax]; class LoggerGuard { private: - friend void InitLogging(const std::string& confPath); + friend void InitLogging(const std::string &confPath); - explicit LoggerGuard(const std::string& confpath) { + explicit LoggerGuard(const std::string &confpath) { InitInternal(confpath); } @@ -84,13 +84,13 @@ class LoggerGuard { } } - void InitInternal(const std::string& confpath); + void InitInternal(const std::string &confpath); private: bool needShutdown_ = false; }; -void LoggerGuard::InitInternal(const std::string& confPath) { +void LoggerGuard::InitInternal(const std::string &confPath) { curve::common::Configuration conf; conf.SetConfigPath(confPath); @@ -120,26 +120,22 @@ void LoggerGuard::InitInternal(const std::string& confPath) { LOG_IF(WARNING, !conf.GetStringValue("global.logPath", &FLAGS_log_dir)) << "config no logpath info, using default dir '/tmp'"; - std::string processName = std::string("libcurve-").append( - curve::common::UUIDGenerator().GenerateUUID().substr(0, 8)); - snprintf(g_processname, sizeof(g_processname), - "%s", processName.c_str()); + std::string processName = + std::string("libcurve-") + .append(curve::common::UUIDGenerator().GenerateUUID().substr(0, 8)); + snprintf(g_processname, sizeof(g_processname), "%s", processName.c_str()); google::InitGoogleLogging(g_processname); needShutdown_ = true; } -void InitLogging(const std::string& confPath) { +void InitLogging(const std::string &confPath) { static LoggerGuard guard(confPath); } } // namespace FileClient::FileClient() - : rwlock_(), - fdcount_(0), - fileserviceMap_(), - clientconfig_(), - mdsClient_(), + : rwlock_(), fdcount_(0), fileserviceMap_(), clientconfig_(), mdsClient_(), csClient_(std::make_shared()), csBroadCaster_(std::make_shared(csClient_)), inited_(false), @@ -150,7 +146,7 @@ bool FileClient::CheckAligned(off_t offset, size_t length) const { common::is_aligned(length, kMinIOAlignment); } -int FileClient::Init(const 
std::string& configpath) { +int FileClient::Init(const std::string &configpath) { if (inited_) { LOG(WARNING) << "already inited!"; return 0; @@ -187,8 +183,7 @@ int FileClient::Init(const std::string& configpath) { mdsClient_ = std::move(tmpMdsClient); - int rc2 = csClient_->Init( - clientconfig_.GetFileServiceOption().csClientOpt); + int rc2 = csClient_->Init(clientconfig_.GetFileServiceOption().csClientOpt); if (rc2 != 0) { LOG(ERROR) << "Init ChunkServer Client failed!"; return -LIBCURVE_ERROR::FAILED; @@ -221,11 +216,10 @@ void FileClient::UnInit() { inited_ = false; } -int FileClient::Open(const std::string& filename, - const UserInfo_t& userinfo, - const OpenFlags& openflags) { +int FileClient::Open(const std::string &filename, const UserInfo_t &userinfo, + const OpenFlags &openflags) { LOG(INFO) << "Opening filename: " << filename << ", flags: " << openflags; - FileInstance* fileserv = FileInstance::NewInitedFileInstance( + FileInstance *fileserv = FileInstance::NewInitedFileInstance( clientconfig_.GetFileServiceOption(), mdsClient_, filename, userinfo, openflags, false); if (fileserv == nullptr) { @@ -256,9 +250,9 @@ int FileClient::Open(const std::string& filename, return fd; } -int FileClient::Open4ReadOnly(const std::string& filename, - const UserInfo_t& userinfo, bool disableStripe) { - FileInstance* instance = FileInstance::Open4Readonly( +int FileClient::Open4ReadOnly(const std::string &filename, + const UserInfo_t &userinfo, bool disableStripe) { + FileInstance *instance = FileInstance::Open4Readonly( clientconfig_.GetFileServiceOption(), mdsClient_, filename, userinfo); if (instance == nullptr) { @@ -283,16 +277,16 @@ int FileClient::Open4ReadOnly(const std::string& filename, return fd; } -int FileClient::IncreaseEpoch(const std::string& filename, - const UserInfo_t& userinfo) { +int FileClient::IncreaseEpoch(const std::string &filename, + const UserInfo_t &userinfo) { LOG(INFO) << "IncreaseEpoch, filename: " << filename; FInfo_t fi; FileEpoch_t fEpoch; std::list> csLocs; LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { - ret = mdsClient_->IncreaseEpoch(filename, userinfo, - &fi, &fEpoch, &csLocs); + ret = mdsClient_->IncreaseEpoch(filename, userinfo, &fi, &fEpoch, + &csLocs); LOG_IF(ERROR, ret != LIBCURVE_ERROR::OK) << "IncreaseEpoch failed, filename: " << filename << ", ret: " << ret; @@ -301,11 +295,10 @@ int FileClient::IncreaseEpoch(const std::string& filename, return -LIBCURVE_ERROR::FAILED; } - int ret2 = csBroadCaster_->BroadCastFileEpoch( - fEpoch.fileId, fEpoch.epoch, csLocs); + int ret2 = + csBroadCaster_->BroadCastFileEpoch(fEpoch.fileId, fEpoch.epoch, csLocs); LOG_IF(ERROR, ret2 != LIBCURVE_ERROR::OK) - << "BroadCastEpoch failed, filename: " << filename - << ", ret: " << ret2; + << "BroadCastEpoch failed, filename: " << filename << ", ret: " << ret2; // update epoch if file is already open auto it = fileserviceFileNameMap_.find(filename); @@ -316,10 +309,17 @@ int FileClient::IncreaseEpoch(const std::string& filename, } int FileClient::Create(const std::string& filename, - const UserInfo_t& userinfo, size_t size) { + const UserInfo& userinfo, + size_t size) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { - ret = mdsClient_->CreateFile(filename, userinfo, size); + CreateFileContext ctx; + ctx.pagefile = true; + ctx.name = filename; + ctx.user = userinfo; + ctx.length = size; + + ret = mdsClient_->CreateFile(ctx); LOG_IF(ERROR, ret != LIBCURVE_ERROR::OK) << "Create file failed, filename: " << filename << ", ret: " << ret; } else { @@ -329,15 +329,13 @@ int 
FileClient::Create(const std::string& filename, return -ret; } -int FileClient::Create2(const std::string& filename, - const UserInfo_t& userinfo, size_t size, - uint64_t stripeUnit, uint64_t stripeCount) { +int FileClient::Create2(const CreateFileContext& context) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { - ret = mdsClient_->CreateFile(filename, userinfo, size, true, - stripeUnit, stripeCount); + ret = mdsClient_->CreateFile(context); LOG_IF(ERROR, ret != LIBCURVE_ERROR::OK) - << "Create file failed, filename: " << filename << ", ret: " << ret; + << "Create file failed, filename: " << context.name + << ", ret: " << ret; } else { LOG(ERROR) << "global mds client not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -345,7 +343,7 @@ int FileClient::Create2(const std::string& filename, return -ret; } -int FileClient::Read(int fd, char* buf, off_t offset, size_t len) { +int FileClient::Read(int fd, char *buf, off_t offset, size_t len) { // 长度为0,直接返回,不做任何操作 if (len == 0) { return -LIBCURVE_ERROR::OK; @@ -366,7 +364,7 @@ int FileClient::Read(int fd, char* buf, off_t offset, size_t len) { return fileserviceMap_[fd]->Read(buf, offset, len); } -int FileClient::Write(int fd, const char* buf, off_t offset, size_t len) { +int FileClient::Write(int fd, const char *buf, off_t offset, size_t len) { // 长度为0,直接返回,不做任何操作 if (len == 0) { return -LIBCURVE_ERROR::OK; @@ -398,7 +396,7 @@ int FileClient::Discard(int fd, off_t offset, size_t length) { return iter->second->Discard(offset, length); } -int FileClient::AioRead(int fd, CurveAioContext* aioctx, +int FileClient::AioRead(int fd, CurveAioContext *aioctx, UserDataType dataType) { // 长度为0,直接返回,不做任何操作 if (aioctx->length == 0) { @@ -423,7 +421,7 @@ int FileClient::AioRead(int fd, CurveAioContext* aioctx, return ret; } -int FileClient::AioWrite(int fd, CurveAioContext* aioctx, +int FileClient::AioWrite(int fd, CurveAioContext *aioctx, UserDataType dataType) { // 长度为0,直接返回,不做任何操作 if (aioctx->length == 0) { @@ -449,7 +447,7 @@ int FileClient::AioWrite(int fd, CurveAioContext* aioctx, return ret; } -int FileClient::AioDiscard(int fd, CurveAioContext* aioctx) { +int FileClient::AioDiscard(int fd, CurveAioContext *aioctx) { ReadLockGuard lk(rwlock_); auto iter = fileserviceMap_.find(fd); if (CURVE_UNLIKELY(iter == fileserviceMap_.end())) { @@ -460,14 +458,13 @@ int FileClient::AioDiscard(int fd, CurveAioContext* aioctx) { } } -int FileClient::Rename(const UserInfo_t& userinfo, - const std::string& oldpath, const std::string& newpath) { +int FileClient::Rename(const UserInfo_t &userinfo, const std::string &oldpath, + const std::string &newpath) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->RenameFile(userinfo, oldpath, newpath); LOG_IF(ERROR, ret != LIBCURVE_ERROR::OK) - << "Rename failed, OldPath: " << oldpath - << ", NewPath: " << newpath + << "Rename failed, OldPath: " << oldpath << ", NewPath: " << newpath << ", ret: " << ret; } else { LOG(ERROR) << "global mds client not inited!"; @@ -476,15 +473,14 @@ int FileClient::Rename(const UserInfo_t& userinfo, return -ret; } -int FileClient::Extend(const std::string& filename, - const UserInfo_t& userinfo, uint64_t newsize) { +int FileClient::Extend(const std::string &filename, const UserInfo_t &userinfo, + uint64_t newsize) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->Extend(filename, userinfo, newsize); LOG_IF(ERROR, ret != LIBCURVE_ERROR::OK) << "Extend failed, filename: " << filename - << ", NewSize: " << newsize - << ", ret: " << ret; + << ", NewSize: " << 
newsize << ", ret: " << ret; } else { LOG(ERROR) << "global mds client not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -492,15 +488,14 @@ int FileClient::Extend(const std::string& filename, return -ret; } -int FileClient::Unlink(const std::string& filename, - const UserInfo_t& userinfo, bool deleteforce) { +int FileClient::Unlink(const std::string &filename, const UserInfo_t &userinfo, + bool deleteforce) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->DeleteFile(filename, userinfo, deleteforce); LOG_IF(ERROR, ret != LIBCURVE_ERROR::OK) << "Unlink failed, filename: " << filename - << ", force: " << deleteforce - << ", ret: " << ret; + << ", force: " << deleteforce << ", ret: " << ret; } else { LOG(ERROR) << "global mds client not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -508,14 +503,13 @@ int FileClient::Unlink(const std::string& filename, return -ret; } -int FileClient::Recover(const std::string& filename, - const UserInfo_t& userinfo, uint64_t fileId) { +int FileClient::Recover(const std::string &filename, const UserInfo_t &userinfo, + uint64_t fileId) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->RecoverFile(filename, userinfo, fileId); LOG_IF(ERROR, ret != LIBCURVE_ERROR::OK) - << "Recover failed, filename: " << filename - << ", ret: " << ret; + << "Recover failed, filename: " << filename << ", ret: " << ret; } else { LOG(ERROR) << "global mds client not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -523,8 +517,8 @@ int FileClient::Recover(const std::string& filename, return -ret; } -int FileClient::StatFile(const std::string& filename, - const UserInfo_t& userinfo, FileStatInfo* finfo) { +int FileClient::StatFile(const std::string &filename, + const UserInfo_t &userinfo, FileStatInfo *finfo) { FInfo_t fi; FileEpoch_t fEpoch; int ret; @@ -538,18 +532,18 @@ int FileClient::StatFile(const std::string& filename, } if (ret == LIBCURVE_ERROR::OK) { - finfo->id = fi.id; + finfo->id = fi.id; finfo->parentid = fi.parentid; - finfo->ctime = fi.ctime; - finfo->length = fi.length; + finfo->ctime = fi.ctime; + finfo->length = fi.length; finfo->filetype = fi.filetype; finfo->stripeUnit = fi.stripeUnit; finfo->stripeCount = fi.stripeCount; memcpy(finfo->filename, fi.filename.c_str(), - std::min(sizeof(finfo->filename), fi.filename.size() + 1)); + std::min(sizeof(finfo->filename), fi.filename.size() + 1)); memcpy(finfo->owner, fi.owner.c_str(), - std::min(sizeof(finfo->owner), fi.owner.size() + 1)); + std::min(sizeof(finfo->owner), fi.owner.size() + 1)); finfo->fileStatus = static_cast(fi.filestatus); } @@ -557,8 +551,8 @@ int FileClient::StatFile(const std::string& filename, return -ret; } -int FileClient::Listdir(const std::string& dirpath, - const UserInfo_t& userinfo, std::vector* filestatVec) { +int FileClient::Listdir(const std::string &dirpath, const UserInfo_t &userinfo, + std::vector *filestatVec) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->Listdir(dirpath, userinfo, filestatVec); @@ -572,10 +566,14 @@ int FileClient::Listdir(const std::string& dirpath, return -ret; } -int FileClient::Mkdir(const std::string& dirpath, const UserInfo_t& userinfo) { +int FileClient::Mkdir(const std::string &dirpath, const UserInfo_t &userinfo) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { - ret = mdsClient_->CreateFile(dirpath, userinfo, 0, false); + CreateFileContext context; + context.pagefile = false; + context.user = userinfo; + context.name = dirpath; + ret = mdsClient_->CreateFile(context); if (ret != LIBCURVE_ERROR::OK) { if 
(ret == LIBCURVE_ERROR::EXISTS) { LOG(WARNING) << "Create directory failed, " << dirpath @@ -593,7 +591,7 @@ int FileClient::Mkdir(const std::string& dirpath, const UserInfo_t& userinfo) { return -ret; } -int FileClient::Rmdir(const std::string& dirpath, const UserInfo_t& userinfo) { +int FileClient::Rmdir(const std::string &dirpath, const UserInfo_t &userinfo) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->DeleteFile(dirpath, userinfo); @@ -606,8 +604,9 @@ int FileClient::Rmdir(const std::string& dirpath, const UserInfo_t& userinfo) { return -ret; } -int FileClient::ChangeOwner(const std::string& filename, - const std::string& newOwner, const UserInfo_t& userinfo) { +int FileClient::ChangeOwner(const std::string &filename, + const std::string &newOwner, + const UserInfo_t &userinfo) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->ChangeOwner(filename, newOwner, userinfo); @@ -655,14 +654,14 @@ int FileClient::Close(int fd) { return -LIBCURVE_ERROR::FAILED; } -int FileClient::GetClusterId(char* buf, int len) { +int FileClient::GetClusterId(char *buf, int len) { std::string result = GetClusterId(); if (result.empty()) { return -LIBCURVE_ERROR::FAILED; } - if (len >= result.size() + 1) { + if (static_cast(len) >= result.size() + 1) { snprintf(buf, len, "%s", result.c_str()); return LIBCURVE_ERROR::OK; } @@ -689,7 +688,7 @@ std::string FileClient::GetClusterId() { return {}; } -int FileClient::GetFileInfo(int fd, FInfo* finfo) { +int FileClient::GetFileInfo(int fd, FInfo *finfo) { int ret = -LIBCURVE_ERROR::FAILED; ReadLockGuard lk(rwlock_); @@ -702,6 +701,19 @@ int FileClient::GetFileInfo(int fd, FInfo* finfo) { return ret; } +std::vector FileClient::ListPoolset() { + std::vector out; + if (CURVE_UNLIKELY(mdsClient_ == nullptr)) { + LOG(WARNING) << "global mds client not inited!"; + return out; + } + + const auto ret = mdsClient_->ListPoolset(&out); + LOG_IF(WARNING, ret != LIBCURVE_ERROR::OK) + << "Failed to list poolset, error: " << ret; + return out; +} + bool FileClient::StartDummyServer() { if (!clientconfig_.GetFileServiceOption().commonOpt.mdsRegisterToMDS) { LOG(INFO) << "No need register to MDS"; @@ -745,23 +757,21 @@ bool FileClient::StartDummyServer() { return true; } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve // 全局初始化与反初始化 -int GlobalInit(const char* configpath); +int GlobalInit(const char *configpath); void GlobalUnInit(); -int Init(const char* path) { - return GlobalInit(path); -} +int Init(const char *path) { return GlobalInit(path); } -int Open4Qemu(const char* filename) { +int Open4Qemu(const char *filename) { curve::client::UserInfo_t userinfo; std::string realname; - bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename(filename, - &realname, &userinfo.owner); + bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( + filename, &realname, &userinfo.owner); if (!ret) { LOG(ERROR) << "get user info from filename failed!"; return -LIBCURVE_ERROR::FAILED; @@ -775,11 +785,11 @@ int Open4Qemu(const char* filename) { return globalclient->Open(realname, userinfo); } -int IncreaseEpoch(const char* filename) { +int IncreaseEpoch(const char *filename) { curve::client::UserInfo_t userinfo; std::string realname; - bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename(filename, - &realname, &userinfo.owner); + bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( + filename, &realname, &userinfo.owner); if (!ret) { LOG(ERROR) << "get user info from 
filename failed!"; return -LIBCURVE_ERROR::FAILED; @@ -793,11 +803,11 @@ int IncreaseEpoch(const char* filename) { return globalclient->IncreaseEpoch(realname, userinfo); } -int Extend4Qemu(const char* filename, int64_t newsize) { +int Extend4Qemu(const char *filename, int64_t newsize) { curve::client::UserInfo_t userinfo; std::string realname; - bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename(filename, - &realname, &userinfo.owner); + bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( + filename, &realname, &userinfo.owner); if (!ret) { LOG(ERROR) << "get user info from filename failed!"; return -LIBCURVE_ERROR::FAILED; @@ -812,20 +822,20 @@ int Extend4Qemu(const char* filename, int64_t newsize) { } return globalclient->Extend(realname, userinfo, - static_cast(newsize)); + static_cast(newsize)); } -int Open(const char* filename, const C_UserInfo_t* userinfo) { +int Open(const char *filename, const C_UserInfo_t *userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } return globalclient->Open(filename, - UserInfo(userinfo->owner, userinfo->password)); + UserInfo(userinfo->owner, userinfo->password)); } -int Read(int fd, char* buf, off_t offset, size_t length) { +int Read(int fd, char *buf, off_t offset, size_t length) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -834,7 +844,7 @@ int Read(int fd, char* buf, off_t offset, size_t length) { return globalclient->Read(fd, buf, offset, length); } -int Write(int fd, const char* buf, off_t offset, size_t length) { +int Write(int fd, const char *buf, off_t offset, size_t length) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -852,32 +862,30 @@ int Discard(int fd, off_t offset, size_t length) { return globalclient->Discard(fd, offset, length); } -int AioRead(int fd, CurveAioContext* aioctx) { +int AioRead(int fd, CurveAioContext *aioctx) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } - DVLOG(9) << "offset: " << aioctx->offset - << " length: " << aioctx->length - << " op: " << aioctx->op; + DVLOG(9) << "offset: " << aioctx->offset << " length: " << aioctx->length + << " op: " << aioctx->op; return globalclient->AioRead(fd, aioctx); } -int AioWrite(int fd, CurveAioContext* aioctx) { +int AioWrite(int fd, CurveAioContext *aioctx) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } - DVLOG(9) << "offset: " << aioctx->offset - << " length: " << aioctx->length - << " op: " << aioctx->op - << " buf: " << *(unsigned int*)aioctx->buf; + DVLOG(9) << "offset: " << aioctx->offset << " length: " << aioctx->length + << " op: " << aioctx->op + << " buf: " << *(unsigned int *)aioctx->buf; return globalclient->AioWrite(fd, aioctx); } -int AioDiscard(int fd, CurveAioContext* aioctx) { +int AioDiscard(int fd, CurveAioContext *aioctx) { if (globalclient == nullptr) { LOG(ERROR) << "Not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -886,26 +894,14 @@ int AioDiscard(int fd, CurveAioContext* aioctx) { return globalclient->AioDiscard(fd, aioctx); } -int Create(const char* filename, const C_UserInfo_t* userinfo, size_t size) { +int Create(const char *filename, const C_UserInfo_t *userinfo, size_t size) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } - return globalclient->Create(filename, - UserInfo(userinfo->owner, userinfo->password), size); -} - -int 
Create2(const char* filename, const C_UserInfo_t* userinfo, size_t size, - uint64_t stripeUnit, uint64_t stripeCount) { - if (globalclient == nullptr) { - LOG(ERROR) << "not inited!"; - return -LIBCURVE_ERROR::FAILED; - } - - return globalclient->Create2(filename, - UserInfo(userinfo->owner, userinfo->password), - size, stripeUnit, stripeCount); + return globalclient->Create( + filename, UserInfo(userinfo->owner, userinfo->password), size); } int Rename(const C_UserInfo_t* userinfo, @@ -916,68 +912,66 @@ int Rename(const C_UserInfo_t* userinfo, } return globalclient->Rename(UserInfo(userinfo->owner, userinfo->password), - oldpath, newpath); + oldpath, newpath); } -int Extend(const char* filename, - const C_UserInfo_t* userinfo, uint64_t newsize) { +int Extend(const char *filename, const C_UserInfo_t *userinfo, + uint64_t newsize) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } - return globalclient->Extend(filename, - UserInfo(userinfo->owner, userinfo->password), newsize); + return globalclient->Extend( + filename, UserInfo(userinfo->owner, userinfo->password), newsize); } -int Unlink(const char* filename, const C_UserInfo_t* userinfo) { +int Unlink(const char *filename, const C_UserInfo_t *userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } return globalclient->Unlink(filename, - UserInfo(userinfo->owner, userinfo->password)); + UserInfo(userinfo->owner, userinfo->password)); } -int DeleteForce(const char* filename, const C_UserInfo_t* userinfo) { +int DeleteForce(const char *filename, const C_UserInfo_t *userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } - return globalclient->Unlink(filename, - UserInfo(userinfo->owner, userinfo->password), - true); + return globalclient->Unlink( + filename, UserInfo(userinfo->owner, userinfo->password), true); } -int Recover(const char* filename, const C_UserInfo_t* userinfo, - uint64_t fileId) { +int Recover(const char *filename, const C_UserInfo_t *userinfo, + uint64_t fileId) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } - return globalclient->Recover(filename, - UserInfo(userinfo->owner, userinfo->password), - fileId); + return globalclient->Recover( + filename, UserInfo(userinfo->owner, userinfo->password), fileId); } -DirInfo_t* OpenDir(const char* dirpath, const C_UserInfo_t* userinfo) { +DirInfo_t *OpenDir(const char *dirpath, const C_UserInfo_t *userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return nullptr; } - DirInfo_t* dirinfo = new (std::nothrow) DirInfo_t; - dirinfo->dirpath = const_cast(dirpath); - dirinfo->userinfo = const_cast(userinfo); + DirInfo_t *dirinfo = new (std::nothrow) DirInfo_t; + dirinfo->dirpath = const_cast(dirpath); + dirinfo->userinfo = const_cast(userinfo); dirinfo->fileStat = nullptr; return dirinfo; } -int Listdir(DirInfo_t* dirinfo) { +int Listdir(DirInfo_t *dirinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -989,9 +983,10 @@ int Listdir(DirInfo_t* dirinfo) { } std::vector fileStat; - int ret = globalclient->Listdir(dirinfo->dirpath, - UserInfo(dirinfo->userinfo->owner, dirinfo->userinfo->password), - &fileStat); + int ret = globalclient->Listdir( + dirinfo->dirpath, + UserInfo(dirinfo->userinfo->owner, dirinfo->userinfo->password), + &fileStat); dirinfo->dirSize = fileStat.size(); dirinfo->fileStat = new (std::nothrow) 
FileStatInfo_t[dirinfo->dirSize]; @@ -1001,7 +996,7 @@ int Listdir(DirInfo_t* dirinfo) { return -LIBCURVE_ERROR::FAILED; } - for (int i = 0; i < dirinfo->dirSize; i++) { + for (uint64_t i = 0; i < dirinfo->dirSize; i++) { dirinfo->fileStat[i].id = fileStat[i].id; dirinfo->fileStat[i].parentid = fileStat[i].parentid; dirinfo->fileStat[i].filetype = fileStat[i].filetype; @@ -1011,13 +1006,13 @@ int Listdir(DirInfo_t* dirinfo) { memcpy(dirinfo->fileStat[i].owner, fileStat[i].owner, NAME_MAX_SIZE); memset(dirinfo->fileStat[i].filename, 0, NAME_MAX_SIZE); memcpy(dirinfo->fileStat[i].filename, fileStat[i].filename, - NAME_MAX_SIZE); + NAME_MAX_SIZE); } return ret; } -void CloseDir(DirInfo_t* dirinfo) { +void CloseDir(DirInfo_t *dirinfo) { if (dirinfo != nullptr) { if (dirinfo->fileStat != nullptr) { delete[] dirinfo->fileStat; @@ -1027,24 +1022,24 @@ void CloseDir(DirInfo_t* dirinfo) { } } -int Mkdir(const char* dirpath, const C_UserInfo_t* userinfo) { +int Mkdir(const char *dirpath, const C_UserInfo_t *userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } return globalclient->Mkdir(dirpath, - UserInfo(userinfo->owner, userinfo->password)); + UserInfo(userinfo->owner, userinfo->password)); } -int Rmdir(const char* dirpath, const C_UserInfo_t* userinfo) { +int Rmdir(const char *dirpath, const C_UserInfo_t *userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } return globalclient->Rmdir(dirpath, - UserInfo(userinfo->owner, userinfo->password)); + UserInfo(userinfo->owner, userinfo->password)); } int Close(int fd) { @@ -1056,11 +1051,11 @@ int Close(int fd) { return globalclient->Close(fd); } -int StatFile4Qemu(const char* filename, FileStatInfo* finfo) { +int StatFile4Qemu(const char *filename, FileStatInfo *finfo) { curve::client::UserInfo_t userinfo; std::string realname; - bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename(filename, - &realname, &userinfo.owner); + bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( + filename, &realname, &userinfo.owner); if (!ret) { LOG(ERROR) << "get user info from filename failed!"; return -LIBCURVE_ERROR::FAILED; @@ -1074,8 +1069,8 @@ int StatFile4Qemu(const char* filename, FileStatInfo* finfo) { return globalclient->StatFile(realname, userinfo, finfo); } -int StatFile(const char* filename, - const C_UserInfo_t* cuserinfo, FileStatInfo* finfo) { +int StatFile(const char *filename, const C_UserInfo_t *cuserinfo, + FileStatInfo *finfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1085,8 +1080,8 @@ int StatFile(const char* filename, return globalclient->StatFile(filename, userinfo, finfo); } -int ChangeOwner(const char* filename, - const char* newOwner, const C_UserInfo_t* cuserinfo) { +int ChangeOwner(const char *filename, const char *newOwner, + const C_UserInfo_t *cuserinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1096,11 +1091,9 @@ int ChangeOwner(const char* filename, return globalclient->ChangeOwner(filename, newOwner, userinfo); } -void UnInit() { - GlobalUnInit(); -} +void UnInit() { GlobalUnInit(); } -int GetClusterId(char* buf, int len) { +int GetClusterId(char *buf, int len) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1109,7 +1102,7 @@ int GetClusterId(char* buf, int len) { return globalclient->GetClusterId(buf, len); } -int GlobalInit(const char* path) { 
+int GlobalInit(const char *path) { int ret = 0; if (globalclientinited_) { LOG(INFO) << "global cient already inited!"; @@ -1146,74 +1139,74 @@ void GlobalUnInit() { } } -const char* LibCurveErrorName(LIBCURVE_ERROR err) { +const char *LibCurveErrorName(LIBCURVE_ERROR err) { switch (err) { - case LIBCURVE_ERROR::OK: - return "OK"; - case LIBCURVE_ERROR::EXISTS: - return "EXISTS"; - case LIBCURVE_ERROR::FAILED: - return "FAILED"; - case LIBCURVE_ERROR::DISABLEIO: - return "DISABLEIO"; - case LIBCURVE_ERROR::AUTHFAIL: - return "AUTHFAIL"; - case LIBCURVE_ERROR::DELETING: - return "DELETING"; - case LIBCURVE_ERROR::NOTEXIST: - return "NOTEXIST"; - case LIBCURVE_ERROR::UNDER_SNAPSHOT: - return "UNDER_SNAPSHOT"; - case LIBCURVE_ERROR::NOT_UNDERSNAPSHOT: - return "NOT_UNDERSNAPSHOT"; - case LIBCURVE_ERROR::DELETE_ERROR: - return "DELETE_ERROR"; - case LIBCURVE_ERROR::NOT_ALLOCATE: - return "NOT_ALLOCATE"; - case LIBCURVE_ERROR::NOT_SUPPORT: - return "NOT_SUPPORT"; - case LIBCURVE_ERROR::NOT_EMPTY: - return "NOT_EMPTY"; - case LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE: - return "NO_SHRINK_BIGGER_FILE"; - case LIBCURVE_ERROR::SESSION_NOTEXISTS: - return "SESSION_NOTEXISTS"; - case LIBCURVE_ERROR::FILE_OCCUPIED: - return "FILE_OCCUPIED"; - case LIBCURVE_ERROR::PARAM_ERROR: - return "PARAM_ERROR"; - case LIBCURVE_ERROR::INTERNAL_ERROR: - return "INTERNAL_ERROR"; - case LIBCURVE_ERROR::CRC_ERROR: - return "CRC_ERROR"; - case LIBCURVE_ERROR::INVALID_REQUEST: - return "INVALID_REQUEST"; - case LIBCURVE_ERROR::DISK_FAIL: - return "DISK_FAIL"; - case LIBCURVE_ERROR::NO_SPACE: - return "NO_SPACE"; - case LIBCURVE_ERROR::NOT_ALIGNED: - return "NOT_ALIGNED"; - case LIBCURVE_ERROR::BAD_FD: - return "BAD_FD"; - case LIBCURVE_ERROR::LENGTH_NOT_SUPPORT: - return "LENGTH_NOT_SUPPORT"; - case LIBCURVE_ERROR::SESSION_NOT_EXIST: - return "SESSION_NOT_EXIST"; - case LIBCURVE_ERROR::STATUS_NOT_MATCH: - return "STATUS_NOT_MATCH"; - case LIBCURVE_ERROR::DELETE_BEING_CLONED: - return "DELETE_BEING_CLONED"; - case LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT: - return "CLIENT_NOT_SUPPORT_SNAPSHOT"; - case LIBCURVE_ERROR::SNAPSTHO_FROZEN: - return "SNAPSTHO_FROZEN"; - case LIBCURVE_ERROR::RETRY_UNTIL_SUCCESS: - return "RETRY_UNTIL_SUCCESS"; - case LIBCURVE_ERROR::EPOCH_TOO_OLD: - return "EPOCH_TOO_OLD"; - case LIBCURVE_ERROR::UNKNOWN: - break; + case LIBCURVE_ERROR::OK: + return "OK"; + case LIBCURVE_ERROR::EXISTS: + return "EXISTS"; + case LIBCURVE_ERROR::FAILED: + return "FAILED"; + case LIBCURVE_ERROR::DISABLEIO: + return "DISABLEIO"; + case LIBCURVE_ERROR::AUTHFAIL: + return "AUTHFAIL"; + case LIBCURVE_ERROR::DELETING: + return "DELETING"; + case LIBCURVE_ERROR::NOTEXIST: + return "NOTEXIST"; + case LIBCURVE_ERROR::UNDER_SNAPSHOT: + return "UNDER_SNAPSHOT"; + case LIBCURVE_ERROR::NOT_UNDERSNAPSHOT: + return "NOT_UNDERSNAPSHOT"; + case LIBCURVE_ERROR::DELETE_ERROR: + return "DELETE_ERROR"; + case LIBCURVE_ERROR::NOT_ALLOCATE: + return "NOT_ALLOCATE"; + case LIBCURVE_ERROR::NOT_SUPPORT: + return "NOT_SUPPORT"; + case LIBCURVE_ERROR::NOT_EMPTY: + return "NOT_EMPTY"; + case LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE: + return "NO_SHRINK_BIGGER_FILE"; + case LIBCURVE_ERROR::SESSION_NOTEXISTS: + return "SESSION_NOTEXISTS"; + case LIBCURVE_ERROR::FILE_OCCUPIED: + return "FILE_OCCUPIED"; + case LIBCURVE_ERROR::PARAM_ERROR: + return "PARAM_ERROR"; + case LIBCURVE_ERROR::INTERNAL_ERROR: + return "INTERNAL_ERROR"; + case LIBCURVE_ERROR::CRC_ERROR: + return "CRC_ERROR"; + case LIBCURVE_ERROR::INVALID_REQUEST: + return "INVALID_REQUEST"; + 
case LIBCURVE_ERROR::DISK_FAIL: + return "DISK_FAIL"; + case LIBCURVE_ERROR::NO_SPACE: + return "NO_SPACE"; + case LIBCURVE_ERROR::NOT_ALIGNED: + return "NOT_ALIGNED"; + case LIBCURVE_ERROR::BAD_FD: + return "BAD_FD"; + case LIBCURVE_ERROR::LENGTH_NOT_SUPPORT: + return "LENGTH_NOT_SUPPORT"; + case LIBCURVE_ERROR::SESSION_NOT_EXIST: + return "SESSION_NOT_EXIST"; + case LIBCURVE_ERROR::STATUS_NOT_MATCH: + return "STATUS_NOT_MATCH"; + case LIBCURVE_ERROR::DELETE_BEING_CLONED: + return "DELETE_BEING_CLONED"; + case LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT: + return "CLIENT_NOT_SUPPORT_SNAPSHOT"; + case LIBCURVE_ERROR::SNAPSTHO_FROZEN: + return "SNAPSTHO_FROZEN"; + case LIBCURVE_ERROR::RETRY_UNTIL_SUCCESS: + return "RETRY_UNTIL_SUCCESS"; + case LIBCURVE_ERROR::EPOCH_TOO_OLD: + return "EPOCH_TOO_OLD"; + case LIBCURVE_ERROR::UNKNOWN: + break; } static thread_local char message[64]; diff --git a/src/client/libcurve_file.h b/src/client/libcurve_file.h index e3cbe8c681..57c61b74fb 100644 --- a/src/client/libcurve_file.h +++ b/src/client/libcurve_file.h @@ -99,19 +99,10 @@ class FileClient { size_t size); /** - * create file with stripe - * @param: filename file name - * @param: userinfo user info - * @param: size file size - * @param: stripeUnit block in stripe size - * @param stripeCount stripe count in one stripe + * Create file with parameters * @return: success return 0, fail return less than 0 - * */ - virtual int Create2(const std::string& filename, - const UserInfo_t& userinfo, - size_t size, uint64_t stripeUnit, - uint64_t stripeCount); + virtual int Create2(const CreateFileContext& context); /** * 同步模式读 @@ -291,6 +282,9 @@ class FileClient { */ int GetFileInfo(int fd, FInfo* finfo); + // List all poolsets' name in cluster + std::vector ListPoolset(); + /** * 测试使用,获取当前挂载文件数量 * @return 返回当前挂载文件数量 diff --git a/src/client/libcurve_snapshot.cpp b/src/client/libcurve_snapshot.cpp index 2e76b60411..e4aeaa7b6a 100644 --- a/src/client/libcurve_snapshot.cpp +++ b/src/client/libcurve_snapshot.cpp @@ -164,15 +164,19 @@ int SnapshotClient::CheckSnapShotStatus(const std::string &filename, return -ret; } -int SnapshotClient::CreateCloneFile(const std::string &source, - const std::string &destination, - const UserInfo_t &userinfo, uint64_t size, - uint64_t sn, uint32_t chunksize, - uint64_t stripeUnit, uint64_t stripeCount, - FInfo *finfo) { - LIBCURVE_ERROR ret = - mdsclient_.CreateCloneFile(source, destination, userinfo, size, sn, - chunksize, stripeUnit, stripeCount, finfo); +int SnapshotClient::CreateCloneFile(const std::string& source, + const std::string& destination, + const UserInfo_t& userinfo, + uint64_t size, + uint64_t sn, + uint32_t chunksize, + uint64_t stripeUnit, + uint64_t stripeCount, + const std::string& poolset, + FInfo* finfo) { + LIBCURVE_ERROR ret = mdsclient_.CreateCloneFile( + source, destination, userinfo, size, sn, chunksize, stripeUnit, + stripeCount, poolset, finfo); return -ret; } diff --git a/src/client/libcurve_snapshot.h b/src/client/libcurve_snapshot.h index 00d5f7eb1b..7a21575374 100644 --- a/src/client/libcurve_snapshot.h +++ b/src/client/libcurve_snapshot.h @@ -161,14 +161,20 @@ class SnapshotClient { * @param: chunksize是要创建文件的chunk大小 * @param stripeUnit stripe size * @param stripeCount stripe count + * @param poolset poolset of destination file * @param[out] fileinfo 创建的目标文件的文件信息 * * @return 错误码 */ - int CreateCloneFile(const std::string& source, const std::string& destination, - const UserInfo_t& userinfo, uint64_t size, uint64_t sn, + int CreateCloneFile(const 
std::string& source, + const std::string& destination, + const UserInfo_t& userinfo, + uint64_t size, + uint64_t sn, uint32_t chunksize, - uint64_t stripeUnit, uint64_t stripeCount, + uint64_t stripeUnit, + uint64_t stripeCount, + const std::string& poolset, FInfo* fileinfo); /** diff --git a/src/client/mds_client.cpp b/src/client/mds_client.cpp index 87a3b4b767..883b594c80 100644 --- a/src/client/mds_client.cpp +++ b/src/client/mds_client.cpp @@ -191,6 +191,9 @@ int RPCExcutorRetryPolicy::GetNextMDSIndex(bool needChangeMDS, int RPCExcutorRetryPolicy::ExcuteTask(int mdsindex, uint64_t rpcTimeOutMS, RPCFunc task) { + assert(mdsindex >= 0 && + mdsindex < static_cast(retryOpt_.addrs.size())); + const std::string &mdsaddr = retryOpt_.addrs[mdsindex]; brpc::Channel channel; @@ -240,15 +243,17 @@ void MDSClient::UnInitialize() { inited_ = false; } -#define RPCTaskDefine \ - [&](int addrindex, uint64_t rpctimeoutMS, brpc::Channel *channel, \ - brpc::Controller *cntl) -> int +#define RPCTaskDefine \ + [&](CURVE_UNUSED int addrindex, CURVE_UNUSED uint64_t rpctimeoutMS, \ + brpc::Channel* channel, brpc::Controller* cntl) -> int LIBCURVE_ERROR MDSClient::OpenFile(const std::string &filename, const UserInfo_t &userinfo, FInfo_t *fi, FileEpoch_t *fEpoch, LeaseSession *lease) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; OpenFileResponse response; mdsClientMetric_.openFile.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.openFile.latency); @@ -306,17 +311,14 @@ LIBCURVE_ERROR MDSClient::OpenFile(const std::string &filename, rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::CreateFile(const std::string &filename, - const UserInfo_t &userinfo, size_t size, - bool normalFile, uint64_t stripeUnit, - uint64_t stripeCount) { +LIBCURVE_ERROR MDSClient::CreateFile(const CreateFileContext& context) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; CreateFileResponse response; mdsClientMetric_.createFile.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.createFile.latency); - MDSClientBase::CreateFile(filename, userinfo, size, normalFile, - stripeUnit, stripeCount, &response, cntl, - channel); + MDSClientBase::CreateFile(context, &response, cntl, channel); if (cntl->Failed()) { mdsClientMetric_.createFile.eps.count << 1; @@ -331,9 +333,10 @@ LIBCURVE_ERROR MDSClient::CreateFile(const std::string &filename, StatusCode stcode = response.statuscode(); MDSStatusCode2LibcurveError(stcode, &retcode); LOG_IF(WARNING, retcode != LIBCURVE_ERROR::OK) - << "CreateFile: filename = " << filename - << ", owner = " << userinfo.owner - << ", is nomalfile: " << normalFile << ", errocde = " << retcode + << "CreateFile: filename = " << context.name + << ", owner = " << context.user.owner + << ", is pagefile: " << context.pagefile + << ", errcode = " << retcode << ", error msg = " << StatusCode_Name(stcode) << ", log id = " << cntl->log_id(); return retcode; @@ -346,6 +349,8 @@ LIBCURVE_ERROR MDSClient::CloseFile(const std::string &filename, const UserInfo_t &userinfo, const std::string &sessionid) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; CloseFileResponse response; mdsClientMetric_.closeFile.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.closeFile.latency); @@ -380,6 +385,8 @@ LIBCURVE_ERROR MDSClient::GetFileInfo(const std::string &filename, const UserInfo_t &uinfo, FInfo_t *fi, FileEpoch_t *fEpoch) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; GetFileInfoResponse response; 
mdsClientMetric_.getFile.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.getFile.latency); @@ -416,6 +423,8 @@ LIBCURVE_ERROR MDSClient::IncreaseEpoch(const std::string& filename, FileEpoch_t *fEpoch, std::list> *csLocs) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; IncreaseFileEpochResponse response; mdsClientMetric_.increaseEpoch.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.increaseEpoch.latency); @@ -474,6 +483,8 @@ LIBCURVE_ERROR MDSClient::CreateSnapShot(const std::string& filename, const UserInfo_t& userinfo, uint64_t* seq) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; CreateSnapShotResponse response; MDSClientBase::CreateSnapShot(filename, userinfo, &response, cntl, channel); @@ -533,6 +544,8 @@ LIBCURVE_ERROR MDSClient::DeleteSnapShot(const std::string &filename, const UserInfo_t &userinfo, uint64_t seq) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; DeleteSnapShotResponse response; MDSClientBase::DeleteSnapShot(filename, userinfo, seq, &response, cntl, channel); @@ -565,6 +578,8 @@ LIBCURVE_ERROR MDSClient::ListSnapShot(const std::string &filename, const std::vector *seq, std::map *snapif) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; ListSnapShotFileInfoResponse response; MDSClientBase::ListSnapShot(filename, userinfo, seq, &response, cntl, channel); @@ -597,7 +612,7 @@ LIBCURVE_ERROR MDSClient::ListSnapShot(const std::string &filename, snapif->insert(std::make_pair(tempInfo.seqnum, tempInfo)); } - if (response.fileinfo_size() != seq->size()) { + if (response.fileinfo_size() != static_cast(seq->size())) { LOG(WARNING) << "some snapshot info not found!"; return LIBCURVE_ERROR::NOTEXIST; } @@ -613,6 +628,8 @@ LIBCURVE_ERROR MDSClient::GetSnapshotSegmentInfo(const std::string &filename, uint64_t seq, uint64_t offset, SegmentInfo *segInfo) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; GetOrAllocateSegmentResponse response; MDSClientBase::GetSnapshotSegmentInfo(filename, userinfo, seq, offset, &response, cntl, channel); @@ -676,6 +693,8 @@ LIBCURVE_ERROR MDSClient::RefreshSession(const std::string &filename, LeaseRefreshResult *resp, LeaseSession *lease) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; ReFreshSessionResponse response; mdsClientMetric_.refreshSession.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.refreshSession.latency); @@ -750,6 +769,8 @@ LIBCURVE_ERROR MDSClient::CheckSnapShotStatus(const std::string &filename, uint64_t seq, FileStatus *filestatus) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; CheckSnapShotStatusResponse response; MDSClientBase::CheckSnapShotStatus(filename, userinfo, seq, &response, cntl, channel); @@ -785,6 +806,8 @@ MDSClient::GetServerList(const LogicPoolID &logicalpooid, const std::vector ©setidvec, std::vector> *cpinfoVec) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; GetChunkServerListInCopySetsResponse response; mdsClientMetric_.getServerList.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.getServerList.latency); @@ -848,6 +871,8 @@ MDSClient::GetServerList(const LogicPoolID &logicalpooid, LIBCURVE_ERROR MDSClient::GetClusterInfo(ClusterContext *clsctx) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; curve::mds::topology::GetClusterInfoResponse response; MDSClientBase::GetClusterInfo(&response, cntl, channel); @@ -868,15 +893,54 @@ LIBCURVE_ERROR MDSClient::GetClusterInfo(ClusterContext *clsctx) { 
rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); } -LIBCURVE_ERROR MDSClient::CreateCloneFile( - const std::string &source, const std::string &destination, - const UserInfo_t &userinfo, uint64_t size, uint64_t sn, uint32_t chunksize, - uint64_t stripeUnit, uint64_t stripeCount, FInfo *fileinfo) { +LIBCURVE_ERROR MDSClient::ListPoolset(std::vector* out) { + assert(out != nullptr); + + auto task = RPCTaskDefine { + ListPoolsetResponse response; + MDSClientBase::ListPoolset(&response, cntl, channel); + + if (cntl->Failed()) { + LOG(WARNING) << "Failed to list poolset, error: " + << cntl->ErrorText(); + return -cntl->ErrorCode(); + } + + const bool succ = (response.statuscode() == 0); + if (!succ) { + LOG(WARNING) << "Failed to list poolset, response error: " + << response.statuscode(); + return LIBCURVE_ERROR::FAILED; + } + + for (const auto& p : response.poolsetinfos()) { + out->emplace_back(p.poolsetname()); + } + + return LIBCURVE_ERROR::OK; + }; + + return ReturnError( + rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS)); +} + +LIBCURVE_ERROR MDSClient::CreateCloneFile(const std::string& source, + const std::string& destination, + const UserInfo_t& userinfo, + uint64_t size, + uint64_t sn, + uint32_t chunksize, + uint64_t stripeUnit, + uint64_t stripeCount, + const std::string& poolset, + FInfo* fileinfo) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; CreateCloneFileResponse response; MDSClientBase::CreateCloneFile(source, destination, userinfo, size, sn, chunksize, stripeUnit, stripeCount, - &response, cntl, channel); + poolset, &response, cntl, channel); if (cntl->Failed()) { LOG(WARNING) << "Create clone file failed, errcorde = " << cntl->ErrorCode() @@ -928,6 +992,8 @@ LIBCURVE_ERROR MDSClient::SetCloneFileStatus(const std::string &filename, const UserInfo_t &userinfo, uint64_t fileID) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; SetCloneFileStatusResponse response; MDSClientBase::SetCloneFileStatus(filename, filestatus, userinfo, fileID, &response, cntl, channel); @@ -960,6 +1026,8 @@ LIBCURVE_ERROR MDSClient::GetOrAllocateSegment(bool allocate, uint64_t offset, const FileEpoch_t *fEpoch, SegmentInfo *segInfo) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; GetOrAllocateSegmentResponse response; mdsClientMetric_.getOrAllocateSegment.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.getOrAllocateSegment.latency); @@ -1023,6 +1091,8 @@ LIBCURVE_ERROR MDSClient::GetOrAllocateSegment(bool allocate, uint64_t offset, LIBCURVE_ERROR MDSClient::DeAllocateSegment(const FInfo *fileInfo, uint64_t offset) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; DeAllocateSegmentResponse response; mdsClientMetric_.deAllocateSegment.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.deAllocateSegment.latency); @@ -1063,6 +1133,8 @@ LIBCURVE_ERROR MDSClient::RenameFile(const UserInfo_t &userinfo, uint64_t originId, uint64_t destinationId) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; RenameFileResponse response; mdsClientMetric_.renameFile.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.renameFile.latency); @@ -1102,6 +1174,8 @@ LIBCURVE_ERROR MDSClient::RenameFile(const UserInfo_t &userinfo, LIBCURVE_ERROR MDSClient::Extend(const std::string &filename, const UserInfo_t &userinfo, uint64_t newsize) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; ExtendFileResponse response; mdsClientMetric_.extendFile.qps.count << 1; LatencyGuard 
lg(&mdsClientMetric_.extendFile.latency); @@ -1135,6 +1209,8 @@ LIBCURVE_ERROR MDSClient::DeleteFile(const std::string &filename, const UserInfo_t &userinfo, bool deleteforce, uint64_t fileid) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; DeleteFileResponse response; mdsClientMetric_.deleteFile.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.deleteFile.latency); @@ -1173,6 +1249,8 @@ LIBCURVE_ERROR MDSClient::RecoverFile(const std::string &filename, const UserInfo_t &userinfo, uint64_t fileid) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; RecoverFileResponse response; mdsClientMetric_.recoverFile.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.recoverFile.latency); @@ -1205,6 +1283,8 @@ LIBCURVE_ERROR MDSClient::ChangeOwner(const std::string &filename, const std::string &newOwner, const UserInfo_t &userinfo) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; ChangeOwnerResponse response; mdsClientMetric_.changeOwner.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.changeOwner.latency); @@ -1244,6 +1324,8 @@ LIBCURVE_ERROR MDSClient::Listdir(const std::string &dirpath, const UserInfo_t &userinfo, std::vector *filestatVec) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; ListDirResponse response; mdsClientMetric_.listDir.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.listDir.latency); @@ -1306,6 +1388,8 @@ LIBCURVE_ERROR MDSClient::GetChunkServerInfo(const PeerAddr &csAddr, } auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; curve::mds::topology::GetChunkServerInfoResponse response; mdsClientMetric_.getChunkServerId.qps.count << 1; @@ -1366,6 +1450,8 @@ LIBCURVE_ERROR MDSClient::ListChunkServerInServer(const std::string &serverIp, std::vector *csIds) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; curve::mds::topology::ListChunkServerResponse response; mdsClientMetric_.listChunkserverInServer.qps.count << 1; @@ -1417,6 +1503,7 @@ void MDSClient::MDSStatusCode2LibcurveError(const StatusCode &status, case StatusCode::kSnapshotFileNotExists: case StatusCode::kFileNotExists: case StatusCode::kDirNotExist: + case StatusCode::kPoolsetNotExist: *errcode = LIBCURVE_ERROR::NOTEXIST; break; case StatusCode::kSegmentNotAllocated: diff --git a/src/client/mds_client.h b/src/client/mds_client.h index 3c31a0d181..36822fa31c 100644 --- a/src/client/mds_client.h +++ b/src/client/mds_client.h @@ -171,19 +171,13 @@ class MDSClient : public MDSClientBase, /** * 创建文件 - * @param: filename创建文件的文件名 - * @param: userinfo为user信息 - * @param: size文件长度 - * @param: normalFile表示创建的是普通文件还是目录文件,如果是目录则忽略size + * @param: context创建文件信息 * @return: 成功返回LIBCURVE_ERROR::OK * 文件已存在返回LIBCURVE_ERROR::EXIST * 否则返回LIBCURVE_ERROR::FAILED * 如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, */ - LIBCURVE_ERROR CreateFile(const std::string &filename, - const UserInfo_t &userinfo, size_t size = 0, - bool normalFile = true, uint64_t stripeUnit = 0, - uint64_t stripeCount = 0); + LIBCURVE_ERROR CreateFile(const CreateFileContext& context); /** * open file * @param: filename file name @@ -220,6 +214,8 @@ class MDSClient : public MDSClientBase, */ LIBCURVE_ERROR GetClusterInfo(ClusterContext *clsctx); + LIBCURVE_ERROR ListPoolset(std::vector* out); + /** * Get or Alloc SegmentInfo,and update to Metacache * @param: allocate ture for allocate, false for get only @@ -421,6 +417,7 @@ class MDSClient : public MDSClientBase, const UserInfo_t &userinfo, uint64_t size, uint64_t sn, uint32_t chunksize, uint64_t stripeUnit, 
uint64_t stripeCount, + const std::string& poolset, FInfo *fileinfo); /** diff --git a/src/client/mds_client_base.cpp b/src/client/mds_client_base.cpp index 2ab238fef5..4469370b9d 100644 --- a/src/client/mds_client_base.cpp +++ b/src/client/mds_client_base.cpp @@ -23,12 +23,14 @@ #include "src/client/mds_client_base.h" #include "src/common/authenticator.h" +#include "src/common/curve_define.h" #include "src/common/curve_version.h" namespace curve { namespace client { using curve::common::Authenticator; +using curve::mds::topology::TopologyService_Stub; const char* kRootUserName = "root"; @@ -60,34 +62,37 @@ void MDSClientBase::OpenFile(const std::string& filename, stub.OpenFile(cntl, &request, response, nullptr); } -void MDSClientBase::CreateFile(const std::string& filename, - const UserInfo_t& userinfo, - size_t size, - bool normalFile, - uint64_t stripeUnit, - uint64_t stripeCount, +void MDSClientBase::CreateFile(const CreateFileContext& context, CreateFileResponse* response, brpc::Controller* cntl, brpc::Channel* channel) { CreateFileRequest request; - request.set_filename(filename); - if (normalFile) { + request.set_filename(context.name); + if (context.pagefile) { request.set_filetype(curve::mds::FileType::INODE_PAGEFILE); - request.set_filelength(size); + request.set_filelength(context.length); + request.set_stripeunit(context.stripeUnit); + request.set_stripecount(context.stripeCount); + request.set_poolset(context.poolset); } else { request.set_filetype(curve::mds::FileType::INODE_DIRECTORY); } - request.set_stripeunit(stripeUnit); - request.set_stripecount(stripeCount); - FillUserInfo(&request, userinfo); + FillUserInfo(&request, context.user); - LOG(INFO) << "CreateFile: filename = " << filename - << ", owner = " << userinfo.owner - << ", is normalfile: " << normalFile - << ", log id = " << cntl->log_id() - << ", stripeUnit = " << stripeUnit - << ", stripeCount = " << stripeCount; + if (context.pagefile) { + LOG(INFO) << "CreateFile, filename = `" << context.name << "`" + << ", owner = " << context.user.owner + << ", stripe unit = " << context.stripeUnit + << ", stripe count = " << context.stripeCount + << ", poolset = `" << context.poolset << "`" + << ", length = " << context.length / common::kGB << "GiB" + << ", log id = " << cntl->log_id(); + } else { + LOG(INFO) << "CreateDirectory, dirname = `" << context.name << "`" + << ", owner = " << context.user.owner + << ", log id = " << cntl->log_id(); + } curve::mds::CurveFSService_Stub stub(channel); stub.CreateFile(cntl, &request, response, NULL); @@ -309,6 +314,15 @@ void MDSClientBase::GetClusterInfo(GetClusterInfoResponse* response, stub.GetClusterInfo(cntl, &request, response, nullptr); } +void MDSClientBase::ListPoolset(ListPoolsetResponse* response, + brpc::Controller* cntl, + brpc::Channel* channel) { + ListPoolsetRequest request; + + TopologyService_Stub stub(channel); + stub.ListPoolset(cntl, &request, response, nullptr); +} + void MDSClientBase::CreateCloneFile(const std::string& source, const std::string& destination, const UserInfo_t& userinfo, @@ -317,6 +331,7 @@ void MDSClientBase::CreateCloneFile(const std::string& source, uint32_t chunksize, uint64_t stripeUnit, uint64_t stripeCount, + const std::string& poolset, CreateCloneFileResponse* response, brpc::Controller* cntl, brpc::Channel* channel) { @@ -329,12 +344,10 @@ void MDSClientBase::CreateCloneFile(const std::string& source, request.set_clonesource(source); request.set_stripeunit(stripeUnit); request.set_stripecount(stripeCount); + 
request.set_poolset(poolset); FillUserInfo(&request, userinfo); - LOG(INFO) << "CreateCloneFile: source = " << source - << ", destination = " << destination - << ", owner = " << userinfo.owner << ", seqnum = " << sn - << ", size = " << size << ", chunksize = " << chunksize + LOG(INFO) << "CreateCloneFile: " << request.ShortDebugString() << ", log id = " << cntl->log_id(); curve::mds::CurveFSService_Stub stub(channel); @@ -377,7 +390,6 @@ void MDSClientBase::GetOrAllocateSegment(bool allocate, // convert the user offset to seg offset uint64_t segmentsize = fi->segmentsize; - uint64_t chunksize = fi->chunksize; uint64_t seg_offset = (offset / segmentsize) * segmentsize; request.set_filename(fi->fullPathName); request.set_offset(seg_offset); diff --git a/src/client/mds_client_base.h b/src/client/mds_client_base.h index ad2464ad3e..64178e43e9 100644 --- a/src/client/mds_client_base.h +++ b/src/client/mds_client_base.h @@ -87,6 +87,8 @@ using curve::mds::topology::GetChunkServerInfoResponse; using curve::mds::topology::ListChunkServerResponse; using curve::mds::IncreaseFileEpochRequest; using curve::mds::IncreaseFileEpochResponse; +using curve::mds::topology::ListPoolsetRequest; +using curve::mds::topology::ListPoolsetResponse; extern const char* kRootUserName; @@ -119,12 +121,7 @@ class MDSClientBase { * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 * @param[in]:channel是当前与mds建立的通道 */ - void CreateFile(const std::string& filename, - const UserInfo_t& userinfo, - size_t size, - bool normalFile, - const uint64_t stripeUnit, - const uint64_t stripeCount, + void CreateFile(const CreateFileContext& context, CreateFileResponse* response, brpc::Controller* cntl, brpc::Channel* channel); @@ -288,6 +285,10 @@ class MDSClientBase { brpc::Controller* cntl, brpc::Channel* channel); + void ListPoolset(ListPoolsetResponse* response, + brpc::Controller* cntl, + brpc::Channel* channel); + /** * 创建clone文件 * @param source 克隆源文件名 @@ -310,6 +311,7 @@ class MDSClientBase { uint32_t chunksize, uint64_t stripeUnit, uint64_t stripeCount, + const std::string& poolset, CreateCloneFileResponse* response, brpc::Controller* cntl, brpc::Channel* channel); diff --git a/src/client/metacache.cpp b/src/client/metacache.cpp index 289a1eb1aa..dd9f4ba2f0 100644 --- a/src/client/metacache.cpp +++ b/src/client/metacache.cpp @@ -214,8 +214,7 @@ void MetaCache::UpdateCopysetInfoIfMatchCurrentLeader( CopysetID copysetId, const PeerAddr& leaderAddr) { std::vector> copysetInfos; - int ret = - mdsclient_->GetServerList(logicPoolId, {copysetId}, ©setInfos); + (void)mdsclient_->GetServerList(logicPoolId, {copysetId}, ©setInfos); bool needUpdate = (!copysetInfos.empty()) && (copysetInfos[0].HasPeerInCopyset(leaderAddr)); diff --git a/src/client/metacache_struct.h b/src/client/metacache_struct.h index f29d3de467..85ca375339 100644 --- a/src/client/metacache_struct.h +++ b/src/client/metacache_struct.h @@ -55,7 +55,7 @@ template struct CURVE_CACHELINE_ALIGNMENT CopysetPeerInfo { CopysetPeerInfo() = default; - CopysetPeerInfo(const CopysetPeerInfo&) = default; + CopysetPeerInfo(const CopysetPeerInfo &) = default; CopysetPeerInfo &operator=(const CopysetPeerInfo &other) = default; CopysetPeerInfo(const T &cid, const PeerAddr &internal, @@ -160,7 +160,7 @@ template struct CURVE_CACHELINE_ALIGNMENT CopysetInfo { bool GetCurrentLeaderID(T *id) const { if (leaderindex_ >= 0) { - if (csinfos_.size() < leaderindex_) { + if (static_cast(csinfos_.size()) < leaderindex_) { return false; } else { *id = csinfos_[leaderindex_].peerID; @@ -212,7 +212,8 @@ template 
struct CURVE_CACHELINE_ALIGNMENT CopysetInfo { */ int GetLeaderInfo(T *peerid, EndPoint *ep) { // 第一次获取leader,如果当前leader信息没有确定,返回-1,由外部主动发起更新leader - if (leaderindex_ < 0 || leaderindex_ >= csinfos_.size()) { + if (leaderindex_ < 0 || + leaderindex_ >= static_cast(csinfos_.size())) { LOG(INFO) << "GetLeaderInfo pool " << lpid_ << ", copyset " << cpid_ << " has no leader"; diff --git a/src/client/request_sender.cpp b/src/client/request_sender.cpp index 12e062b62e..539d063d4f 100644 --- a/src/client/request_sender.cpp +++ b/src/client/request_sender.cpp @@ -80,6 +80,7 @@ int RequestSender::ReadChunk(const ChunkIDInfo& idinfo, uint64_t appliedindex, const RequestSourceInfo& sourceInfo, ClientClosure *done) { + (void)sn; brpc::ClosureGuard doneGuard(done); brpc::Controller *cntl = new brpc::Controller(); ChunkResponse *response = new ChunkResponse(); diff --git a/src/client/service_helper.cpp b/src/client/service_helper.cpp index f1ae8daaa9..1c9d2dade0 100644 --- a/src/client/service_helper.cpp +++ b/src/client/service_helper.cpp @@ -124,6 +124,9 @@ void ServiceHelper::ProtoFileInfo2Local(const curve::mds::FileInfo& finfo, fi->throttleParams = ProtoFileThrottleParamsToLocal(finfo.throttleparams()); } + if (finfo.has_poolset()) { + fi->poolset = finfo.poolset(); + } fEpoch->fileId = finfo.id(); if (finfo.has_epoch()) { diff --git a/src/client/splitor.cpp b/src/client/splitor.cpp index f95b20e0b7..c821f16395 100644 --- a/src/client/splitor.cpp +++ b/src/client/splitor.cpp @@ -143,8 +143,6 @@ bool Splitor::AssignInternal(IOTracker* iotracker, MetaCache* metaCache, MDSClient* mdsclient, const FInfo_t* fileInfo, const FileEpoch_t* fEpoch, ChunkIndex chunkidx) { - const auto maxSplitSizeBytes = 1024 * iosplitopt_.fileIOSplitMaxSizeKB; - lldiv_t res = std::div( static_cast(chunkidx) * fileInfo->chunksize, // NOLINT static_cast(fileInfo->segmentsize)); // NOLINT @@ -369,7 +367,6 @@ int Splitor::SplitForStripe(IOTracker* iotracker, MetaCache* metaCache, uint64_t cur = offset; uint64_t left = length; - uint64_t curChunkIndex = 0; while (left > 0) { uint64_t blockIndex = cur / stripeUnit; @@ -427,11 +424,11 @@ uint64_t Splitor::ProcessUnalignedRequests(const off_t currentOffset, uint64_t alignedEndOffset = common::align_down(currentEndOff, iosplitopt_.alignment.cloneVolume); - if (currentOffset == alignedStartOffset && + if (static_cast(currentOffset) == alignedStartOffset && currentEndOff == alignedEndOffset) { padding->aligned = true; } else { - if (currentOffset == alignedStartOffset) { + if (static_cast(currentOffset) == alignedStartOffset) { padding->aligned = false; padding->type = RequestContext::Padding::Right; padding->offset = alignedEndOffset; diff --git a/src/client/unstable_helper.cpp b/src/client/unstable_helper.cpp index 26fa1bbea8..5cc99945fe 100644 --- a/src/client/unstable_helper.cpp +++ b/src/client/unstable_helper.cpp @@ -24,15 +24,14 @@ namespace curve { namespace client { -UnstableState UnstableHelper::GetCurrentUnstableState( - ChunkServerID csId, - const butil::EndPoint& csEndPoint) { - +UnstableState +UnstableHelper::GetCurrentUnstableState(ChunkServerID csId, + const butil::EndPoint &csEndPoint) { std::string ip = butil::ip2str(csEndPoint.ip).c_str(); mtx_.lock(); // 如果当前ip已经超过阈值,则直接返回chunkserver unstable - int unstabled = serverUnstabledChunkservers_[ip].size(); + uint32_t unstabled = serverUnstabledChunkservers_[ip].size(); if (unstabled >= option_.serverUnstableThreshold) { serverUnstabledChunkservers_[ip].emplace(csId); mtx_.unlock(); diff --git 
a/src/common/concurrent/dlock.h b/src/common/concurrent/dlock.h index c68538e4e9..36210b445b 100644 --- a/src/common/concurrent/dlock.h +++ b/src/common/concurrent/dlock.h @@ -32,8 +32,8 @@ namespace curve { namespace common { -using curve::kvstorage::KVStorageClient; using curve::common::Uncopyable; +using curve::kvstorage::KVStorageClient; struct DLockOpts { std::string pfx; @@ -47,19 +47,19 @@ struct DLockOpts { class DLock : public Uncopyable { public: - explicit DLock(const DLockOpts &opts) : opts_(opts), locker_(0) {} + explicit DLock(const DLockOpts &opts) : locker_(0), opts_(opts) {} virtual ~DLock(); /** * @brief Init the etcd Mutex - * + * * @return lock leaseid */ virtual int64_t Init(); /** * @brief lock the object - * + * * @return error code EtcdErrCode */ virtual int Lock(); @@ -97,8 +97,8 @@ class DLock : public Uncopyable { const DLockOpts &opts_; }; -} // namespace common -} // namespace curve +} // namespace common +} // namespace curve #endif // SRC_COMMON_CONCURRENT_DLOCK_H_ diff --git a/src/common/concurrent/task_queue.h b/src/common/concurrent/task_queue.h index 50b86c1ec7..420fcdabfa 100644 --- a/src/common/concurrent/task_queue.h +++ b/src/common/concurrent/task_queue.h @@ -13,25 +13,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - /* * Project: curve * File Created: Tuesday, 18th December 2018 4:52:44 pm * Author: tongguangxun */ - #ifndef SRC_COMMON_CONCURRENT_TASK_QUEUE_H_ #define SRC_COMMON_CONCURRENT_TASK_QUEUE_H_ - #include // NOLINT #include // NOLINT #include // NOLINT #include // NOLINT #include - namespace curve { namespace common { - template class GenericTaskQueue { public: @@ -47,24 +42,26 @@ class GenericTaskQueue { while (tasks_.size() >= capacity_) { notfullcv_.wait(lk); } - tasks_.push(std::move(task)); } notemptycv_.notify_one(); } - Task Pop() { std::unique_lock lk(mtx_); while (tasks_.empty()) { notemptycv_.wait(lk); } - Task t = std::move(tasks_.front()); tasks_.pop(); notfullcv_.notify_one(); return t; } + size_t Size() { + std::unique_lock lk(mtx_); + return tasks_.size(); + } + private: size_t capacity_; MutexT mtx_; diff --git a/src/common/fs_util.h b/src/common/fs_util.h index 99dab36174..3e591fd5ca 100644 --- a/src/common/fs_util.h +++ b/src/common/fs_util.h @@ -20,8 +20,8 @@ * Author: charisu */ -#ifndef SRC_COMMON_FS_UTIL_H_ -#define SRC_COMMON_FS_UTIL_H_ +#ifndef SRC_COMMON_FS_UTIL_H_ +#define SRC_COMMON_FS_UTIL_H_ #include #include @@ -32,8 +32,8 @@ namespace curve { namespace common { // 计算path2相对于path1的相对路径 -inline std::string CalcRelativePath(const std::string& path1, - const std::string& path2) { +inline std::string CalcRelativePath(const std::string &path1, + const std::string &path2) { if (path1.empty() || path2.empty()) { return ""; } @@ -41,7 +41,7 @@ inline std::string CalcRelativePath(const std::string& path1, std::vector dirs2; SplitString(path1, "/", &dirs1); SplitString(path2, "/", &dirs2); - int unmatchedIndex = 0; + size_t unmatchedIndex = 0; while (unmatchedIndex < dirs1.size() && unmatchedIndex < dirs2.size()) { if (dirs1[unmatchedIndex] != dirs2[unmatchedIndex]) { break; @@ -52,13 +52,13 @@ inline std::string CalcRelativePath(const std::string& path1, if (unmatchedIndex == dirs1.size()) { rpath.append("."); } - for (int i = 0; i < dirs1.size() - unmatchedIndex; ++i) { + for (int i = 0; i < static_cast(dirs1.size() - unmatchedIndex); ++i) { if (i > 0) { rpath.append("/"); } rpath.append(".."); } - for (int i = unmatchedIndex; i < dirs2.size(); ++i) { + 
for (size_t i = unmatchedIndex; i < dirs2.size(); ++i) { rpath.append("/"); rpath.append(dirs2[i]); } @@ -66,8 +66,7 @@ inline std::string CalcRelativePath(const std::string& path1, } // Check whether the path2 is the subpath of path1 -inline bool IsSubPath(const std::string& path1, - const std::string& path2) { +inline bool IsSubPath(const std::string &path1, const std::string &path2) { return StringStartWith(CalcRelativePath(path1, path2), "./"); } @@ -75,4 +74,3 @@ inline bool IsSubPath(const std::string& path1, } // namespace curve #endif // SRC_COMMON_FS_UTIL_H_ - diff --git a/src/common/namespace_define.h b/src/common/namespace_define.h index 442b7e36c4..a4d9cf18fe 100644 --- a/src/common/namespace_define.h +++ b/src/common/namespace_define.h @@ -53,6 +53,8 @@ const char CHUNKSERVERKEYEND[] = "1006"; const char CLUSTERINFOKEY[] = "1007"; const char COPYSETKEYPREFIX[] = "1008"; const char COPYSETKEYEND[] = "1009"; +const char POOLSETKEYPREFIX[] = "1010"; +const char POOLSETKEYEND[] = "1011"; const char SNAPINFOKEYPREFIX[] = "11"; const char SNAPINFOKEYEND[] = "12"; @@ -68,9 +70,10 @@ const int LEADER_PREFIX_LENGTH = 8; const int SEGMENTKEYLEN = 18; const int DISCARDSEGMENTKEYLEN = 26; +constexpr int kDefaultPoolsetId = 1; +constexpr char kDefaultPoolsetName[] = "default"; + } // namespace common } // namespace curve #endif // SRC_COMMON_NAMESPACE_DEFINE_H_ - - diff --git a/src/common/s3_adapter.h b/src/common/s3_adapter.h index 6ee6538b39..53e514a813 100644 --- a/src/common/s3_adapter.h +++ b/src/common/s3_adapter.h @@ -22,6 +22,7 @@ #ifndef SRC_COMMON_S3_ADAPTER_H_ #define SRC_COMMON_S3_ADAPTER_H_ +#include #include #include #include @@ -54,7 +55,7 @@ #include //NOLINT #include //NOLINT #include //NOLINT -#include // NOLINT +#include //NOLINT #include "src/common/configuration.h" #include "src/common/throttle.h" @@ -97,6 +98,7 @@ struct S3InfoOption { std::string bucketName; uint64_t blockSize; uint64_t chunkSize; + uint32_t objectPrefix; }; void InitS3AdaptorOptionExceptS3InfoOption(Configuration *conf, @@ -115,7 +117,7 @@ struct GetObjectAsyncContext : public Aws::Client::AsyncCallerContext { size_t len; GetObjectAsyncCallBack cb; int retCode; - int retry; + uint32_t retry; size_t actualLen; }; @@ -143,9 +145,7 @@ class S3Adapter { s3Client_ = nullptr; throttle_ = nullptr; } - virtual ~S3Adapter() { - Deinit(); - } + virtual ~S3Adapter() { Deinit(); } /** * 初始化S3Adapter */ @@ -359,10 +359,15 @@ class FakeS3Adapter : public S3Adapter { int PutObject(const Aws::String &key, const char *buffer, const size_t bufferSize) override { + (void)key; + (void)buffer; + (void)bufferSize; return 0; } int PutObject(const Aws::String &key, const std::string &data) override { + (void)key; + (void)data; return 0; } @@ -373,6 +378,8 @@ class FakeS3Adapter : public S3Adapter { } int GetObject(const Aws::String &key, std::string *data) override { + (void)key; + (void)data; // just return 4M data data->resize(4 * 1024 * 1024, '1'); return 0; @@ -380,6 +387,8 @@ class FakeS3Adapter : public S3Adapter { int GetObject(const std::string &key, char *buf, off_t offset, size_t len) override { + (void)key; + (void)offset; // juset return len data memset(buf, '1', len); return 0; @@ -392,13 +401,20 @@ class FakeS3Adapter : public S3Adapter { context->cb(this, context); } - int DeleteObject(const Aws::String &key) override { return 0; } + int DeleteObject(const Aws::String &key) override { + (void)key; + return 0; + } int DeleteObjects(const std::list &keyList) override { + (void)keyList; return 0; } - bool 
ObjectExist(const Aws::String &key) override { return true; } + bool ObjectExist(const Aws::String &key) override { + (void)key; + return true; + } }; diff --git a/src/common/snapshotclone/snapshotclone_define.cpp b/src/common/snapshotclone/snapshotclone_define.cpp index 2eb1ea1c83..b3b08f8d74 100644 --- a/src/common/snapshotclone/snapshotclone_define.cpp +++ b/src/common/snapshotclone/snapshotclone_define.cpp @@ -53,6 +53,7 @@ const char* kOffsetStr = "Offset"; const char* kSourceStr = "Source"; const char* kDestinationStr = "Destination"; const char* kLazyStr = "Lazy"; +const char* kPoolset = "Poolset"; const char* kStatusStr = "Status"; const char* kTypeStr = "Type"; const char* kInodeStr = "Inode"; @@ -105,5 +106,3 @@ std::string BuildErrorMessage( } // namespace snapshotcloneserver } // namespace curve - - diff --git a/src/common/snapshotclone/snapshotclone_define.h b/src/common/snapshotclone/snapshotclone_define.h index 55925ea578..ffa5428a6e 100644 --- a/src/common/snapshotclone/snapshotclone_define.h +++ b/src/common/snapshotclone/snapshotclone_define.h @@ -56,6 +56,7 @@ extern const char* kOffsetStr; extern const char* kSourceStr; extern const char* kDestinationStr; extern const char* kLazyStr; +extern const char* kPoolset; extern const char* kStatusStr; extern const char* kTypeStr; extern const char* kInodeStr; diff --git a/src/fs/ext4_filesystem_impl.cpp b/src/fs/ext4_filesystem_impl.cpp index 7392e2d01e..f4cd6cfcdb 100644 --- a/src/fs/ext4_filesystem_impl.cpp +++ b/src/fs/ext4_filesystem_impl.cpp @@ -337,7 +337,7 @@ int Ext4FileSystemImpl::Write(int fd, butil::IOBuf buf, uint64_t offset, int length) { - if (length != buf.size()) { + if (length != static_cast(buf.size())) { LOG(ERROR) << "IOBuf::pcut_into_file_descriptor failed, fd: " << fd << ", data size doesn't equal to length, data size: " << buf.size() << ", length: " << length; @@ -345,7 +345,6 @@ int Ext4FileSystemImpl::Write(int fd, } int remainLength = length; - int relativeOffset = 0; int retryTimes = 0; while (remainLength > 0) { @@ -380,6 +379,9 @@ int Ext4FileSystemImpl::Sync(int fd) { int Ext4FileSystemImpl::Append(int fd, const char *buf, int length) { + (void)fd; + (void)buf; + (void)length; // TODO(yyk) return 0; } diff --git a/src/fs/local_filesystem.cpp b/src/fs/local_filesystem.cpp index 52301b0c27..a14ae59829 100644 --- a/src/fs/local_filesystem.cpp +++ b/src/fs/local_filesystem.cpp @@ -32,6 +32,7 @@ namespace fs { std::shared_ptr LocalFsFactory::CreateFs( FileSystemType type, const std::string& deviceID) { + (void)deviceID; std::shared_ptr localFs; if (type == FileSystemType::EXT4) { localFs = Ext4FileSystemImpl::getInstance(); diff --git a/src/mds/common/mds_define.h b/src/mds/common/mds_define.h index e64115ea98..54f74c31f3 100644 --- a/src/mds/common/mds_define.h +++ b/src/mds/common/mds_define.h @@ -61,6 +61,7 @@ namespace topology { typedef uint16_t LogicalPoolIdType; typedef uint16_t PhysicalPoolIdType; typedef uint16_t PoolIdType; +typedef uint16_t PoolsetIdType; typedef uint32_t ZoneIdType; typedef uint32_t ServerIdType; typedef uint32_t ChunkServerIdType; @@ -93,6 +94,8 @@ const int kTopoErrCodeNameDuplicated = -16; const int kTopoErrCodeCreateCopysetNodeOnChunkServerFail = -17; const int kTopoErrCodeCannotRemoveNotRetired = -18; const int kTopoErrCodeLogicalPoolExist = -19; +const int kTopoErrCodePoolsetNotFound = -20; +const int kTopoErrCodeCannotDeleteDefaultPoolset = -21; } // namespace topology } // namespace mds diff --git a/src/mds/copyset/copyset_manager.cpp 
b/src/mds/copyset/copyset_manager.cpp index fc06973da9..445885860c 100644 --- a/src/mds/copyset/copyset_manager.cpp +++ b/src/mds/copyset/copyset_manager.cpp @@ -57,7 +57,7 @@ bool CopysetManager::GenCopyset(const ClusterInfo& cluster, } int numChunkServers = cluster.GetClusterSize(); - if (*scatterWidth > (numChunkServers - 1)) { + if (static_cast(*scatterWidth) > (numChunkServers - 1)) { // It's impossible that scatterWidth is lager than cluster size return false; } diff --git a/src/mds/copyset/copyset_policy.cpp b/src/mds/copyset/copyset_policy.cpp index e938c3498b..8f2612efcd 100644 --- a/src/mds/copyset/copyset_policy.cpp +++ b/src/mds/copyset/copyset_policy.cpp @@ -131,6 +131,8 @@ void CopysetZoneShufflePolicy::GetMinCopySetFromScatterWidth( int CopysetZoneShufflePolicy::GetMaxPermutationNum(int numCopysets, int numChunkServers, int numReplicas) { + (void)numChunkServers; + (void)numReplicas; return numCopysets; } diff --git a/src/mds/heartbeat/chunkserver_healthy_checker.cpp b/src/mds/heartbeat/chunkserver_healthy_checker.cpp index 0b3844a8a1..ce4225bd1d 100644 --- a/src/mds/heartbeat/chunkserver_healthy_checker.cpp +++ b/src/mds/heartbeat/chunkserver_healthy_checker.cpp @@ -90,7 +90,6 @@ bool ChunkserverHealthyChecker::ChunkServerStateNeedUpdate( return false; } - bool shouldOffline = true; if (OnlineState::OFFLINE != info.state) { LOG(WARNING) << "chunkserver " << info.csId << " is offline. " << timePass / milliseconds(1) << "ms from last heartbeat"; diff --git a/src/mds/heartbeat/heartbeat_manager.cpp b/src/mds/heartbeat/heartbeat_manager.cpp index 5aae8a5578..b5100eaa73 100644 --- a/src/mds/heartbeat/heartbeat_manager.cpp +++ b/src/mds/heartbeat/heartbeat_manager.cpp @@ -208,6 +208,9 @@ void HeartbeatManager::ChunkServerHeartbeat( UpdateChunkServerDiskStatus(request); UpdateChunkServerStatistics(request); + + UpdateChunkServerVersion(request); + // no copyset info in the request if (request.copysetinfos_size() == 0) { response->set_statuscode(HeartbeatStatusCode::hbRequestNoCopyset); @@ -395,6 +398,22 @@ ChunkServerIdType HeartbeatManager::GetChunkserverIdByPeerStr( << ", port: " << port << " from topology"; return UNINTIALIZE_ID; } + +void HeartbeatManager::UpdateChunkServerVersion( + const ChunkServerHeartbeatRequest &request) { + // update chunkServer version + if (request.has_version()) { + int ret = topology_->UpdateChunkServerVersion(request.version(), + request.chunkserverid()); + if (ret != 0) { + LOG(ERROR) + << "heartbeat UpdateChunkServerVersion failed, chunkServerId: " + << request.chunkserverid() << ", version: " << request.version() + << ", error is: " << ret; + } + } +} + } // namespace heartbeat } // namespace mds } // namespace curve diff --git a/src/mds/heartbeat/heartbeat_manager.h b/src/mds/heartbeat/heartbeat_manager.h index 81648603b5..91505734d7 100644 --- a/src/mds/heartbeat/heartbeat_manager.h +++ b/src/mds/heartbeat/heartbeat_manager.h @@ -123,11 +123,17 @@ class HeartbeatManager { * @brief Update statistical data of chunkserver * * @param request Heartbeat request - * @param response response of heartbeat request */ void UpdateChunkServerStatistics( const ChunkServerHeartbeatRequest &request); + /** + * @brief Update version of chunkserver + * + * @param request Heartbeat request + */ + void UpdateChunkServerVersion(const ChunkServerHeartbeatRequest &request); + /** * @brief Background thread for heartbeat timeout inspection */ diff --git a/src/mds/heartbeat/heartbeat_service.cpp b/src/mds/heartbeat/heartbeat_service.cpp index db55479a34..1100952d9e 
100644 --- a/src/mds/heartbeat/heartbeat_service.cpp +++ b/src/mds/heartbeat/heartbeat_service.cpp @@ -36,6 +36,7 @@ void HeartbeatServiceImpl::ChunkServerHeartbeat( const ::curve::mds::heartbeat::ChunkServerHeartbeatRequest *request, ::curve::mds::heartbeat::ChunkServerHeartbeatResponse *response, ::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); heartbeatManager_->ChunkServerHeartbeat(*request, response); } diff --git a/src/mds/main/main.cpp b/src/mds/main/main.cpp index c7349fa91e..70d9a5dc10 100644 --- a/src/mds/main/main.cpp +++ b/src/mds/main/main.cpp @@ -132,4 +132,3 @@ int main(int argc, char **argv) { google::ShutdownGoogleLogging(); return 0; } - diff --git a/src/mds/nameserver2/BUILD b/src/mds/nameserver2/BUILD index 33c22d5096..2af8c5f7b8 100644 --- a/src/mds/nameserver2/BUILD +++ b/src/mds/nameserver2/BUILD @@ -41,5 +41,6 @@ cc_library( "//src/mds/nameserver2/idgenerator:nameserver_idgenerator", "//src/mds/snapshotcloneclient", "//src/mds/topology", + "@com_google_absl//absl/strings", ], ) diff --git a/src/mds/nameserver2/allocstatistic/alloc_statistic.h b/src/mds/nameserver2/allocstatistic/alloc_statistic.h index caa90f2d22..6ecb6f0da4 100644 --- a/src/mds/nameserver2/allocstatistic/alloc_statistic.h +++ b/src/mds/nameserver2/allocstatistic/alloc_statistic.h @@ -32,12 +32,12 @@ #include "src/common/concurrent/concurrent.h" #include "src/common/interruptible_sleeper.h" -using ::curve::mds::topology::PoolIdType; using ::curve::common::Atomic; +using ::curve::common::InterruptibleSleeper; using ::curve::common::Mutex; using ::curve::common::RWLock; using ::curve::common::Thread; -using ::curve::common::InterruptibleSleeper; +using ::curve::mds::topology::PoolIdType; namespace curve { namespace mds { @@ -49,7 +49,8 @@ using ::curve::kvstorage::EtcdClientImp; * The statistics are divided into two parts: * part1: * 1. statistics of the allocation amount before the designated revision - * 2. record the segment allocation amount of each revision since mds started + * 2. record the segment allocation amount of each revision since mds + * started * 3. combine the data in 1 and 2 * part2: the background periodically persists the merged data in part1 * @@ -76,17 +77,12 @@ class AllocStatistic { * @param[in] client Etcd client */ AllocStatistic(uint64_t periodicPersistInterMs, uint64_t retryInterMs, - std::shared_ptr client) : - client_(client), - currentValueAvalible_(false), - segmentAllocFromEtcdOK_(false), - stop_(true), - periodicPersistInterMs_(periodicPersistInterMs), - retryInterMs_(retryInterMs) {} - - ~AllocStatistic() { - Stop(); - } + std::shared_ptr client) + : client_(client), segmentAllocFromEtcdOK_(false), + currentValueAvalible_(false), retryInterMs_(retryInterMs), + periodicPersistInterMs_(periodicPersistInterMs), stop_(true) {} + + ~AllocStatistic() { Stop(); } /** * @brief Init Obtains the allocated segment information and information in @@ -97,7 +93,7 @@ class AllocStatistic { */ int Init(); - /** + /** * @brief Run 1. get all the segments under the specified revision * 2. 
persist the statistics of allocated segment size in memory * under each logicalPool regularly @@ -138,11 +134,10 @@ class AllocStatistic { * @param[in] changeSize Segment reduction * @param[in] revision Version corresponding to this change */ - virtual void DeAllocSpace( - PoolIdType, int64_t changeSize, int64_t revision); + virtual void DeAllocSpace(PoolIdType, int64_t changeSize, int64_t revision); private: - /** + /** * @brief CalculateSegmentAlloc Get all the segment records of the * specified revision from Etcd */ @@ -154,14 +149,15 @@ class AllocStatistic { */ void PeriodicPersist(); - /** + /** * @brief HandleResult Dealing with the situation that error occur when * obtaining all segment records of specified revision */ bool HandleResult(int res); /** - * @brief DoMerge For each logicalPool, merge the change amount and data read in Etcd //NOLINT + * @brief DoMerge For each logicalPool, merge the change amount and data + * read in Etcd //NOLINT */ void DoMerge(); @@ -201,7 +197,8 @@ class AllocStatistic { std::map existSegmentAllocValues_; RWLock existSegmentAllocValuesLock_; - // At the beginning, stores allocation data of the segment before specified revision //NOLINT + // At the beginning, stores allocation data of the segment before specified + // revision // Later, stores the merged value std::map segmentAlloc_; RWLock segmentAllocLock_; @@ -230,7 +227,8 @@ class AllocStatistic { InterruptibleSleeper sleeper_; - // thread for periodically persisting allocated segment size of each logical pool //NOLINT + // thread for periodically persisting allocated segment size of each logical + // pool Thread periodicPersist_; // thread for calculating allocated segment size under specified revision @@ -240,4 +238,3 @@ class AllocStatistic { } // namespace curve #endif // SRC_MDS_NAMESERVER2_ALLOCSTATISTIC_ALLOC_STATISTIC_H_ - diff --git a/src/mds/nameserver2/allocstatistic/alloc_statistic_helper.cpp b/src/mds/nameserver2/allocstatistic/alloc_statistic_helper.cpp index 89bec51c80..5a4a980615 100644 --- a/src/mds/nameserver2/allocstatistic/alloc_statistic_helper.cpp +++ b/src/mds/nameserver2/allocstatistic/alloc_statistic_helper.cpp @@ -32,22 +32,21 @@ namespace curve { namespace mds { -using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTALLOCSIZEKEY; -using ::curve::common::SEGMENTINFOKEYPREFIX; +using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTINFOKEYEND; +using ::curve::common::SEGMENTINFOKEYPREFIX; const int GETBUNDLE = 1000; int AllocStatisticHelper::GetExistSegmentAllocValues( std::map *out, const std::shared_ptr &client) { // Obtain the segmentSize value of corresponding logical pools from Etcd std::vector allocVec; - int res = client->List( - SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, &allocVec); + int res = + client->List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, &allocVec); if (res != EtcdErrCode::EtcdOK) { LOG(ERROR) << "list [" << SEGMENTALLOCSIZEKEY << "," - << SEGMENTALLOCSIZEKEYEND << ") fail, errorCode: " - << res; + << SEGMENTALLOCSIZEKEYEND << ") fail, errorCode: " << res; return -1; } @@ -55,8 +54,8 @@ int AllocStatisticHelper::GetExistSegmentAllocValues( for (auto &item : allocVec) { PoolIdType lid; uint64_t alloc; - bool res = NameSpaceStorageCodec::DecodeSegmentAllocValue( - item, &lid, &alloc); + bool res = + NameSpaceStorageCodec::DecodeSegmentAllocValue(item, &lid, &alloc); if (false == res) { LOG(ERROR) << "decode segment alloc value: " << item << " fail"; continue; @@ -83,8 +82,9 @@ int 
AllocStatisticHelper::CalculateSegmentAlloc( // get segments in bundles from Etcd, GETBUNDLE is the number of items // to fetch - int res = client->ListWithLimitAndRevision( - startKey, SEGMENTINFOKEYEND, GETBUNDLE, revision, &values, &lastKey); + int res = client->ListWithLimitAndRevision(startKey, SEGMENTINFOKEYEND, + GETBUNDLE, revision, &values, + &lastKey); if (res != EtcdErrCode::EtcdOK) { LOG(ERROR) << "list [" << startKey << "," << SEGMENTINFOKEYEND << ") at revision: " << revision @@ -94,17 +94,17 @@ int AllocStatisticHelper::CalculateSegmentAlloc( } // decode the obtained value - int startPos = 1; + size_t startPos = 1; if (startKey == SEGMENTINFOKEYPREFIX) { startPos = 0; } - for ( ; startPos < values.size(); startPos++) { + for (; startPos < values.size(); startPos++) { PageFileSegment segment; - bool res = NameSpaceStorageCodec::DecodeSegment( - values[startPos], &segment); + bool res = NameSpaceStorageCodec::DecodeSegment(values[startPos], + &segment); if (false == res) { - LOG(ERROR) << "decode segment item{" - << values[startPos] << "} fail"; + LOG(ERROR) << "decode segment item{" << values[startPos] + << "} fail"; return -1; } else { (*out)[segment.logicalpoolid()] += segment.segmentsize(); diff --git a/src/mds/nameserver2/chunk_allocator.cpp b/src/mds/nameserver2/chunk_allocator.cpp index e2ee64425b..4337d4052e 100644 --- a/src/mds/nameserver2/chunk_allocator.cpp +++ b/src/mds/nameserver2/chunk_allocator.cpp @@ -29,7 +29,8 @@ namespace curve { namespace mds { bool ChunkSegmentAllocatorImpl::AllocateChunkSegment(FileType type, SegmentSizeType segmentSize, ChunkSizeType chunkSize, - offset_t offset, PageFileSegment *segment) { + const std::string& pstName, offset_t offset, + PageFileSegment *segment) { if (segment == nullptr) { LOG(ERROR) << "segment pointer is null"; return false; @@ -54,7 +55,7 @@ bool ChunkSegmentAllocatorImpl::AllocateChunkSegment(FileType type, std::vector copysets; if (!topologyChunkAllocator_-> AllocateChunkRoundRobinInSingleLogicalPool( - type, chunkNum, chunkSize, ©sets)) { + type, pstName, chunkNum, chunkSize, ©sets)) { LOG(ERROR) << "AllocateChunkRoundRobinInSingleLogicalPool error"; return false; } @@ -63,7 +64,7 @@ bool ChunkSegmentAllocatorImpl::AllocateChunkSegment(FileType type, return false; } auto logicalpoolId = copysets[0].logicalPoolId; - for (auto i = 0; i < copysets.size(); i++) { + for (size_t i = 0; i < copysets.size(); i++) { if (copysets[i].logicalPoolId != logicalpoolId) { LOG(ERROR) << "Allocate Copysets id not same, copysets[" << i << "] = " @@ -91,4 +92,3 @@ bool ChunkSegmentAllocatorImpl::AllocateChunkSegment(FileType type, } // namespace mds } // namespace curve - diff --git a/src/mds/nameserver2/chunk_allocator.h b/src/mds/nameserver2/chunk_allocator.h index 52583d75ae..15411c8d27 100644 --- a/src/mds/nameserver2/chunk_allocator.h +++ b/src/mds/nameserver2/chunk_allocator.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include "src/mds/common/mds_define.h" #include "src/mds/nameserver2/idgenerator/chunk_id_generator.h" @@ -42,10 +43,13 @@ class ChunkSegmentAllocator { virtual bool AllocateChunkSegment(FileType type, SegmentSizeType segmentSize, ChunkSizeType chunkSize, - offset_t offset, PageFileSegment *segment) = 0; + const std::string& pstName, offset_t offset, + PageFileSegment *segment) = 0; + virtual void GetRemainingSpaceInLogicalPool( const std::vector& logicalPools, - std::map* remianingSpace) = 0; + std::map* remainingSpace, + const std::string& pstName) = 0; }; @@ -60,21 +64,18 @@ class 
ChunkSegmentAllocatorImpl: public ChunkSegmentAllocator { chunkIDGenerator_ = chunkIDGenerator; } - ~ChunkSegmentAllocatorImpl() { - topologyChunkAllocator_ = nullptr; - chunkIDGenerator_ = nullptr; - } - bool AllocateChunkSegment(FileType type, SegmentSizeType segmentSize, ChunkSizeType chunkSize, - offset_t offset, PageFileSegment *segment) override; + const std::string& pstName, offset_t offset, + PageFileSegment *segment) override; void GetRemainingSpaceInLogicalPool( - const std::vector& logicalPools, - std::map* remianingSpace) { - return topologyChunkAllocator_->GetRemainingSpaceInLogicalPool( - logicalPools, remianingSpace); - } + const std::vector& logicalPools, + std::map* remainingSpace, + const std::string& pstName) { + return topologyChunkAllocator_->GetRemainingSpaceInLogicalPool( + logicalPools, remainingSpace, pstName); + } private: std::shared_ptr topologyChunkAllocator_; diff --git a/src/mds/nameserver2/clean_core.cpp b/src/mds/nameserver2/clean_core.cpp index cf5907d5fd..54f743c300 100644 --- a/src/mds/nameserver2/clean_core.cpp +++ b/src/mds/nameserver2/clean_core.cpp @@ -172,7 +172,6 @@ StatusCode CleanCore::CleanDiscardSegment( const DiscardSegmentInfo& discardSegmentInfo, TaskProgress* progress) { const FileInfo& fileInfo = discardSegmentInfo.fileinfo(); const PageFileSegment& segment = discardSegmentInfo.pagefilesegment(); - const LogicalPoolID logicalPoolId = segment.logicalpoolid(); const SeqNum seq = fileInfo.seqnum(); LOG(INFO) << "Start CleanDiscardSegment, filename = " << fileInfo.filename() diff --git a/src/mds/nameserver2/curvefs.cpp b/src/mds/nameserver2/curvefs.cpp index a5dfef17d0..b5e1e07aef 100644 --- a/src/mds/nameserver2/curvefs.cpp +++ b/src/mds/nameserver2/curvefs.cpp @@ -36,7 +36,10 @@ #include "src/mds/nameserver2/helper/namespace_helper.h" #include "src/common/math_util.h" +#include "absl/strings/match.h" + using curve::common::TimeUtility; +using curve::common::kDefaultPoolsetName; using curve::mds::topology::LogicalPool; using curve::mds::topology::LogicalPoolIdType; using curve::mds::topology::PhysicalPool; @@ -45,6 +48,7 @@ using curve::mds::topology::CopySetIdType; using curve::mds::topology::ChunkServer; using curve::mds::topology::ChunkServerStatus; using curve::mds::topology::OnlineState; +using curve::mds::topology::Poolset; namespace curve { namespace mds { @@ -140,6 +144,7 @@ bool CurveFS::Init(std::shared_ptr storage, maxFileLength_ = curveFSOptions.maxFileLength; topology_ = topology; snapshotCloneClient_ = snapshotCloneClient; + poolsetRules_ = curveFSOptions.poolsetRules; InitRootFile(); bool ret = InitRecycleBinDir(); @@ -244,10 +249,13 @@ StatusCode CurveFS::SnapShotFile(const FileInfo * origFileInfo, } } -StatusCode CurveFS::CreateFile(const std::string & fileName, +StatusCode CurveFS::CreateFile(const std::string& fileName, + std::string poolset, const std::string& owner, - FileType filetype, uint64_t length, - uint64_t stripeUnit, uint64_t stripeCount) { + FileType filetype, + uint64_t length, + uint64_t stripeUnit, + uint64_t stripeCount) { FileInfo parentFileInfo; std::string lastEntry; @@ -260,6 +268,11 @@ StatusCode CurveFS::CreateFile(const std::string & fileName, // check param if (filetype == FileType::INODE_PAGEFILE) { + StatusCode retCode = CheckOrAssignPoolset(fileName, &poolset); + if (retCode != StatusCode::kOK) { + return retCode; + } + if (length < minFileLength_) { LOG(ERROR) << "file Length < MinFileLength " << minFileLength_ << ", length = " << length; @@ -286,8 +299,9 @@ StatusCode CurveFS::CreateFile(const 
std::string & fileName, topology_->GetLogicalPoolInCluster(); std::map remianingSpace; uint64_t allRemianingSpace = 0; + chunkSegAllocator_->GetRemainingSpaceInLogicalPool(logicalPools, - &remianingSpace); + &remianingSpace, poolset); for (auto it = remianingSpace.begin(); it != remianingSpace.end(); it++) { allRemianingSpace +=it->second; @@ -328,6 +342,7 @@ StatusCode CurveFS::CreateFile(const std::string & fileName, fileInfo.set_id(inodeID); fileInfo.set_filename(lastEntry); + fileInfo.set_poolset(poolset); fileInfo.set_parentid(parentFileInfo.id()); fileInfo.set_filetype(filetype); fileInfo.set_owner(owner); @@ -453,6 +468,7 @@ StatusCode CurveFS::GetAllocatedSize(const std::string& fileName, StatusCode CurveFS::GetFileAllocSize(const std::string& fileName, const FileInfo& fileInfo, AllocatedSize* allocSize) { + (void)fileName; std::vector segments; auto listSegmentRet = storage_->ListSegment(fileInfo.id(), &segments); @@ -470,6 +486,7 @@ StatusCode CurveFS::GetFileAllocSize(const std::string& fileName, StatusCode CurveFS::GetDirAllocSize(const std::string& fileName, const FileInfo& fileInfo, AllocatedSize* allocSize) { + (void)fileInfo; std::vector files; StatusCode ret = ReadDir(fileName, &files); if (ret != StatusCode::kOK) { @@ -771,6 +788,28 @@ StatusCode CurveFS::DeleteFile(const std::string & filename, uint64_t fileId, } } +StatusCode CurveFS::CheckOrAssignPoolset(const std::string& filename, + std::string* poolset) const { + const auto names = topology_->GetPoolsetNameInCluster(); + if (names.empty()) { + LOG(WARNING) << "Cluster doesn't have poolsets"; + return StatusCode::kPoolsetNotExist; + } + + if (poolset->empty()) { + *poolset = SelectPoolsetByRules(filename, poolsetRules_); + LOG(INFO) << "Poolset is empty, set to: " << *poolset; + } + + auto it = std::find(names.begin(), names.end(), *poolset); + if (it == names.end()) { + LOG(WARNING) << "Poolset `" << *poolset << "` not found"; + return StatusCode::kPoolsetNotExist; + } + + return StatusCode::kOK; +} + StatusCode CurveFS::RecoverFile(const std::string & originFileName, const std::string & recycleFileName, uint64_t fileId) { @@ -1253,8 +1292,11 @@ StatusCode CurveFS::GetOrAllocateSegment(const std::string & filename, } else { // TODO(hzsunjianliang): check the user and define the logical pool auto ifok = chunkSegAllocator_->AllocateChunkSegment( - fileInfo.filetype(), fileInfo.segmentsize(), - fileInfo.chunksize(), offset, segment); + fileInfo.filetype(), fileInfo.segmentsize(), + fileInfo.chunksize(), + fileInfo.has_poolset() ? 
fileInfo.poolset() + : kDefaultPoolsetName, + offset, segment); if (ifok == false) { LOG(ERROR) << "AllocateChunkSegment error"; return StatusCode::kSegmentAllocateError; @@ -1785,6 +1827,7 @@ StatusCode CurveFS::CreateCloneFile(const std::string &fileName, ChunkSizeType chunksize, uint64_t stripeUnit, uint64_t stripeCount, + std::string poolset, FileInfo *retFileInfo, const std::string & cloneSource, uint64_t cloneLength) { @@ -1808,6 +1851,11 @@ StatusCode CurveFS::CreateCloneFile(const std::string &fileName, return ret; } + ret = CheckOrAssignPoolset(fileName, &poolset); + if (ret != StatusCode::kOK) { + return ret; + } + // check the existence of the file FileInfo parentFileInfo; std::string lastEntry; @@ -1855,6 +1903,7 @@ StatusCode CurveFS::CreateCloneFile(const std::string &fileName, fileInfo.set_filestatus(FileStatus::kFileCloning); fileInfo.set_stripeunit(stripeUnit); fileInfo.set_stripecount(stripeCount); + fileInfo.set_poolset(poolset); fileInfo.set_allocated_throttleparams( new FileThrottleParams(GenerateDefaultThrottleParams(length))); @@ -1955,6 +2004,7 @@ StatusCode CurveFS::CheckPathOwnerInternal(const std::string &filename, const std::string &signature, std::string *lastEntry, uint64_t *parentID) { + (void)signature; std::vector paths; ::curve::common::SplitString(filename, "/", &paths); @@ -2247,6 +2297,7 @@ bool CurveFS::CheckSignature(const std::string& owner, StatusCode CurveFS::ListClient(bool listAllClient, std::vector* clientInfos) { + (void)listAllClient; std::set allClients = fileRecordManager_->ListAllClient(); for (const auto &c : allClients) { @@ -2650,5 +2701,29 @@ uint64_t GetOpenFileNum(void *varg) { bvar::PassiveStatus g_open_file_num_bvar( CURVE_MDS_CURVEFS_METRIC_PREFIX, "open_file_num", GetOpenFileNum, &kCurveFS); + +std::string SelectPoolsetByRules( + const std::string& filename, + const std::map& rules) { + if (rules.empty()) { + return kDefaultPoolsetName; + } + + // using reverse order, so that we support subdir rules + // + // for example + // /A/ -> poolset1 + // /A/B/ -> poolset2 + // + // if filename is /A/B/C, then we select `poolset2` + for (auto it = rules.rbegin(); it != rules.rend(); ++it) { + if (absl::StartsWith(filename, it->first)) { + return it->second; + } + } + + return kDefaultPoolsetName; +} + } // namespace mds } // namespace curve diff --git a/src/mds/nameserver2/curvefs.h b/src/mds/nameserver2/curvefs.h index 3297827381..bde5788412 100644 --- a/src/mds/nameserver2/curvefs.h +++ b/src/mds/nameserver2/curvefs.h @@ -30,6 +30,7 @@ #include //NOLINT #include //NOLINT #include +#include #include "proto/nameserver2.pb.h" #include "src/mds/nameserver2/namespace_storage.h" #include "src/mds/common/mds_define.h" @@ -73,6 +74,7 @@ struct CurveFSOption { RootAuthOption authOptions; FileRecordOptions fileRecordOptions; ThrottleOption throttleOption; + std::map poolsetRules; }; struct AllocatedSize { @@ -141,6 +143,7 @@ class CurveFS { * @return return StatusCode::kOK if succeeded */ StatusCode CreateFile(const std::string & fileName, + std::string poolset, const std::string& owner, FileType filetype, uint64_t length, @@ -214,6 +217,14 @@ class CurveFS { const std::string & recycleFileName, uint64_t fileId); + /** + * @brief check or assign poolset name + * @param[in|out] poolset: poolset name + * @return StatusCode::kOK if success + */ + StatusCode CheckOrAssignPoolset(const std::string& filename, + std::string* poolset) const; + /** * @brief increase file epoch * @@ -441,6 +452,7 @@ class CurveFS { ChunkSizeType chunksize, uint64_t 
stripeUnit, uint64_t stripeCount, + std::string poolset, FileInfo *fileInfo, const std::string & cloneSource = "", uint64_t cloneLength = 0); @@ -818,6 +830,8 @@ class CurveFS { uint64_t minFileLength_; uint64_t maxFileLength_; std::chrono::steady_clock::time_point startTime_; + + std::map poolsetRules_; }; extern CurveFS &kCurveFS; @@ -840,6 +854,10 @@ StatusCode CheckStripeParam(uint64_t segmentSize, uint64_t stripeUnit, uint64_t stripeCount); +std::string SelectPoolsetByRules( + const std::string& filename, + const std::map& rules); + } // namespace mds } // namespace curve #endif // SRC_MDS_NAMESERVER2_CURVEFS_H_ diff --git a/src/mds/nameserver2/namespace_service.cpp b/src/mds/nameserver2/namespace_service.cpp index 0c64f34f33..980fd0b036 100644 --- a/src/mds/nameserver2/namespace_service.cpp +++ b/src/mds/nameserver2/namespace_service.cpp @@ -55,9 +55,7 @@ void NameSpaceService::CreateFile(::google::protobuf::RpcController* controller, } LOG(INFO) << "logid = " << cntl->log_id() - << ", CreateFile request, filename = " << request->filename() - << ", filetype = " << request->filetype() - << ", filelength = " << request->filelength(); + << ", CreateFile request: " << request->ShortDebugString(); FileWriteLockGuard guard(fileLockManager_, request->filename()); @@ -85,9 +83,9 @@ void NameSpaceService::CreateFile(::google::protobuf::RpcController* controller, return; } - retCode = kCurveFS.CreateFile(request->filename(), request->owner(), - request->filetype(), request->filelength(), request->stripeunit(), - request->stripecount()); + retCode = kCurveFS.CreateFile(request->filename(), request->poolset(), + request->owner(), request->filetype(), request->filelength(), + request->stripeunit(), request->stripecount()); if (retCode != StatusCode::kOK) { response->set_statuscode(retCode); // TODO(hzsunjianliang): check if we should really print error here @@ -1848,6 +1846,7 @@ void NameSpaceService::CreateCloneFile( request->chunksize(), request->stripeunit(), request->stripecount(), + request->poolset(), response->mutable_fileinfo(), request->clonesource(), request->filelength()); diff --git a/src/mds/nameserver2/namespace_storage.cpp b/src/mds/nameserver2/namespace_storage.cpp index 9191178884..6546869f69 100644 --- a/src/mds/nameserver2/namespace_storage.cpp +++ b/src/mds/nameserver2/namespace_storage.cpp @@ -26,43 +26,40 @@ #include "src/mds/nameserver2/helper/namespace_helper.h" #include "src/common/namespace_define.h" -using ::curve::common::SNAPSHOTFILEINFOKEYPREFIX; -using ::curve::common::SNAPSHOTFILEINFOKEYEND; -using ::curve::common::DISCARDSEGMENTKEYPREFIX; using ::curve::common::DISCARDSEGMENTKEYEND; +using ::curve::common::DISCARDSEGMENTKEYPREFIX; +using ::curve::common::SNAPSHOTFILEINFOKEYEND; +using ::curve::common::SNAPSHOTFILEINFOKEYPREFIX; namespace curve { namespace mds { -std::ostream& operator << (std::ostream & os, StoreStatus &s) { +std::ostream &operator<<(std::ostream &os, StoreStatus &s) { os << static_cast::type>(s); return os; } NameServerStorageImp::NameServerStorageImp( std::shared_ptr client, std::shared_ptr cache) - : client_(client), cache_(cache), discardMetric_() {} + : cache_(cache), client_(client), discardMetric_() {} StoreStatus NameServerStorageImp::PutFile(const FileInfo &fileInfo) { std::string storeKey; - if (GetStoreKey(fileInfo.filetype(), - fileInfo.parentid(), - fileInfo.filename(), - &storeKey) - != StoreStatus::OK) { + if (GetStoreKey(fileInfo.filetype(), fileInfo.parentid(), + fileInfo.filename(), &storeKey) != StoreStatus::OK) { LOG(ERROR) << 
"get store key failed,filename = " << fileInfo.filename(); return StoreStatus::InternalError; } std::string encodeFileInfo; if (!NameSpaceStorageCodec::EncodeFileInfo(fileInfo, &encodeFileInfo)) { - LOG(ERROR) << "encode file: " << fileInfo.filename()<< "err"; + LOG(ERROR) << "encode file: " << fileInfo.filename() << "err"; return StoreStatus::InternalError; } int errCode = client_->Put(storeKey, encodeFileInfo); if (errCode != EtcdErrCode::EtcdOK) { - LOG(ERROR) << "put file: [" << fileInfo.filename() << "] err: " - << errCode; + LOG(ERROR) << "put file: [" << fileInfo.filename() + << "] err: " << errCode; } else { // update to cache cache_->Put(storeKey, encodeFileInfo); @@ -75,8 +72,8 @@ StoreStatus NameServerStorageImp::GetFile(InodeID parentid, const std::string &filename, FileInfo *fileInfo) { std::string storeKey; - if (GetStoreKey(FileType::INODE_PAGEFILE, parentid, filename, &storeKey) - != StoreStatus::OK) { + if (GetStoreKey(FileType::INODE_PAGEFILE, parentid, filename, &storeKey) != + StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << filename; return StoreStatus::InternalError; } @@ -112,10 +109,10 @@ StoreStatus NameServerStorageImp::GetFile(InodeID parentid, } StoreStatus NameServerStorageImp::DeleteFile(InodeID id, - const std::string &filename) { + const std::string &filename) { std::string storeKey; - if (GetStoreKey(FileType::INODE_PAGEFILE, id, filename, &storeKey) - != StoreStatus::OK) { + if (GetStoreKey(FileType::INODE_PAGEFILE, id, filename, &storeKey) != + StoreStatus::OK) { LOG(ERROR) << "get store key failed,filename = " << filename; return StoreStatus::InternalError; } @@ -131,11 +128,12 @@ StoreStatus NameServerStorageImp::DeleteFile(InodeID id, return getErrorCode(resCode); } -StoreStatus NameServerStorageImp::DeleteSnapshotFile(InodeID id, - const std::string &filename) { +StoreStatus +NameServerStorageImp::DeleteSnapshotFile(InodeID id, + const std::string &filename) { std::string storeKey; - if (GetStoreKey(FileType::INODE_SNAPSHOT_PAGEFILE, - id, filename, &storeKey) != StoreStatus::OK) { + if (GetStoreKey(FileType::INODE_SNAPSHOT_PAGEFILE, id, filename, + &storeKey) != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << filename; return StoreStatus::InternalError; } @@ -152,12 +150,10 @@ StoreStatus NameServerStorageImp::DeleteSnapshotFile(InodeID id, } StoreStatus NameServerStorageImp::RenameFile(const FileInfo &oldFInfo, - const FileInfo &newFInfo) { + const FileInfo &newFInfo) { std::string oldStoreKey; - auto res = GetStoreKey(FileType::INODE_PAGEFILE, - oldFInfo.parentid(), - oldFInfo.filename(), - &oldStoreKey); + auto res = GetStoreKey(FileType::INODE_PAGEFILE, oldFInfo.parentid(), + oldFInfo.filename(), &oldStoreKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << oldFInfo.filename(); @@ -165,10 +161,8 @@ StoreStatus NameServerStorageImp::RenameFile(const FileInfo &oldFInfo, } std::string newStoreKey; - res = GetStoreKey(FileType::INODE_PAGEFILE, - newFInfo.parentid(), - newFInfo.filename(), - &newStoreKey); + res = GetStoreKey(FileType::INODE_PAGEFILE, newFInfo.parentid(), + newFInfo.filename(), &newStoreKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << newFInfo.filename(); @@ -179,33 +173,29 @@ StoreStatus NameServerStorageImp::RenameFile(const FileInfo &oldFInfo, std::string encodeNewFileInfo; if (!NameSpaceStorageCodec::EncodeFileInfo(oldFInfo, &encodeOldFileInfo) || !NameSpaceStorageCodec::EncodeFileInfo(newFInfo, 
&encodeNewFileInfo)) { - LOG(ERROR) << "encode oldfile inodeid : " << oldFInfo.id() - << ", oldfile: " << oldFInfo.filename() - << " or newfile inodeid : " << newFInfo.id() - << ", newfile: " << newFInfo.filename() << "err"; - return StoreStatus::InternalError; + LOG(ERROR) << "encode oldfile inodeid : " << oldFInfo.id() + << ", oldfile: " << oldFInfo.filename() + << " or newfile inodeid : " << newFInfo.id() + << ", newfile: " << newFInfo.filename() << "err"; + return StoreStatus::InternalError; } // delete the data in the cache first cache_->Remove(oldStoreKey); // update Etcd - Operation op1{ - OpType::OpDelete, - const_cast(oldStoreKey.c_str()), "", - oldStoreKey.size(), 0}; - Operation op2{ - OpType::OpPut, - const_cast(newStoreKey.c_str()), - const_cast(encodeNewFileInfo.c_str()), - newStoreKey.size(), encodeNewFileInfo.size()}; + Operation op1{OpType::OpDelete, const_cast(oldStoreKey.c_str()), "", + static_cast(oldStoreKey.size()), 0}; + Operation op2{OpType::OpPut, const_cast(newStoreKey.c_str()), + const_cast(encodeNewFileInfo.c_str()), + static_cast(newStoreKey.size()), + static_cast(encodeNewFileInfo.size())}; std::vector ops{op1, op2}; int errCode = client_->TxnN(ops); if (errCode != EtcdErrCode::EtcdOK) { LOG(ERROR) << "rename file from [" << oldFInfo.id() << ", " - << oldFInfo.filename() << "] to [" << newFInfo.id() - << ", " << newFInfo.filename() << "] err: " - << errCode; + << oldFInfo.filename() << "] to [" << newFInfo.id() << ", " + << newFInfo.filename() << "] err: " << errCode; } else { // update to cache at last cache_->Put(newStoreKey, encodeNewFileInfo); @@ -214,13 +204,11 @@ StoreStatus NameServerStorageImp::RenameFile(const FileInfo &oldFInfo, } StoreStatus NameServerStorageImp::ReplaceFileAndRecycleOldFile( - const FileInfo &oldFInfo, - const FileInfo &newFInfo, - const FileInfo &conflictFInfo, - const FileInfo &recycleFInfo) { + const FileInfo &oldFInfo, const FileInfo &newFInfo, + const FileInfo &conflictFInfo, const FileInfo &recycleFInfo) { std::string oldStoreKey, newStoreKey, conflictStoreKey, recycleStoreKey; auto res = GetStoreKey(oldFInfo.filetype(), oldFInfo.parentid(), - oldFInfo.filename(), &oldStoreKey); + oldFInfo.filename(), &oldStoreKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << oldFInfo.filename(); @@ -228,7 +216,7 @@ StoreStatus NameServerStorageImp::ReplaceFileAndRecycleOldFile( } res = GetStoreKey(newFInfo.filetype(), newFInfo.parentid(), - newFInfo.filename(), &newStoreKey); + newFInfo.filename(), &newStoreKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << newFInfo.filename(); @@ -236,7 +224,7 @@ StoreStatus NameServerStorageImp::ReplaceFileAndRecycleOldFile( } res = GetStoreKey(conflictFInfo.filetype(), conflictFInfo.parentid(), - conflictFInfo.filename(), &conflictStoreKey); + conflictFInfo.filename(), &conflictStoreKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << conflictFInfo.filename(); @@ -249,7 +237,7 @@ StoreStatus NameServerStorageImp::ReplaceFileAndRecycleOldFile( } res = GetStoreKey(recycleFInfo.filetype(), recycleFInfo.parentid(), - recycleFInfo.filename(), &recycleStoreKey); + recycleFInfo.filename(), &recycleStoreKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << recycleFInfo.filename(); @@ -258,16 +246,15 @@ StoreStatus NameServerStorageImp::ReplaceFileAndRecycleOldFile( std::string encodeRecycleFInfo; std::string encodeNewFInfo; - if 
(!NameSpaceStorageCodec::EncodeFileInfo( - recycleFInfo, &encodeRecycleFInfo)) { + if (!NameSpaceStorageCodec::EncodeFileInfo(recycleFInfo, + &encodeRecycleFInfo)) { LOG(ERROR) << "encode recycle file: " << recycleFInfo.filename() - << " err"; + << " err"; return StoreStatus::InternalError; } if (!NameSpaceStorageCodec::EncodeFileInfo(newFInfo, &encodeNewFInfo)) { - LOG(ERROR) << "encode recycle file: " << newFInfo.filename() - << " err"; + LOG(ERROR) << "encode recycle file: " << newFInfo.filename() << " err"; return StoreStatus::InternalError; } @@ -276,27 +263,22 @@ StoreStatus NameServerStorageImp::ReplaceFileAndRecycleOldFile( cache_->Remove(oldStoreKey); // put recycleFInfo; delete oldFInfo; put newFInfo - Operation op1{ - OpType::OpPut, - const_cast(recycleStoreKey.c_str()), - const_cast(encodeRecycleFInfo.c_str()), - recycleStoreKey.size(), encodeRecycleFInfo.size()}; - Operation op2{ - OpType::OpDelete, - const_cast(oldStoreKey.c_str()), "", - oldStoreKey.size(), 0}; - Operation op3{ - OpType::OpPut, - const_cast(newStoreKey.c_str()), - const_cast(encodeNewFInfo.c_str()), - newStoreKey.size(), encodeNewFInfo.size()}; + Operation op1{OpType::OpPut, const_cast(recycleStoreKey.c_str()), + const_cast(encodeRecycleFInfo.c_str()), + static_cast(recycleStoreKey.size()), + static_cast(encodeRecycleFInfo.size())}; + Operation op2{OpType::OpDelete, const_cast(oldStoreKey.c_str()), "", + static_cast(oldStoreKey.size()), 0}; + Operation op3{OpType::OpPut, const_cast(newStoreKey.c_str()), + const_cast(encodeNewFInfo.c_str()), + static_cast(newStoreKey.size()), + static_cast(encodeNewFInfo.size())}; std::vector ops{op1, op2, op3}; int errCode = client_->TxnN(ops); if (errCode != EtcdErrCode::EtcdOK) { - LOG(ERROR) << "rename file from [" << oldFInfo.filename() - << "] to [" << newFInfo.filename() << "] err: " - << errCode; + LOG(ERROR) << "rename file from [" << oldFInfo.filename() << "] to [" + << newFInfo.filename() << "] err: " << errCode; } else { // update to cache cache_->Put(recycleStoreKey, encodeRecycleFInfo); @@ -305,11 +287,12 @@ StoreStatus NameServerStorageImp::ReplaceFileAndRecycleOldFile( return getErrorCode(errCode); } -StoreStatus NameServerStorageImp::MoveFileToRecycle( - const FileInfo &originFileInfo, const FileInfo &recycleFileInfo) { +StoreStatus +NameServerStorageImp::MoveFileToRecycle(const FileInfo &originFileInfo, + const FileInfo &recycleFileInfo) { std::string originFileInfoKey; auto res = GetStoreKey(originFileInfo.filetype(), originFileInfo.parentid(), - originFileInfo.filename(), &originFileInfoKey); + originFileInfo.filename(), &originFileInfoKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << originFileInfo.filename(); @@ -318,7 +301,7 @@ StoreStatus NameServerStorageImp::MoveFileToRecycle( std::string recycleFileInfoKey; res = GetStoreKey(recycleFileInfo.filetype(), recycleFileInfo.parentid(), - recycleFileInfo.filename(), &recycleFileInfoKey); + recycleFileInfo.filename(), &recycleFileInfoKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << recycleFileInfo.filename(); @@ -326,10 +309,10 @@ StoreStatus NameServerStorageImp::MoveFileToRecycle( } std::string encodeRecycleFInfo; - if (!NameSpaceStorageCodec::EncodeFileInfo( - recycleFileInfo, &encodeRecycleFInfo)) { + if (!NameSpaceStorageCodec::EncodeFileInfo(recycleFileInfo, + &encodeRecycleFInfo)) { LOG(ERROR) << "encode recycle file: " << recycleFileInfo.filename() - << " err"; + << " err"; return StoreStatus::InternalError; } @@ -337,23 
+320,20 @@ StoreStatus NameServerStorageImp::MoveFileToRecycle( cache_->Remove(originFileInfoKey); // remove originFileInfo from Etcd, and put recycleFileInfo - Operation op1{ - OpType::OpDelete, - const_cast(originFileInfoKey.c_str()), "", - originFileInfoKey.size(), 0}; - Operation op2{ - OpType::OpPut, - const_cast(recycleFileInfoKey.c_str()), - const_cast(encodeRecycleFInfo.c_str()), - recycleFileInfoKey.size(), encodeRecycleFInfo.size()}; + Operation op1{OpType::OpDelete, + const_cast(originFileInfoKey.c_str()), "", + static_cast(originFileInfoKey.size()), 0}; + Operation op2{OpType::OpPut, const_cast(recycleFileInfoKey.c_str()), + const_cast(encodeRecycleFInfo.c_str()), + static_cast(recycleFileInfoKey.size()), + static_cast(encodeRecycleFInfo.size())}; std::vector ops{op1, op2}; int errCode = client_->TxnN(ops); if (errCode != EtcdErrCode::EtcdOK) { LOG(ERROR) << "move file [" << originFileInfo.filename() - << "] to recycle file [" - << recycleFileInfo.filename() << "] err: " - << errCode; + << "] to recycle file [" << recycleFileInfo.filename() + << "] err: " << errCode; } else { // update to cache cache_->Put(recycleFileInfoKey, encodeRecycleFInfo); @@ -361,8 +341,7 @@ StoreStatus NameServerStorageImp::MoveFileToRecycle( return getErrorCode(errCode); } -StoreStatus NameServerStorageImp::ListFile(InodeID startid, - InodeID endid, +StoreStatus NameServerStorageImp::ListFile(InodeID startid, InodeID endid, std::vector *files) { std::string startStoreKey; auto res = @@ -382,26 +361,25 @@ StoreStatus NameServerStorageImp::ListFile(InodeID startid, return ListFileInternal(startStoreKey, endStoreKey, files); } -StoreStatus NameServerStorageImp::ListSegment(InodeID id, - std::vector *segments) { +StoreStatus +NameServerStorageImp::ListSegment(InodeID id, + std::vector *segments) { std::string startStoreKey = - NameSpaceStorageCodec::EncodeSegmentStoreKey(id, 0); + NameSpaceStorageCodec::EncodeSegmentStoreKey(id, 0); std::string endStoreKey = - NameSpaceStorageCodec::EncodeSegmentStoreKey(id + 1, 0); + NameSpaceStorageCodec::EncodeSegmentStoreKey(id + 1, 0); std::vector out; - int errCode = client_->List( - startStoreKey, endStoreKey, &out); + int errCode = client_->List(startStoreKey, endStoreKey, &out); if (errCode != EtcdErrCode::EtcdOK) { LOG(ERROR) << "list segment err:" << errCode; return getErrorCode(errCode); } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { PageFileSegment segment; - bool decodeOK = NameSpaceStorageCodec::DecodeSegment(out[i], - &segment); + bool decodeOK = NameSpaceStorageCodec::DecodeSegment(out[i], &segment); if (decodeOK) { segments->emplace_back(segment); } else { @@ -412,20 +390,20 @@ StoreStatus NameServerStorageImp::ListSegment(InodeID id, return StoreStatus::OK; } -StoreStatus NameServerStorageImp::ListSnapshotFile(InodeID startid, - InodeID endid, - std::vector *files) { +StoreStatus +NameServerStorageImp::ListSnapshotFile(InodeID startid, InodeID endid, + std::vector *files) { std::string startStoreKey; - auto res = GetStoreKey(FileType::INODE_SNAPSHOT_PAGEFILE, - startid, "", &startStoreKey); + auto res = GetStoreKey(FileType::INODE_SNAPSHOT_PAGEFILE, startid, "", + &startStoreKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, id = " << startid; return StoreStatus::InternalError; } std::string endStoreKey; - res = GetStoreKey(FileType::INODE_SNAPSHOT_PAGEFILE, - endid, "", &endStoreKey); + res = + GetStoreKey(FileType::INODE_SNAPSHOT_PAGEFILE, endid, "", &endStoreKey); if (res != 
StoreStatus::OK) { LOG(ERROR) << "get store key failed, id = " << endid; return StoreStatus::InternalError; @@ -434,23 +412,22 @@ StoreStatus NameServerStorageImp::ListSnapshotFile(InodeID startid, return ListFileInternal(startStoreKey, endStoreKey, files); } -StoreStatus NameServerStorageImp::ListFileInternal( - const std::string& startStoreKey, - const std::string& endStoreKey, - std::vector *files) { +StoreStatus +NameServerStorageImp::ListFileInternal(const std::string &startStoreKey, + const std::string &endStoreKey, + std::vector *files) { std::vector out; - int errCode = client_->List( - startStoreKey, endStoreKey, &out); + int errCode = client_->List(startStoreKey, endStoreKey, &out); if (errCode != EtcdErrCode::EtcdOK) { LOG(ERROR) << "list file err:" << errCode; return getErrorCode(errCode); } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { FileInfo fileInfo; - bool decodeOK = NameSpaceStorageCodec::DecodeFileInfo(out[i], - &fileInfo); + bool decodeOK = + NameSpaceStorageCodec::DecodeFileInfo(out[i], &fileInfo); if (decodeOK) { files->emplace_back(fileInfo); } else { @@ -461,8 +438,7 @@ StoreStatus NameServerStorageImp::ListFileInternal( return StoreStatus::OK; } -StoreStatus NameServerStorageImp::PutSegment(InodeID id, - uint64_t off, +StoreStatus NameServerStorageImp::PutSegment(InodeID id, uint64_t off, const PageFileSegment *segment, int64_t *revision) { std::string storeKey = @@ -482,8 +458,7 @@ StoreStatus NameServerStorageImp::PutSegment(InodeID id, return getErrorCode(errCode); } -StoreStatus NameServerStorageImp::GetSegment(InodeID id, - uint64_t off, +StoreStatus NameServerStorageImp::GetSegment(InodeID id, uint64_t off, PageFileSegment *segment) { std::string storeKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id, off); @@ -498,21 +473,21 @@ StoreStatus NameServerStorageImp::GetSegment(InodeID id, if (decodeOK) { return StoreStatus::OK; } else { - LOG(ERROR) << "decode segment inodeid: " << id - << ", off: " << off <<" err"; + LOG(ERROR) << "decode segment inodeid: " << id << ", off: " << off + << " err"; return StoreStatus::InternalError; } } else if (errCode == EtcdErrCode::EtcdKeyNotExist) { LOG(INFO) << "segment not exist. 
inodeid: " << id << ", off: " << off; } else { - LOG(ERROR) << "get segment inodeid: " << id - << ", off: " << off << " err: " << errCode; + LOG(ERROR) << "get segment inodeid: " << id << ", off: " << off + << " err: " << errCode; } return getErrorCode(errCode); } -StoreStatus NameServerStorageImp::DeleteSegment( - InodeID id, uint64_t off, int64_t *revision) { +StoreStatus NameServerStorageImp::DeleteSegment(InodeID id, uint64_t off, + int64_t *revision) { std::string storeKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id, off); int errCode = client_->DeleteRewithRevision(storeKey, revision); @@ -520,14 +495,14 @@ StoreStatus NameServerStorageImp::DeleteSegment( // update the cache first, then update Etcd cache_->Remove(storeKey); if (errCode != EtcdErrCode::EtcdOK) { - LOG(ERROR) << "delete segment of inodeid: " << id - << "off: " << off << ", err:" << errCode; + LOG(ERROR) << "delete segment of inodeid: " << id << "off: " << off + << ", err:" << errCode; } return getErrorCode(errCode); } StoreStatus NameServerStorageImp::ListDiscardSegment( - std::map* discardSegments) { + std::map *discardSegments) { assert(discardSegments != nullptr); std::vector> out; @@ -538,7 +513,7 @@ StoreStatus NameServerStorageImp::ListDiscardSegment( return StoreStatus::InternalError; } - for (const auto& kv : out) { + for (const auto &kv : out) { DiscardSegmentInfo info; if (!NameSpaceStorageCodec::DecodeDiscardSegment(kv.second, &info)) { LOG(ERROR) << "Decode DiscardSegment failed"; @@ -551,8 +526,9 @@ StoreStatus NameServerStorageImp::ListDiscardSegment( return StoreStatus::OK; } -StoreStatus NameServerStorageImp::DiscardSegment( - const FileInfo& fileInfo, const PageFileSegment& segment) { +StoreStatus +NameServerStorageImp::DiscardSegment(const FileInfo &fileInfo, + const PageFileSegment &segment) { const uint64_t inodeId = fileInfo.id(); const uint64_t offset = segment.startoffset(); const std::string segmentKey = @@ -574,16 +550,14 @@ StoreStatus NameServerStorageImp::DiscardSegment( return StoreStatus::InternalError; } - Operation op1{ - OpType::OpDelete, - const_cast(segmentKey.c_str()), - const_cast(encodeSegment.c_str()), - segmentKey.size(), encodeSegment.size()}; - Operation op2{ - OpType::OpPut, - const_cast(cleanSegmentKey.c_str()), - const_cast(encodeDiscardSegment.c_str()), - cleanSegmentKey.size(), encodeDiscardSegment.size()}; + Operation op1{OpType::OpDelete, const_cast(segmentKey.c_str()), + const_cast(encodeSegment.c_str()), + static_cast(segmentKey.size()), + static_cast(encodeSegment.size())}; + Operation op2{OpType::OpPut, const_cast(cleanSegmentKey.c_str()), + const_cast(encodeDiscardSegment.c_str()), + static_cast(cleanSegmentKey.size()), + static_cast(encodeDiscardSegment.size())}; std::vector ops{op1, op2}; auto errCode = client_->TxnN(ops); @@ -600,8 +574,8 @@ StoreStatus NameServerStorageImp::DiscardSegment( } StoreStatus NameServerStorageImp::CleanDiscardSegment(uint64_t segmentSize, - const std::string& key, - int64_t* revision) { + const std::string &key, + int64_t *revision) { int errCode = client_->DeleteRewithRevision(key, revision); if (errCode != EtcdErrCode::EtcdOK) { LOG(ERROR) << "CleanDiscardSegment failed, key = " << key @@ -614,12 +588,10 @@ StoreStatus NameServerStorageImp::CleanDiscardSegment(uint64_t segmentSize, } StoreStatus NameServerStorageImp::SnapShotFile(const FileInfo *originFInfo, - const FileInfo *snapshotFInfo) { + const FileInfo *snapshotFInfo) { std::string originFileKey; - auto res = GetStoreKey(originFInfo->filetype(), - 
originFInfo->parentid(), - originFInfo->filename(), - &originFileKey); + auto res = GetStoreKey(originFInfo->filetype(), originFInfo->parentid(), + originFInfo->filename(), &originFileKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << originFInfo->filename(); @@ -627,10 +599,8 @@ StoreStatus NameServerStorageImp::SnapShotFile(const FileInfo *originFInfo, } std::string snapshotFileKey; - res = GetStoreKey(snapshotFInfo->filetype(), - snapshotFInfo->parentid(), - snapshotFInfo->filename(), - &snapshotFileKey); + res = GetStoreKey(snapshotFInfo->filetype(), snapshotFInfo->parentid(), + snapshotFInfo->filename(), &snapshotFileKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << snapshotFInfo->filename(); @@ -640,7 +610,8 @@ StoreStatus NameServerStorageImp::SnapShotFile(const FileInfo *originFInfo, std::string encodeFileInfo; std::string encodeSnapshot; if (!NameSpaceStorageCodec::EncodeFileInfo(*originFInfo, &encodeFileInfo) || - !NameSpaceStorageCodec::EncodeFileInfo(*snapshotFInfo, &encodeSnapshot)) { + !NameSpaceStorageCodec::EncodeFileInfo(*snapshotFInfo, + &encodeSnapshot)) { LOG(ERROR) << "encode originfile inodeid: " << originFInfo->id() << ", originfile: " << originFInfo->filename() << " or snapshotfile inodeid: " << snapshotFInfo->id() @@ -652,16 +623,14 @@ StoreStatus NameServerStorageImp::SnapShotFile(const FileInfo *originFInfo, cache_->Remove(originFileKey); // then update Etcd - Operation op1{ - OpType::OpPut, - const_cast(originFileKey.c_str()), - const_cast(encodeFileInfo.c_str()), - originFileKey.size(), encodeFileInfo.size()}; - Operation op2{ - OpType::OpPut, - const_cast(snapshotFileKey.c_str()), - const_cast(encodeSnapshot.c_str()), - snapshotFileKey.size(), encodeSnapshot.size()}; + Operation op1{OpType::OpPut, const_cast(originFileKey.c_str()), + const_cast(encodeFileInfo.c_str()), + static_cast(originFileKey.size()), + static_cast(encodeFileInfo.size())}; + Operation op2{OpType::OpPut, const_cast(snapshotFileKey.c_str()), + const_cast(encodeSnapshot.c_str()), + static_cast(snapshotFileKey.size()), + static_cast(encodeSnapshot.size())}; std::vector ops{op1, op2}; int errCode = client_->TxnN(ops); @@ -678,63 +647,62 @@ StoreStatus NameServerStorageImp::SnapShotFile(const FileInfo *originFInfo, return getErrorCode(errCode); } -StoreStatus NameServerStorageImp::LoadSnapShotFile( - std::vector *snapshotFiles) { - return ListFileInternal(SNAPSHOTFILEINFOKEYPREFIX, - SNAPSHOTFILEINFOKEYEND, snapshotFiles); +StoreStatus +NameServerStorageImp::LoadSnapShotFile(std::vector *snapshotFiles) { + return ListFileInternal(SNAPSHOTFILEINFOKEYPREFIX, SNAPSHOTFILEINFOKEYEND, + snapshotFiles); } StoreStatus NameServerStorageImp::getErrorCode(int errCode) { switch (errCode) { - case EtcdErrCode::EtcdOK: - return StoreStatus::OK; - - case EtcdErrCode::EtcdKeyNotExist: - return StoreStatus::KeyNotExist; - - case EtcdErrCode::EtcdUnknown: - case EtcdErrCode::EtcdInvalidArgument: - case EtcdErrCode::EtcdAlreadyExists: - case EtcdErrCode::EtcdPermissionDenied: - case EtcdErrCode::EtcdOutOfRange: - case EtcdErrCode::EtcdUnimplemented: - case EtcdErrCode::EtcdInternal: - case EtcdErrCode::EtcdNotFound: - case EtcdErrCode::EtcdDataLoss: - case EtcdErrCode::EtcdUnauthenticated: - case EtcdErrCode::EtcdCanceled: - case EtcdErrCode::EtcdDeadlineExceeded: - case EtcdErrCode::EtcdResourceExhausted: - case EtcdErrCode::EtcdFailedPrecondition: - case EtcdErrCode::EtcdAborted: - case EtcdErrCode::EtcdUnavailable: - case 
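MoveFileToRecycle, DiscardSegment and SnapShotFile all follow the same update sequence: evict the affected key from the in-memory cache first, submit both etcd operations as a single transaction (client_->TxnN), and only repopulate the cache once the transaction succeeds, so a failed transaction cannot leave a stale cache entry behind. The sketch below models that ordering with purely local stand-ins (a FakeEtcd class and a std::map cache); Curve's real cache_, client_ and Operation types are not reproduced here.

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Minimal stand-ins for the namespace-storage collaborators used above.
struct KV {
    std::string key;
    std::string value;
    bool isDelete;
};

class FakeEtcd {
 public:
    // Applies every operation and reports success; a real TxnN() would
    // apply them atomically against etcd.
    bool Txn(const std::vector<KV>& ops) {
        for (const auto& op : ops) {
            if (op.isDelete) store_.erase(op.key);
            else store_[op.key] = op.value;
        }
        return true;
    }
 private:
    std::map<std::string, std::string> store_;
};

int main() {
    std::map<std::string, std::string> cache;   // stand-in for cache_
    FakeEtcd etcd;                              // stand-in for client_

    const std::string originKey = "/files/0001";
    const std::string recycleKey = "/recycle/0001";
    const std::string encoded = "encoded-recycle-file-info";
    cache[originKey] = "encoded-file-info";

    // 1. Invalidate the cache entry before touching etcd.
    cache.erase(originKey);

    // 2. Delete the origin key and put the recycle key in one transaction.
    bool ok = etcd.Txn({{originKey, "", true}, {recycleKey, encoded, false}});

    // 3. Only cache the new key once the transaction reports success.
    if (ok) cache[recycleKey] = encoded;

    std::cout << "cached entries: " << cache.size() << '\n';
    return 0;
}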
EtcdErrCode::EtcdTxnUnkownOp: - case EtcdErrCode::EtcdObjectNotExist: - case EtcdErrCode::EtcdErrObjectType: - return StoreStatus::InternalError; + case EtcdErrCode::EtcdOK: + return StoreStatus::OK; + + case EtcdErrCode::EtcdKeyNotExist: + return StoreStatus::KeyNotExist; + + case EtcdErrCode::EtcdUnknown: + case EtcdErrCode::EtcdInvalidArgument: + case EtcdErrCode::EtcdAlreadyExists: + case EtcdErrCode::EtcdPermissionDenied: + case EtcdErrCode::EtcdOutOfRange: + case EtcdErrCode::EtcdUnimplemented: + case EtcdErrCode::EtcdInternal: + case EtcdErrCode::EtcdNotFound: + case EtcdErrCode::EtcdDataLoss: + case EtcdErrCode::EtcdUnauthenticated: + case EtcdErrCode::EtcdCanceled: + case EtcdErrCode::EtcdDeadlineExceeded: + case EtcdErrCode::EtcdResourceExhausted: + case EtcdErrCode::EtcdFailedPrecondition: + case EtcdErrCode::EtcdAborted: + case EtcdErrCode::EtcdUnavailable: + case EtcdErrCode::EtcdTxnUnkownOp: + case EtcdErrCode::EtcdObjectNotExist: + case EtcdErrCode::EtcdErrObjectType: + return StoreStatus::InternalError; - default: - return StoreStatus::InternalError; + default: + return StoreStatus::InternalError; } } -StoreStatus NameServerStorageImp::GetStoreKey(FileType filetype, - InodeID id, - const std::string& filename, - std::string* storeKey) { +StoreStatus NameServerStorageImp::GetStoreKey(FileType filetype, InodeID id, + const std::string &filename, + std::string *storeKey) { switch (filetype) { - case FileType::INODE_PAGEFILE: - case FileType::INODE_DIRECTORY: - *storeKey = NameSpaceStorageCodec::EncodeFileStoreKey(id, filename); - break; - case FileType::INODE_SNAPSHOT_PAGEFILE: - *storeKey = - NameSpaceStorageCodec::EncodeSnapShotFileStoreKey(id, filename); - break; - default: - LOG(ERROR) << "filetype: " - << filetype << " of " << filename << " not exist"; - return StoreStatus::InternalError; + case FileType::INODE_PAGEFILE: + case FileType::INODE_DIRECTORY: + *storeKey = NameSpaceStorageCodec::EncodeFileStoreKey(id, filename); + break; + case FileType::INODE_SNAPSHOT_PAGEFILE: + *storeKey = + NameSpaceStorageCodec::EncodeSnapShotFileStoreKey(id, filename); + break; + default: + LOG(ERROR) << "filetype: " << filetype << " of " << filename + << " not exist"; + return StoreStatus::InternalError; } return StoreStatus::OK; } diff --git a/src/mds/schedule/copySetScheduler.cpp b/src/mds/schedule/copySetScheduler.cpp index 84b5c84e93..418d86a55f 100644 --- a/src/mds/schedule/copySetScheduler.cpp +++ b/src/mds/schedule/copySetScheduler.cpp @@ -208,12 +208,12 @@ void CopySetScheduler::StatsCopysetDistribute( for (auto &item : distribute) { num += item.second.size(); - if (max == -1 || item.second.size() > max) { + if (max == -1 || static_cast(item.second.size()) > max) { max = item.second.size(); maxcsId = item.first; } - if (min == -1 || item.second.size() < min) { + if (min == -1 || static_cast(item.second.size()) < min) { min = item.second.size(); mincsId = item.first; } @@ -357,7 +357,7 @@ bool CopySetScheduler::CopySetSatisfiyBasicMigrationCond( } // the replica num of copyset is not standard - if (info.peers.size() != + if (static_cast(info.peers.size()) != topo_->GetStandardReplicaNumInLogicalPool(info.id.first)) { return false; } diff --git a/src/mds/schedule/leaderScheduler.cpp b/src/mds/schedule/leaderScheduler.cpp index 4d7cdc455d..2a75e6e14b 100644 --- a/src/mds/schedule/leaderScheduler.cpp +++ b/src/mds/schedule/leaderScheduler.cpp @@ -53,8 +53,8 @@ int LeaderScheduler::DoLeaderSchedule(PoolIdType lid) { int maxId = -1; int minLeaderCount = -1; int minId = -1; - 
std::vector csInfos - = topo_->GetChunkServersInLogicalPool(lid); + std::vector csInfos = + topo_->GetChunkServersInLogicalPool(lid); static std::random_device rd; static std::mt19937 g(rd()); std::shuffle(csInfos.begin(), csInfos.end(), g); @@ -65,12 +65,14 @@ int LeaderScheduler::DoLeaderSchedule(PoolIdType lid) { continue; } - if (maxLeaderCount == -1 || csInfo.leaderCount > maxLeaderCount) { + if (maxLeaderCount == -1 || + static_cast(csInfo.leaderCount) > maxLeaderCount) { maxId = csInfo.info.id; maxLeaderCount = csInfo.leaderCount; } - if (minLeaderCount == -1 || csInfo.leaderCount < minLeaderCount) { + if (minLeaderCount == -1 || + static_cast(csInfo.leaderCount) < minLeaderCount) { // the chunkserver with minLeaderCount and not in coolingTime // can be the transfer target if (!coolingTimeExpired(csInfo.startUpTime)) { @@ -85,9 +87,9 @@ int LeaderScheduler::DoLeaderSchedule(PoolIdType lid) { << ", maxLeaderCount:" << maxLeaderCount << "), (id:" << minId << ", minleaderCount:" << minLeaderCount << ")"; - // leader scheduling is not required when (maxLeaderCount-minLeaderCount <= 1) //NOLINT - if (maxLeaderCount >= 0 && - minLeaderCount >= 0 && + // leader scheduling is not required when + // (maxLeaderCount-minLeaderCount <= 1) + if (maxLeaderCount >= 0 && minLeaderCount >= 0 && maxLeaderCount - minLeaderCount <= 1) { LOG(INFO) << "leaderScheduler no need to generate transferLeader op"; return oneRoundGenOp; @@ -101,12 +103,12 @@ int LeaderScheduler::DoLeaderSchedule(PoolIdType lid) { Operator transferLeaderOutOp; CopySetInfo selectedCopySet; if (transferLeaderOut(maxId, maxLeaderCount, lid, &transferLeaderOutOp, - &selectedCopySet)) { + &selectedCopySet)) { if (opController_->AddOperator(transferLeaderOutOp)) { oneRoundGenOp += 1; LOG(INFO) << "leaderScheduler generatre operator " - << transferLeaderOutOp.OpToString() - << " for " << selectedCopySet.CopySetInfoStr() + << transferLeaderOutOp.OpToString() << " for " + << selectedCopySet.CopySetInfoStr() << " from transfer leader out"; return oneRoundGenOp; } @@ -120,12 +122,12 @@ int LeaderScheduler::DoLeaderSchedule(PoolIdType lid) { Operator transferLeaderInOp; CopySetInfo selectedCopySet; if (transferLeaderIn(minId, minLeaderCount, lid, &transferLeaderInOp, - &selectedCopySet)) { + &selectedCopySet)) { if (opController_->AddOperator(transferLeaderInOp)) { oneRoundGenOp += 1; LOG(INFO) << "leaderScheduler generatre operator " - << transferLeaderInOp.OpToString() - << " for " << selectedCopySet.CopySetInfoStr() + << transferLeaderInOp.OpToString() << " for " + << selectedCopySet.CopySetInfoStr() << " from transfer leader in"; return oneRoundGenOp; } @@ -136,13 +138,14 @@ int LeaderScheduler::DoLeaderSchedule(PoolIdType lid) { } bool LeaderScheduler::transferLeaderOut(ChunkServerIdType source, int count, - PoolIdType lid, Operator *op, CopySetInfo *selectedCopySet) { + PoolIdType lid, Operator *op, + CopySetInfo *selectedCopySet) { // find all copyset with source chunkserver as its leader as the candidate std::vector candidateInfos; for (auto &cInfo : topo_->GetCopySetInfosInLogicalPool(lid)) { // skip those copysets that the source is the follower in it if (cInfo.leader != source) { - continue; + continue; } // skip the copyset under configuration changing @@ -162,7 +165,7 @@ bool LeaderScheduler::transferLeaderOut(ChunkServerIdType source, int count, while (retryTimes < maxRetryTransferLeader) { // select a copyset from candidates randomly srand((unsigned)time(NULL)); - *selectedCopySet = 
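DoLeaderSchedule scans the shuffled chunkserver list for the largest and smallest leader counts and only generates a transfer-leader operator when the gap exceeds one, since moving a leader between two chunkservers whose counts differ by at most one cannot improve balance. A toy version of that selection logic, with plain ints standing in for Curve's ChunkServerInfo, just to make the threshold concrete:

#include <algorithm>
#include <iostream>
#include <random>
#include <vector>

int main() {
    // leaderCounts[i] = number of leaders hosted by chunkserver i.
    std::vector<int> leaderCounts{5, 7, 6, 6};

    // The scheduler shuffles candidates so ties are broken randomly.
    std::random_device rd;
    std::mt19937 g(rd());
    std::shuffle(leaderCounts.begin(), leaderCounts.end(), g);

    int maxLeaderCount = -1, minLeaderCount = -1;
    for (int count : leaderCounts) {
        if (maxLeaderCount == -1 || count > maxLeaderCount) maxLeaderCount = count;
        if (minLeaderCount == -1 || count < minLeaderCount) minLeaderCount = count;
    }

    // Transferring one leader changes the two counts by +/-1, so a gap of
    // 0 or 1 is already as balanced as it can get.
    if (maxLeaderCount - minLeaderCount <= 1) {
        std::cout << "no transfer-leader operator needed\n";
    } else {
        std::cout << "transfer a leader from the max (" << maxLeaderCount
                  << ") toward the min (" << minLeaderCount << ")\n";
    }
    return 0;
}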
candidateInfos[rand()%candidateInfos.size()]; + *selectedCopySet = candidateInfos[rand() % candidateInfos.size()]; // choose the chunkserver with least leaders from follower ChunkServerIdType targetId = UNINTIALIZE_ID; uint32_t targetLeaderCount = std::numeric_limits::max(); @@ -199,15 +202,14 @@ bool LeaderScheduler::transferLeaderOut(ChunkServerIdType source, int count, } if (targetId == UNINTIALIZE_ID || - count - 1 < targetLeaderCount + 1 || + count - 1 < static_cast(targetLeaderCount + 1) || !coolingTimeExpired(targetStartUpTime)) { retryTimes++; continue; } else { *op = operatorFactory.CreateTransferLeaderOperator( - *selectedCopySet, targetId, OperatorPriority::NormalPriority); - op->timeLimit = - std::chrono::seconds(transTimeSec_); + *selectedCopySet, targetId, OperatorPriority::NormalPriority); + op->timeLimit = std::chrono::seconds(transTimeSec_); return true; } } @@ -216,7 +218,8 @@ bool LeaderScheduler::transferLeaderOut(ChunkServerIdType source, int count, } bool LeaderScheduler::transferLeaderIn(ChunkServerIdType target, int count, - PoolIdType lid, Operator *op, CopySetInfo *selectedCopySet) { + PoolIdType lid, Operator *op, + CopySetInfo *selectedCopySet) { // find the copyset on follower and transfer leader to the target std::vector candidateInfos; for (auto &cInfo : topo_->GetCopySetInfosInLogicalPool(lid)) { @@ -246,7 +249,7 @@ bool LeaderScheduler::transferLeaderIn(ChunkServerIdType target, int count, int retryTimes = 1; while (retryTimes < maxRetryTransferLeader) { // select a copyset randomly from candidates - *selectedCopySet = candidateInfos[rand()%candidateInfos.size()]; + *selectedCopySet = candidateInfos[rand() % candidateInfos.size()]; // fetch the leader number of the leader of the selected copyset and // the target @@ -258,7 +261,7 @@ bool LeaderScheduler::transferLeaderIn(ChunkServerIdType target, int count, continue; } - if (sourceInfo.leaderCount - 1 < count + 1) { + if (static_cast(sourceInfo.leaderCount - 1) < count + 1) { retryTimes++; continue; } @@ -279,7 +282,7 @@ bool LeaderScheduler::copySetHealthy(const CopySetInfo &cInfo) { ChunkServerInfo csInfo; if (!topo_->GetChunkServerInfo(peer.id, &csInfo)) { LOG(ERROR) << "leaderScheduler cannot get info of chukServer:" - << peer.id; + << peer.id; healthy = false; break; } @@ -302,9 +305,7 @@ bool LeaderScheduler::coolingTimeExpired(uint64_t startUpTime) { return tm.tv_sec - startUpTime > chunkserverCoolingTimeSec_; } -int64_t LeaderScheduler::GetRunningInterval() { - return runInterval_; -} +int64_t LeaderScheduler::GetRunningInterval() { return runInterval_; } } // namespace schedule } // namespace mds } // namespace curve diff --git a/src/mds/schedule/operatorTemplate.h b/src/mds/schedule/operatorTemplate.h index 753f9d91b0..cfca1c7adb 100644 --- a/src/mds/schedule/operatorTemplate.h +++ b/src/mds/schedule/operatorTemplate.h @@ -98,6 +98,7 @@ OperatorT::OperatorT( EpochType startEpoch, const CopySetKey &id, OperatorPriority pri, const steady_clock::time_point &timeLimit, std::shared_ptr step) { + (void)timeLimit; this->startEpoch = startEpoch; this->copysetID.first = id.first; this->copysetID.second = id.second; diff --git a/src/mds/schedule/rapidLeaderScheduler.cpp b/src/mds/schedule/rapidLeaderScheduler.cpp index 16ead9f686..a23f30d30b 100644 --- a/src/mds/schedule/rapidLeaderScheduler.cpp +++ b/src/mds/schedule/rapidLeaderScheduler.cpp @@ -149,7 +149,7 @@ ChunkServerIdType RapidLeaderScheduler::SelectTargetPeer( // the replica with least leader number int possibleSelected = 
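The (void)timeLimit added to the OperatorT constructor (and the (void)type / (void)target casts further down in scheduleMetricsTemplate.h) is the conventional way to mark a parameter as intentionally unused, silencing -Wunused-parameter without changing the signature. Since C++17 the [[maybe_unused]] attribute expresses the same intent declaratively; a minimal illustration:

#include <iostream>

// Pre-C++17 style: keep the named parameter but discard it explicitly.
void LegacyCallback(int value, int unusedFlags) {
    (void)unusedFlags;  // intentionally unused, keeps -Wunused-parameter quiet
    std::cout << "value = " << value << '\n';
}

// C++17 style: annotate the parameter instead of casting it away.
void ModernCallback(int value, [[maybe_unused]] int unusedFlags) {
    std::cout << "value = " << value << '\n';
}

int main() {
    LegacyCallback(1, 0);
    ModernCallback(2, 0);
    return 0;
}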
MinLeaderNumInCopySetPeers(info, stat); - if (possibleSelected == curChunkServerId) { + if (possibleSelected == static_cast(curChunkServerId)) { return selected; } diff --git a/src/mds/schedule/recoverScheduler.cpp b/src/mds/schedule/recoverScheduler.cpp index 68831ccdeb..7876bd442c 100644 --- a/src/mds/schedule/recoverScheduler.cpp +++ b/src/mds/schedule/recoverScheduler.cpp @@ -77,11 +77,11 @@ int RecoverScheduler::Schedule() { // alarm if over half of the replicas are offline int deadBound = - copysetInfo.peers.size() - (copysetInfo.peers.size()/2 + 1); - if (offlinelists.size() > deadBound) { + copysetInfo.peers.size() - (copysetInfo.peers.size() / 2 + 1); + if (static_cast(offlinelists.size()) > deadBound) { LOG(ERROR) << "recoverSchdeuler find " - << copysetInfo.CopySetInfoStr() - << " has " << offlinelists.size() + << copysetInfo.CopySetInfoStr() << " has " + << offlinelists.size() << " replica offline, cannot repair, please check"; continue; } @@ -90,10 +90,10 @@ int RecoverScheduler::Schedule() { for (auto it = offlinelists.begin(); it != offlinelists.end();) { if (excludes.count(*it) > 0) { LOG(ERROR) << "can not recover offline chunkserver " << *it - << " on " << copysetInfo.CopySetInfoStr() - << ", because it's server has more than " - << chunkserverFailureTolerance_ - << " offline chunkservers"; + << " on " << copysetInfo.CopySetInfoStr() + << ", because it's server has more than " + << chunkserverFailureTolerance_ + << " offline chunkservers"; it = offlinelists.erase(it); } else { ++it; @@ -108,22 +108,21 @@ int RecoverScheduler::Schedule() { Operator fixRes; ChunkServerIdType target; // failed to recover the replica - if (!FixOfflinePeer( - copysetInfo, *offlinelists.begin(), &fixRes, &target)) { + if (!FixOfflinePeer(copysetInfo, *offlinelists.begin(), &fixRes, + &target)) { continue; - // succeeded but failed to add the operator to the controller + // succeeded but failed to add the operator to the controller } else if (!opController_->AddOperator(fixRes)) { LOG(WARNING) << "recover scheduler add operator " - << fixRes.OpToString() << " on " - << copysetInfo.CopySetInfoStr() << " fail"; + << fixRes.OpToString() << " on " + << copysetInfo.CopySetInfoStr() << " fail"; continue; - // succeeded in recovering replica and adding it to the controller + // succeeded in recovering replica and adding it to the controller } else { LOG(INFO) << "recoverScheduler generate operator:" - << fixRes.OpToString() << " for " - << copysetInfo.CopySetInfoStr() - << ", remove offlinePeer: " - << *offlinelists.begin(); + << fixRes.OpToString() << " for " + << copysetInfo.CopySetInfoStr() + << ", remove offlinePeer: " << *offlinelists.begin(); // if the target returned has the initial value, that means offline // replicas are removed directly. if (target == UNINTIALIZE_ID) { @@ -135,10 +134,11 @@ int RecoverScheduler::Schedule() { // should be generated on target. If failed to generate, delete the // operator. 
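The deadBound computed above is the number of replicas a copyset can lose while a majority of its peers stays online: peers.size() - (peers.size()/2 + 1). For the usual 3-replica copyset that is 3 - 2 = 1, so two or more offline replicas trigger the alarm instead of an automatic repair. A tiny check of that arithmetic:

#include <cstddef>
#include <iostream>

int main() {
    for (size_t peers = 3; peers <= 5; ++peers) {
        size_t majority = peers / 2 + 1;
        size_t deadBound = peers - majority;  // tolerable offline replicas
        std::cout << peers << " replicas -> majority " << majority
                  << ", can repair up to " << deadBound
                  << " offline replica(s)\n";
    }
    return 0;
}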
if (!topo_->CreateCopySetAtChunkServer(copysetInfo.id, target)) { - LOG(WARNING) << "recoverScheduler create " - << copysetInfo.CopySetInfoStr() - << " on chunkServer: " << target - << " error, delete operator" << fixRes.OpToString(); + LOG(WARNING) + << "recoverScheduler create " + << copysetInfo.CopySetInfoStr() + << " on chunkServer: " << target + << " error, delete operator" << fixRes.OpToString(); opController_->RemoveOperator(copysetInfo.id); continue; } @@ -150,24 +150,21 @@ int RecoverScheduler::Schedule() { return 1; } -int64_t RecoverScheduler::GetRunningInterval() { - return runInterval_; -} +int64_t RecoverScheduler::GetRunningInterval() { return runInterval_; } -bool RecoverScheduler::FixOfflinePeer( - const CopySetInfo &info, ChunkServerIdType peerId, - Operator *op, ChunkServerIdType *target) { +bool RecoverScheduler::FixOfflinePeer(const CopySetInfo &info, + ChunkServerIdType peerId, Operator *op, + ChunkServerIdType *target) { assert(op != nullptr); // check the standard number of replicas first auto standardReplicaNum = topo_->GetStandardReplicaNumInLogicalPool(info.id.first); if (standardReplicaNum <= 0) { - LOG(WARNING) << "RecoverScheduler find logical pool " - << info.id.first << " standard num " - << standardReplicaNum << " invalid"; + LOG(WARNING) << "RecoverScheduler find logical pool " << info.id.first + << " standard num " << standardReplicaNum << " invalid"; return false; } - if (info.peers.size() > standardReplicaNum) { + if (static_cast(info.peers.size()) > standardReplicaNum) { // remove the offline replica *op = operatorFactory.CreateRemovePeerOperator( info, peerId, OperatorPriority::HighPriority); @@ -180,8 +177,9 @@ bool RecoverScheduler::FixOfflinePeer( auto csId = SelectBestPlacementChunkServer(info, peerId); if (csId == UNINTIALIZE_ID) { LOG(WARNING) << "recoverScheduler can not select chunkServer to " - "repair " << info.CopySetInfoStr() - << ", which replica: " << peerId << " is offline"; + "repair " + << info.CopySetInfoStr() << ", which replica: " << peerId + << " is offline"; return false; } else { *op = operatorFactory.CreateChangePeerOperator( @@ -220,17 +218,19 @@ void RecoverScheduler::CalculateExcludesChunkServer( // tolerance threshold. 
If it does, the chunkservers on this server will not // be recovered for (auto item : unhealthyStateCS) { - if (item.second.size() < chunkserverFailureTolerance_) { + if (static_cast(item.second.size()) < + chunkserverFailureTolerance_) { continue; } - LOG(WARNING) << "server " << item.first << " has " - << item.second.size() << " offline chunkservers"; + LOG(WARNING) << "server " << item.first << " has " << item.second.size() + << " offline chunkservers"; for (auto cs : item.second) { excludes->emplace(cs); } } - // if the chunkserver is in pending status, it will be considered recoverable //NOLINT + // if the chunkserver is in pending status, it will be considered + // recoverable //NOLINT for (auto it : pendingCS) { excludes->erase(it); } @@ -238,4 +238,3 @@ void RecoverScheduler::CalculateExcludesChunkServer( } // namespace schedule } // namespace mds } // namespace curve - diff --git a/src/mds/schedule/scanScheduler.cpp b/src/mds/schedule/scanScheduler.cpp index 72ea4fa902..6f6bc3af48 100644 --- a/src/mds/schedule/scanScheduler.cpp +++ b/src/mds/schedule/scanScheduler.cpp @@ -35,44 +35,44 @@ int ScanScheduler::Schedule() { LOG(INFO) << "ScanScheduler begin."; auto currentHour = ::curve::common::TimeUtility::GetCurrentHour(); - bool duringScanTime = currentHour >= scanStartHour_ && - currentHour <= scanEndHour_; + bool duringScanTime = + currentHour >= scanStartHour_ && currentHour <= scanEndHour_; auto count = 0; ::curve::mds::topology::LogicalPool lpool; auto logicPoolIds = topo_->GetLogicalpools(); - for (const auto& lpid : logicPoolIds) { + for (const auto &lpid : logicPoolIds) { CopySetInfos copysets2start, copysets2cancel; auto copysetInfos = topo_->GetCopySetInfosInLogicalPool(lpid); topo_->GetLogicalPool(lpid, &lpool); if (!duringScanTime || !lpool.ScanEnable()) { - for (const auto& copysetInfo : copysetInfos) { + for (const auto ©setInfo : copysetInfos) { if (StartOrReadyToScan(copysetInfo)) { copysets2cancel.push_back(copysetInfo); } } } else { - SelectCopysetsForScan( - copysetInfos, ©sets2start, ©sets2cancel); + SelectCopysetsForScan(copysetInfos, ©sets2start, + ©sets2cancel); } - count += GenScanOperator(copysets2start, - ConfigChangeType::START_SCAN_PEER); + count += + GenScanOperator(copysets2start, ConfigChangeType::START_SCAN_PEER); count += GenScanOperator(copysets2cancel, ConfigChangeType::CANCEL_SCAN_PEER); } - LOG(INFO) << "ScanScheduelr generate " - << count << " operators at this round"; + LOG(INFO) << "ScanScheduelr generate " << count + << " operators at this round"; return 1; } -bool ScanScheduler::StartOrReadyToScan(const CopySetInfo& copysetInfo) { +bool ScanScheduler::StartOrReadyToScan(const CopySetInfo ©setInfo) { Operator op; if (copysetInfo.scaning) { return true; } else if (opController_->GetOperatorById(copysetInfo.id, &op)) { - auto step = dynamic_cast(op.step.get()); + auto step = dynamic_cast(op.step.get()); return nullptr != step && step->IsStartScanOp(); } else if (copysetInfo.HasCandidate()) { return copysetInfo.configChangeInfo.type() == @@ -82,12 +82,11 @@ bool ScanScheduler::StartOrReadyToScan(const CopySetInfo& copysetInfo) { return false; } -void ScanScheduler::SelectCopysetsToStartScan(CopySetInfos* copysetInfos, - int count, - Selected* selected, - CopySetInfos* copysets2start) { +void ScanScheduler::SelectCopysetsToStartScan(CopySetInfos *copysetInfos, + int count, Selected *selected, + CopySetInfos *copysets2start) { std::sort(copysetInfos->begin(), copysetInfos->end(), - [](const CopySetInfo& a, const CopySetInfo& b) { + [](const 
CopySetInfo &a, const CopySetInfo &b) { if (a.lastScanSec == b.lastScanSec) { return a.id < b.id; } @@ -95,14 +94,15 @@ void ScanScheduler::SelectCopysetsToStartScan(CopySetInfos* copysetInfos, }); auto nowSec = ::curve::common::TimeUtility::GetTimeofDaySec(); - for (const auto& copysetInfo : *copysetInfos) { + for (const auto ©setInfo : *copysetInfos) { if (nowSec - copysetInfo.lastScanSec < scanIntervalSec_ || count <= 0) { return; } bool succ = true; - for (const auto& peer : copysetInfo.peers) { - if ((*selected)[peer.id] >= scanConcurrentPerChunkserver_) { + for (const auto &peer : copysetInfo.peers) { + if ((*selected)[peer.id] >= + static_cast(scanConcurrentPerChunkserver_)) { succ = false; break; } @@ -112,25 +112,25 @@ void ScanScheduler::SelectCopysetsToStartScan(CopySetInfos* copysetInfos, if (succ) { count--; copysets2start->push_back(copysetInfo); - for (const auto& peer : copysetInfo.peers) { + for (const auto &peer : copysetInfo.peers) { (*selected)[peer.id]++; } } } } -void ScanScheduler::SelectCopysetsToCancelScan(CopySetInfos* copysetInfos, +void ScanScheduler::SelectCopysetsToCancelScan(CopySetInfos *copysetInfos, int count, - CopySetInfos* copysets2cancel) { + CopySetInfos *copysets2cancel) { std::sort(copysetInfos->begin(), copysetInfos->end(), - [](const CopySetInfo& a, const CopySetInfo& b) { + [](const CopySetInfo &a, const CopySetInfo &b) { if (a.scaning == b.scaning) { return a.id < b.id; } return a.scaning == false; }); - for (const auto& copysetInfo : *copysetInfos) { + for (const auto ©setInfo : *copysetInfos) { if (count-- <= 0) { return; } @@ -138,15 +138,15 @@ void ScanScheduler::SelectCopysetsToCancelScan(CopySetInfos* copysetInfos, } } -void ScanScheduler::SelectCopysetsForScan(const CopySetInfos& copysetInfos, - CopySetInfos* copysets2start, - CopySetInfos* copysets2cancel) { +void ScanScheduler::SelectCopysetsForScan(const CopySetInfos ©setInfos, + CopySetInfos *copysets2start, + CopySetInfos *copysets2cancel) { CopySetInfos scaning, nonScan; Selected selected; // scaning chunk server - for (const auto& copysetInfo : copysetInfos) { + for (const auto ©setInfo : copysetInfos) { if (StartOrReadyToScan(copysetInfo)) { scaning.push_back(copysetInfo); - for (const auto& peer : copysetInfo.peers) { + for (const auto &peer : copysetInfo.peers) { selected[peer.id]++; } LOG(INFO) << "Copyset is on scaning: " @@ -168,11 +168,11 @@ void ScanScheduler::SelectCopysetsForScan(const CopySetInfos& copysetInfos, } } -int ScanScheduler::GenScanOperator(const CopySetInfos& copysetInfos, +int ScanScheduler::GenScanOperator(const CopySetInfos ©setInfos, ConfigChangeType opType) { auto count = 0; bool ready2start = (opType == ConfigChangeType::START_SCAN_PEER); - for (auto& copysetInfo : copysetInfos) { + for (auto ©setInfo : copysetInfos) { auto priority = ready2start ? OperatorPriority::LowPriority : OperatorPriority::HighPriority; @@ -182,17 +182,15 @@ int ScanScheduler::GenScanOperator(const CopySetInfos& copysetInfos, auto succ = opController_->AddOperator(op); count += succ ? 1 : 0; - LOG(INFO) << "Generate operator " << op.OpToString() - << " for " << copysetInfo.CopySetInfoStr() + LOG(INFO) << "Generate operator " << op.OpToString() << " for " + << copysetInfo.CopySetInfoStr() << (succ ? 
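SelectCopysetsToStartScan sorts the candidates by lastScanSec and then skips any copyset whose peers already host scanConcurrentPerChunkserver_ running scans. The standalone model below simplifies that selection with small structs instead of Curve's CopySetInfo, and assumes oldest-scan-first ordering and a limit of one concurrent scan per chunkserver:

#include <algorithm>
#include <iostream>
#include <map>
#include <vector>

struct Copyset {
    int id;
    long lastScanSec;        // last time this copyset was scanned
    std::vector<int> peers;  // chunkserver ids hosting the replicas
};

int main() {
    std::vector<Copyset> candidates{
        {1, 100, {1, 2, 3}},
        {2,  50, {2, 3, 4}},
        {3,  80, {4, 5, 6}},
    };
    const int scanConcurrentPerChunkserver = 1;  // assumed limit
    int budget = 2;                              // scans to start this round

    // Longest-unscanned copysets first; ties broken by id for determinism.
    std::sort(candidates.begin(), candidates.end(),
              [](const Copyset& a, const Copyset& b) {
                  if (a.lastScanSec == b.lastScanSec) return a.id < b.id;
                  return a.lastScanSec < b.lastScanSec;
              });

    std::map<int, int> selected;  // chunkserver id -> scans already assigned
    for (const auto& cs : candidates) {
        if (budget <= 0) break;
        bool free = true;
        for (int peer : cs.peers) {
            if (selected[peer] >= scanConcurrentPerChunkserver) {
                free = false;
                break;
            }
        }
        if (!free) continue;
        budget--;
        for (int peer : cs.peers) selected[peer]++;
        std::cout << "start scan on copyset " << cs.id << '\n';
    }
    return 0;
}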
" success" : " fail"); } return count; } -int64_t ScanScheduler::GetRunningInterval() { - return runInterval_; -} +int64_t ScanScheduler::GetRunningInterval() { return runInterval_; } } // namespace schedule } // namespace mds diff --git a/src/mds/schedule/scheduleMetricsTemplate.h b/src/mds/schedule/scheduleMetricsTemplate.h index 416d9c278f..3bec2b679c 100644 --- a/src/mds/schedule/scheduleMetricsTemplate.h +++ b/src/mds/schedule/scheduleMetricsTemplate.h @@ -242,6 +242,9 @@ void ScheduleMetricsT< TopoCopySetInfoT>::RemoveUpdateOperatorsMap(const Operator &op, std::string type, IdType target) { + (void)type; + (void)target; + auto findOp = operators.find(op.copysetID); if (findOp == operators.end()) { return; @@ -306,7 +309,7 @@ void ScheduleMetricsT(members.size())) { copysetPeers += hostPort; } else { copysetPeers += hostPort + ","; diff --git a/src/mds/schedule/scheduler.cpp b/src/mds/schedule/scheduler.cpp index 801842a26b..0117b8607b 100644 --- a/src/mds/schedule/scheduler.cpp +++ b/src/mds/schedule/scheduler.cpp @@ -116,7 +116,7 @@ ChunkServerIdType Scheduler::SelectBestPlacementChunkServer( << " invalid"; return UNINTIALIZE_ID; } - if (excludeZones.size() >= standardZoneNum) { + if (static_cast(excludeZones.size()) >= standardZoneNum) { excludeZones.clear(); } @@ -210,7 +210,7 @@ ChunkServerIdType Scheduler::SelectRedundantReplicaToRemove( " replicaNum must >=0, please check"; return UNINTIALIZE_ID; } - if (copySetInfo.peers.size() <= standardReplicaNum) { + if (static_cast(copySetInfo.peers.size()) <= standardReplicaNum) { LOG(ERROR) << "topoAdapter cannot select redundent replica for " << copySetInfo.CopySetInfoStr() << ", beacuse replicaNum " << copySetInfo.peers.size() @@ -242,7 +242,7 @@ ChunkServerIdType Scheduler::SelectRedundantReplicaToRemove( // 1. 
alarm if the zone number is lass than the standard // TODO(lixiaocui): adjust by adding or deleting replica in this case - if (zoneList.size() < standardZoneNum) { + if (static_cast(zoneList.size()) < standardZoneNum) { LOG(ERROR) << "topoAdapter find " << copySetInfo.CopySetInfoStr() << " replicas distribute in " << zoneList.size() << " zones, less than standard zoneNum " @@ -265,7 +265,7 @@ ChunkServerIdType Scheduler::SelectRedundantReplicaToRemove( std::vector candidateChunkServer; for (auto item : zoneList) { if (item.second.size() == 1) { - if (zoneList.size() == standardZoneNum) { + if (static_cast(zoneList.size()) == standardZoneNum) { continue; } } diff --git a/src/mds/schedule/scheduler_helper.cpp b/src/mds/schedule/scheduler_helper.cpp index f17f303c04..3e0fa106c0 100644 --- a/src/mds/schedule/scheduler_helper.cpp +++ b/src/mds/schedule/scheduler_helper.cpp @@ -139,7 +139,7 @@ bool SchedulerHelper::SatisfyZoneAndScatterWidthLimit( zoneList[targetZone] += 1; } - if (zoneList.size() < minZone) { + if (static_cast(zoneList.size()) < minZone) { return false; } diff --git a/src/mds/schedule/topoAdapter.cpp b/src/mds/schedule/topoAdapter.cpp index 1b235d0e5e..47cab8cb2c 100644 --- a/src/mds/schedule/topoAdapter.cpp +++ b/src/mds/schedule/topoAdapter.cpp @@ -308,7 +308,8 @@ bool TopoAdapterImpl::GetPeerInfo(ChunkServerIdType id, PeerInfo *peerInfo) { ::curve::mds::topology::ChunkServer cs; ::curve::mds::topology::Server server; - bool canGetChunkServer, canGetServer; + bool canGetChunkServer = false; + bool canGetServer = false; if ((canGetChunkServer = topo_->GetChunkServer(id, &cs)) && (canGetServer = topo_->GetServer(cs.GetServerId(), &server))) { *peerInfo = PeerInfo( diff --git a/src/mds/schedule/topoAdapter.h b/src/mds/schedule/topoAdapter.h index d9e74259e8..348cf95fa2 100644 --- a/src/mds/schedule/topoAdapter.h +++ b/src/mds/schedule/topoAdapter.h @@ -148,7 +148,7 @@ struct CopySetInfo { struct ChunkServerInfo { public: ChunkServerInfo() : - leaderCount(0), diskCapacity(0), diskUsed(0), startUpTime(0) {} + startUpTime(0), leaderCount(0), diskCapacity(0), diskUsed(0) {} ChunkServerInfo(const PeerInfo &info, OnlineState state, DiskState diskState, ChunkServerStatus status, uint32_t leaderCount, uint64_t capacity, uint64_t used, diff --git a/src/mds/server/mds.cpp b/src/mds/server/mds.cpp index 7085ce1e25..2ebcdb137f 100644 --- a/src/mds/server/mds.cpp +++ b/src/mds/server/mds.cpp @@ -21,11 +21,14 @@ */ #include +#include #include "src/mds/server/mds.h" #include "src/mds/nameserver2/helper/namespace_helper.h" #include "src/mds/topology/topology_storge_etcd.h" #include "src/common/lru_cache.h" +#include "absl/strings/str_split.h" + using ::curve::mds::topology::TopologyStorageEtcd; using ::curve::mds::topology::TopologyStorageCodec; using ::curve::mds::topology::ChunkServerRegistInfoBuilder; @@ -244,7 +247,7 @@ void MDS::InitEtcdClient(const EtcdConf& etcdConf, auto res = etcdClient_->Init(etcdConf, etcdTimeout, retryTimes); LOG_IF(FATAL, res != EtcdErrCode::EtcdOK) << "init etcd client err! " - << "etcdaddr: " << std::string{etcdConf.Endpoints, etcdConf.len} + << "etcdaddr: " << std::string(etcdConf.Endpoints, etcdConf.len) << ", etcdaddr len: " << etcdConf.len << ", etcdtimeout: " << etcdConf.DialTimeout << ", operation timeout: " << etcdTimeout @@ -258,7 +261,7 @@ void MDS::InitEtcdClient(const EtcdConf& etcdConf, << "Run mds err. Check if etcd is running."; LOG(INFO) << "init etcd client ok! 
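The ChunkServerInfo constructor change in topoAdapter.h is purely about initialization order: members are always initialized in the order they are declared, not in the order they appear in the mem-initializer list, so writing the list out of declaration order changes nothing and only earns a -Wreorder warning, which is why the list was rewritten to match the declarations. Likewise, the two bools in GetPeerInfo now get explicit initial values so they cannot be read uninitialized when only one of the lookups runs. A compact illustration of the reorder rule:

#include <iostream>

struct ChunkServerInfoLike {
    // Declaration order decides initialization order.
    unsigned long startUpTime;
    unsigned int leaderCount;

    // Writing the initializers in declaration order keeps -Wreorder quiet
    // and makes the actual order obvious to readers.
    ChunkServerInfoLike() : startUpTime(0), leaderCount(0) {}
};

int main() {
    ChunkServerInfoLike info;
    std::cout << info.startUpTime << " " << info.leaderCount << '\n';

    // Locals, unlike members with initializers, are not implicitly zeroed,
    // hence the explicit "= false" added in GetPeerInfo.
    bool canGetChunkServer = false;
    bool canGetServer = false;
    std::cout << canGetChunkServer << canGetServer << '\n';
    return 0;
}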
" - << "etcdaddr: " << std::string{etcdConf.Endpoints, etcdConf.len} + << "etcdaddr: " << std::string(etcdConf.Endpoints, etcdConf.len) << ", etcdaddr len: " << etcdConf.len << ", etcdtimeout: " << etcdConf.DialTimeout << ", operation timeout: " << etcdTimeout @@ -510,13 +513,15 @@ void MDS::InitCurveFSOptions(CurveFSOption *curveFSOptions) { "mds.curvefs.minFileLength", &curveFSOptions->minFileLength); conf_->GetValueFatalIfFail( "mds.curvefs.maxFileLength", &curveFSOptions->maxFileLength); - FileRecordOptions fileRecordOptions; InitFileRecordOptions(&curveFSOptions->fileRecordOptions); - RootAuthOption authOptions; InitAuthOptions(&curveFSOptions->authOptions); InitThrottleOption(&curveFSOptions->throttleOption); + + LOG_IF(FATAL, !ParsePoolsetRules(conf_->GetStringValue("mds.poolset.rules"), + &curveFSOptions->poolsetRules)) + << "Fail to parse poolset rules"; } void MDS::InitDLockOption(std::shared_ptr dlockOpts) { @@ -657,5 +662,32 @@ void MDS::InitHeartbeatOption(HeartbeatOption* heartbeatOption) { conf_->GetValueFatalIfFail("mds.heartbeat.clean_follower_afterMs", &heartbeatOption->cleanFollowerAfterMs); } + +bool ParsePoolsetRules(const std::string& str, + std::map* rules) { + rules->clear(); + + if (str.empty()) { + return true; + } + + for (absl::string_view sp : absl::StrSplit(str, ';')) { + rules->insert(absl::StrSplit(sp, ':')); + } + + for (const auto& rule : *rules) { + const auto& key = rule.first; + + if (key.empty() || key.front() != '/' || key.back() != '/') { + LOG(ERROR) << "Invalid poolset rules, key must starts and ends " + "with `/`, rules: `" + << str << "`"; + return false; + } + } + + return true; +} + } // namespace mds } // namespace curve diff --git a/src/mds/server/mds.h b/src/mds/server/mds.h index e38ce476e2..30e96a5105 100644 --- a/src/mds/server/mds.h +++ b/src/mds/server/mds.h @@ -29,6 +29,7 @@ #include #include #include +#include #include "src/mds/nameserver2/namespace_storage.h" #include "src/mds/nameserver2/namespace_service.h" @@ -239,6 +240,9 @@ class MDS { std::shared_ptr snapshotCloneClient_; }; +bool ParsePoolsetRules(const std::string& str, + std::map* rules); + } // namespace mds } // namespace curve diff --git a/src/mds/snapshotcloneclient/snapshotclone_client.cpp b/src/mds/snapshotcloneclient/snapshotclone_client.cpp index 3f76d111a7..c8a04f41e2 100644 --- a/src/mds/snapshotcloneclient/snapshotclone_client.cpp +++ b/src/mds/snapshotcloneclient/snapshotclone_client.cpp @@ -24,32 +24,32 @@ #include #include -using curve::snapshotcloneserver::kServiceName; +#include + using curve::snapshotcloneserver::kActionStr; -using curve::snapshotcloneserver::kGetCloneRefStatusAction; -using curve::snapshotcloneserver::kVersionStr; -using curve::snapshotcloneserver::kUserStr; -using curve::snapshotcloneserver::kSourceStr; -using curve::snapshotcloneserver::kCodeStr; -using curve::snapshotcloneserver::kRefStatusStr; -using curve::snapshotcloneserver::kTotalCountStr; using curve::snapshotcloneserver::kCloneFileInfoStr; +using curve::snapshotcloneserver::kCodeStr; using curve::snapshotcloneserver::kFileStr; +using curve::snapshotcloneserver::kGetCloneRefStatusAction; using curve::snapshotcloneserver::kInodeStr; +using curve::snapshotcloneserver::kRefStatusStr; +using curve::snapshotcloneserver::kServiceName; +using curve::snapshotcloneserver::kSourceStr; +using curve::snapshotcloneserver::kTotalCountStr; +using curve::snapshotcloneserver::kUserStr; +using curve::snapshotcloneserver::kVersionStr; namespace curve { namespace mds { namespace snapshotcloneclient { 
-StatusCode SnapshotCloneClient::GetCloneRefStatus(std::string filename, - std::string user, - CloneRefStatus *status, - std::vector *fileCheckList) { +StatusCode SnapshotCloneClient::GetCloneRefStatus( + std::string filename, std::string user, CloneRefStatus *status, + std::vector *fileCheckList) { if (!inited_) { LOG(WARNING) << "GetCloneRefStatus, snapshot clone server not inited" - << ", filename = " << filename - << ", user = " << user; + << ", filename = " << filename << ", user = " << user; return StatusCode::kSnapshotCloneServerNotInit; } @@ -57,17 +57,13 @@ StatusCode SnapshotCloneClient::GetCloneRefStatus(std::string filename, brpc::ChannelOptions option; option.protocol = "http"; - std::string url = addr_ - + "/" + kServiceName + "?" - + kActionStr+ "=" + kGetCloneRefStatusAction + "&" - + kVersionStr + "=1&" - + kUserStr + "=" + user + "&" - + kSourceStr + "=" + filename; + std::string url = addr_ + "/" + kServiceName + "?" + kActionStr + "=" + + kGetCloneRefStatusAction + "&" + kVersionStr + "=1&" + + kUserStr + "=" + user + "&" + kSourceStr + "=" + filename; if (channel.Init(url.c_str(), "", &option) != 0) { LOG(ERROR) << "GetCloneRefStatus, Fail to init channel, url is " << url - << ", filename = " << filename - << ", user = " << user; + << ", filename = " << filename << ", user = " << user; return StatusCode::kSnapshotCloneConnectFail; } @@ -77,8 +73,7 @@ StatusCode SnapshotCloneClient::GetCloneRefStatus(std::string filename, channel.CallMethod(NULL, &cntl, NULL, NULL, NULL); if (cntl.Failed()) { LOG(ERROR) << "GetCloneRefStatus, CallMethod faile, errMsg :" - << cntl.ErrorText() - << ", filename = " << filename + << cntl.ErrorText() << ", filename = " << filename << ", user = " << user; return StatusCode::KInternalError; } @@ -86,12 +81,16 @@ StatusCode SnapshotCloneClient::GetCloneRefStatus(std::string filename, std::stringstream ss; ss << cntl.response_attachment(); std::string data = ss.str(); - Json::Reader jsonReader; + + Json::CharReaderBuilder jsonBuilder; + std::unique_ptr jsonReader(jsonBuilder.newCharReader()); Json::Value jsonObj; - if (!jsonReader.parse(data, jsonObj)) { + JSONCPP_STRING errormsg; + if (!jsonReader->parse(data.data(), data.data() + data.length(), &jsonObj, + &errormsg)) { LOG(ERROR) << "GetCloneRefStatus, parse json fail, data = " << data - << ", filename = " << filename - << ", user = " << user; + << ", filename = " << filename << ", user = " << user + << ", error = " << errormsg; return StatusCode::KInternalError; } @@ -100,27 +99,24 @@ StatusCode SnapshotCloneClient::GetCloneRefStatus(std::string filename, std::string requestCode = jsonObj[kCodeStr].asCString(); if (requestCode != "0") { LOG(ERROR) << "GetCloneRefStatus, Code is not 0, data = " << data - << ", filename = " << filename - << ", user = " << user; + << ", filename = " << filename << ", user = " << user; return StatusCode::KInternalError; } CloneRefStatus tempStatus = - static_cast(jsonObj[kRefStatusStr].asInt()); + static_cast(jsonObj[kRefStatusStr].asInt()); *status = tempStatus; - if (tempStatus == CloneRefStatus::kNoRef - || tempStatus == CloneRefStatus::kHasRef) { + if (tempStatus == CloneRefStatus::kNoRef || + tempStatus == CloneRefStatus::kHasRef) { return StatusCode::kOK; } if (tempStatus != CloneRefStatus::kNeedCheck) { LOG(ERROR) << "GetCloneRefStatus, invalid status, data = " << data - << ", filename = " << filename - << ", user = " << user; + << ", filename = " << filename << ", user = " << user; return StatusCode::KInternalError; } - int totalCount = 
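The snapshot-clone client switches from the legacy Json::Reader to the Json::CharReaderBuilder / Json::CharReader API, which parses an explicit [begin, end) character range and reports failures through an error string. A minimal standalone usage of that API; the field names below are sample data, not the snapshot-clone wire format:

#include <iostream>
#include <memory>
#include <string>

#include <json/json.h>  // jsoncpp

int main() {
    const std::string data = R"({"Code": "0", "RefStatus": 2, "TotalCount": 1})";

    Json::CharReaderBuilder builder;
    std::unique_ptr<Json::CharReader> reader(builder.newCharReader());

    Json::Value root;
    JSONCPP_STRING errs;
    if (!reader->parse(data.data(), data.data() + data.size(), &root, &errs)) {
        std::cerr << "parse failed: " << errs << '\n';
        return 1;
    }

    std::cout << "Code = " << root["Code"].asString()
              << ", RefStatus = " << root["RefStatus"].asInt() << '\n';
    return 0;
}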
jsonObj[kTotalCountStr].asInt(); int listSize = jsonObj[kCloneFileInfoStr].size(); for (int i = 0; i < listSize; i++) { DestFileInfo file; @@ -138,10 +134,7 @@ void SnapshotCloneClient::Init(const SnapshotCloneClientOption &option) { } } -bool SnapshotCloneClient::GetInitStatus() { - return inited_; -} +bool SnapshotCloneClient::GetInitStatus() { return inited_; } } // namespace snapshotcloneclient } // namespace mds } // namespace curve - diff --git a/src/mds/topology/topology.cpp b/src/mds/topology/topology.cpp index c88ab48eb8..e490e533d0 100644 --- a/src/mds/topology/topology.cpp +++ b/src/mds/topology/topology.cpp @@ -20,18 +20,28 @@ * Author: xuchaojie */ #include "src/mds/topology/topology.h" + #include + +#include + +#include "src/common/namespace_define.h" #include "src/common/timeutility.h" #include "src/common/uuid.h" - -#include //NOLINT +#include "src/mds/common/mds_define.h" using ::curve::common::UUIDGenerator; +using ::curve::common::kDefaultPoolsetId; +using ::curve::common::kDefaultPoolsetName; namespace curve { namespace mds { namespace topology { +PoolsetIdType TopologyImpl::AllocatePoolsetId() { + return idGenerator_->GenPoolsetId(); +} + PoolIdType TopologyImpl::AllocateLogicalPoolId() { return idGenerator_->GenLogicalPoolId(); } @@ -56,6 +66,19 @@ std::string TopologyImpl::AllocateToken() { return tokenGenerator_->GenToken(); } +int TopologyImpl::AddPoolset(const Poolset &data) { + WriteLockGuard wlockPoolset(poolsetMutex_); + if (poolsetMap_.find(data.GetId()) == poolsetMap_.end()) { + if (!storage_->StoragePoolset(data)) { + return kTopoErrCodeStorgeFail; + } + poolsetMap_.emplace(data.GetId(), data); + return kTopoErrCodeSuccess; + } else { + return kTopoErrCodeIdDuplicated; + } +} + int TopologyImpl::AddLogicalPool(const LogicalPool &data) { ReadLockGuard rlockPhysicalPool(physicalPoolMutex_); WriteLockGuard wlockLogicalPool(logicalPoolMutex_); @@ -76,15 +99,22 @@ int TopologyImpl::AddLogicalPool(const LogicalPool &data) { } int TopologyImpl::AddPhysicalPool(const PhysicalPool &data) { + ReadLockGuard rlockPoolset(poolsetMutex_); WriteLockGuard wlockPhysicalPool(physicalPoolMutex_); - if (physicalPoolMap_.find(data.GetId()) == physicalPoolMap_.end()) { - if (!storage_->StoragePhysicalPool(data)) { - return kTopoErrCodeStorgeFail; + auto it = poolsetMap_.find(data.GetPoolsetId()); + if (it != poolsetMap_.end()) { + if (physicalPoolMap_.find(data.GetId()) == physicalPoolMap_.end()) { + if (!storage_->StoragePhysicalPool(data)) { + return kTopoErrCodeStorgeFail; + } + it->second.AddPhysicalPool(data.GetId()); + physicalPoolMap_.emplace(data.GetId(), data); + return kTopoErrCodeSuccess; + } else { + return kTopoErrCodeIdDuplicated; } - physicalPoolMap_[data.GetId()] = data; - return kTopoErrCodeSuccess; } else { - return kTopoErrCodeIdDuplicated; + return kTopoErrCodePoolsetNotFound; } } @@ -184,7 +214,30 @@ int TopologyImpl::RemoveLogicalPool(PoolIdType id) { } } +int TopologyImpl::RemovePoolset(PoolsetIdType id) { + if (id == kDefaultPoolsetId) { + LOG(WARNING) << "Default poolset is not allowed to be deleted"; + return kTopoErrCodeCannotDeleteDefaultPoolset; + } + + WriteLockGuard wlockPoolset(poolsetMutex_); + auto it = poolsetMap_.find(id); + if (it != poolsetMap_.end()) { + if (!it->second.GetPhysicalPoolList().empty()) { + return kTopoErrCodeCannotRemoveWhenNotEmpty; + } + if (!storage_->DeletePoolset(id)) { + return kTopoErrCodeStorgeFail; + } + poolsetMap_.erase(it); + return kTopoErrCodeSuccess; + } else { + return kTopoErrCodePoolsetNotFound; + } +} + int 
TopologyImpl::RemovePhysicalPool(PoolIdType id) { + WriteLockGuard wlockPoolset(poolsetMutex_); WriteLockGuard wlockPhysicalPool(physicalPoolMutex_); auto it = physicalPoolMap_.find(id); if (it != physicalPoolMap_.end()) { @@ -194,6 +247,11 @@ int TopologyImpl::RemovePhysicalPool(PoolIdType id) { if (!storage_->DeletePhysicalPool(id)) { return kTopoErrCodeStorgeFail; } + auto ix = poolsetMap_.find(it->second.GetPoolsetId()); + if (ix != poolsetMap_.end()) { + ix->second.RemovePhysicalPool(id); + } + physicalPoolMap_.erase(it); return kTopoErrCodeSuccess; } else { @@ -544,6 +602,16 @@ int TopologyImpl::UpdateChunkServerStartUpTime(uint64_t time, } } +PoolsetIdType TopologyImpl::FindPoolset(const std::string &poolsetName) const { + ReadLockGuard rlockPoolset(poolsetMutex_); + for (const auto& it : poolsetMap_) { + if (it.second.GetName() == poolsetName) { + return it.first; + } + } + return static_cast(UNINTIALIZE_ID); +} + PoolIdType TopologyImpl::FindLogicalPool( const std::string &logicalPoolName, const std::string &physicalPoolName) const { @@ -560,11 +628,11 @@ PoolIdType TopologyImpl::FindLogicalPool( return static_cast(UNINTIALIZE_ID); } + PoolIdType TopologyImpl::FindPhysicalPool( const std::string &physicalPoolName) const { ReadLockGuard rlockPhysicalPool(physicalPoolMutex_); - for (auto it = physicalPoolMap_.begin(); - it != physicalPoolMap_.end(); + for (auto it = physicalPoolMap_.begin(); it != physicalPoolMap_.end(); it++) { if (it->second.GetName() == physicalPoolName) { return it->first; @@ -573,6 +641,25 @@ PoolIdType TopologyImpl::FindPhysicalPool( return static_cast(UNINTIALIZE_ID); } +PoolIdType TopologyImpl::FindPhysicalPool(const std::string &poolName, + PoolsetIdType poolsetId) const { + ReadLockGuard rlockPhysicalPool(physicalPoolMutex_); + for (const auto& it : physicalPoolMap_) { + if ((it.second.GetPoolsetId() == poolsetId) && + (it.second.GetName() == poolName)) { + return it.first; + } + } + return static_cast(UNINTIALIZE_ID); +} + +PoolIdType TopologyImpl::FindPhysicalPool( + const std::string& poolName, + const std::string& poolsetName) const { + PoolsetIdType poolsetId = FindPoolset(poolsetName); + return FindPhysicalPool(poolName, poolsetId); +} + ZoneIdType TopologyImpl::FindZone(const std::string &zoneName, const std::string &physicalPoolName) const { PoolIdType physicalPoolId = FindPhysicalPool(physicalPoolName); @@ -641,6 +728,15 @@ ChunkServerIdType TopologyImpl::FindChunkServerNotRetired( } return static_cast(UNINTIALIZE_ID); } +bool TopologyImpl::GetPoolset(PoolsetIdType poolsetId, Poolset *out) const { + ReadLockGuard rlockPoolset(poolsetMutex_); + auto it = poolsetMap_.find(poolsetId); + if (it != poolsetMap_.end()) { + *out = it->second; + return true; + } + return false; +} bool TopologyImpl::GetLogicalPool(PoolIdType poolId, LogicalPool *out) const { ReadLockGuard rlockLogicalPool(logicalPoolMutex_); @@ -737,6 +833,33 @@ std::vector TopologyImpl::GetZoneInCluster( return ret; } +std::vector TopologyImpl::GetPoolsetInCluster( + PoolsetFilter filter) const { + std::vector ret; + ReadLockGuard rlockPoolset(poolsetMutex_); + for (const auto& it : poolsetMap_) { + if (filter(it.second)) { + ret.push_back(it.first); + } + } + return ret; +} + +std::vector TopologyImpl::GetPoolsetNameInCluster( + PoolsetFilter filter) const { + std::vector ret; + ret.reserve(poolsetMap_.size()); + + ReadLockGuard rlockPoolset(poolsetMutex_); + for (const auto& it : poolsetMap_) { + if (filter(it.second)) { + ret.push_back(it.second.GetName()); + } + } + + return ret; +} + 
std::vector TopologyImpl::GetPhysicalPoolInCluster( PhysicalPoolFilter filter) const { std::vector ret; @@ -829,6 +952,19 @@ std::list TopologyImpl::GetServerInPhysicalPool( return ret; } +std::list TopologyImpl::GetPhysicalPoolInPoolset( + PoolsetIdType id, + PhysicalPoolFilter filter) const { + std::list ret; + ReadLockGuard rlockPhysicalPool(physicalPoolMutex_); + for (const auto& it : physicalPoolMap_) { + if (it.second.GetPoolsetId() == id && filter(it.second)) { + ret.push_back(it.first); + } + } + return ret; +} + std::list TopologyImpl::GetZoneInPhysicalPool(PoolIdType id, ZoneFilter filter) const { std::list ret; @@ -892,6 +1028,7 @@ int TopologyImpl::Init(const TopologyOption &option) { return ret; } + WriteLockGuard wlockPoolset(poolsetMutex_); WriteLockGuard wlockLogicalPool(logicalPoolMutex_); WriteLockGuard wlockPhysicalPool(physicalPoolMutex_); WriteLockGuard wlockZone(zoneMutex_); @@ -899,6 +1036,25 @@ int TopologyImpl::Init(const TopologyOption &option) { WriteLockGuard wlockChunkServer(chunkServerMutex_); WriteLockGuard wlockCopySet(copySetMutex_); + PoolsetIdType maxPoolsetId; + if (!storage_->LoadPoolset(&poolsetMap_, &maxPoolsetId)) { + LOG(ERROR) << "[TopologyImpl::init], LoadPoolset fail."; + return kTopoErrCodeStorgeFail; + } + + if (poolsetMap_.empty()) { + const bool succ = CreateDefaultPoolset(); + if (!succ) { + return kTopoErrCodeStorgeFail; + } + + maxPoolsetId = kDefaultPoolsetId; + } + + idGenerator_->initPoolsetIdGenerator(maxPoolsetId); + LOG(INFO) << "[TopologyImpl::init], LoadPoolset success, " + << "poolset num = " << poolsetMap_.size(); + PoolIdType maxLogicalPoolId; if (!storage_->LoadLogicalPool(&logicalPoolMap_, &maxLogicalPoolId)) { LOG(ERROR) << "[TopologyImpl::init], LoadLogicalPool fail."; @@ -988,6 +1144,19 @@ int TopologyImpl::Init(const TopologyOption &option) { LOG(INFO) << "[TopologyImpl::init], LoadCopySet success, " << "copyset num = " << copySetMap_.size(); + for (auto& phy : physicalPoolMap_) { + auto pid = phy.second.GetPoolsetId(); + if (pid == UNINTIALIZE_ID) { + // old cluster's physicals don't have corresponding poolset, we just + // place them into the default poolset + pid = curve::common::kDefaultPoolsetId; + phy.second.SetPoolsetId(pid); + } + + assert(pid != UNINTIALIZE_ID); + poolsetMap_[pid].AddPhysicalPool(phy.first); + } + for (auto it : zoneMap_) { PoolIdType poolid = it.second.GetPhysicalPoolId(); physicalPoolMap_[poolid].AddZone(it.first); @@ -1311,7 +1480,37 @@ std::string TopologyImpl::GetHostNameAndPortById(ChunkServerIdType csId) { // get hostName of the chunkserver return server.GetHostName() + ":" + std::to_string(cs.GetPort()); } + +int TopologyImpl::UpdateChunkServerVersion(const std::string &version, + ChunkServerIdType id) { + int ret = kTopoErrCodeSuccess; + ReadLockGuard rlockChunkServerMap(chunkServerMutex_); + auto iter = chunkServerMap_.find(id); + if (iter != chunkServerMap_.end()) { + WriteLockGuard wlockChunkServer(iter->second.GetRWLockRef()); + iter->second.SetVersion(version); + } else { + ret = kTopoErrCodeChunkServerNotFound; + } + return ret; +} + +bool TopologyImpl::CreateDefaultPoolset() { + assert(poolsetMap_.empty()); + + Poolset data{kDefaultPoolsetId, kDefaultPoolsetName, kDefaultPoolsetName, + kDefaultPoolsetName}; + const bool succ = storage_->StoragePoolset(data); + if (!succ) { + LOG(WARNING) << "Create default poolset failed"; + return false; + } + + poolsetMap_.emplace(kDefaultPoolsetId, std::move(data)); + LOG(INFO) << "Create default poolset success"; + return true; +} + } // 
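TopologyImpl::Init now loads poolsets before anything else, creates a default poolset when the store has none, and then walks the physical pools so that entries written by an older cluster (whose poolset id is still UNINTIALIZE_ID) are adopted into the default poolset. A condensed model of that backfill step, with plain structs and hypothetical id constants standing in for Curve's topology types:

#include <cstdint>
#include <iostream>
#include <unordered_map>
#include <vector>

// Hypothetical stand-ins for the topology types and constants used above.
using PoolsetIdType = uint32_t;
using PoolIdType = uint32_t;
constexpr PoolsetIdType kUninitializedId = 0;
constexpr PoolsetIdType kDefaultPoolsetId = 1;

struct Poolset {
    std::vector<PoolIdType> physicalPools;
};
struct PhysicalPool {
    PoolsetIdType poolsetId = kUninitializedId;
};

int main() {
    std::unordered_map<PoolsetIdType, Poolset> poolsetMap{{kDefaultPoolsetId, {}}};
    std::unordered_map<PoolIdType, PhysicalPool> physicalPoolMap{
        {10, {kUninitializedId}},  // written by an old cluster, no poolset yet
        {11, {2}},                 // already belongs to poolset 2
    };
    poolsetMap[2] = {};

    for (auto& phy : physicalPoolMap) {
        auto pid = phy.second.poolsetId;
        if (pid == kUninitializedId) {
            // Old-cluster physical pools are adopted by the default poolset.
            pid = kDefaultPoolsetId;
            phy.second.poolsetId = pid;
        }
        poolsetMap[pid].physicalPools.push_back(phy.first);
    }

    std::cout << "default poolset now owns "
              << poolsetMap[kDefaultPoolsetId].physicalPools.size()
              << " physical pool(s)\n";
    return 0;
}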
namespace topology } // namespace mds } // namespace curve - diff --git a/src/mds/topology/topology.h b/src/mds/topology/topology.h index 418b5d7001..42a1e1a6c9 100644 --- a/src/mds/topology/topology.h +++ b/src/mds/topology/topology.h @@ -53,6 +53,7 @@ using ServerFilter = std::function; using ZoneFilter = std::function; using PhysicalPoolFilter = std::function; using LogicalPoolFilter = std::function; +using PoolsetFilter = std::function; using CopySetFilter = std::function; class Topology { @@ -62,6 +63,7 @@ class Topology { virtual bool GetClusterInfo(ClusterInformation *info) = 0; + virtual PoolsetIdType AllocatePoolsetId() = 0; virtual PoolIdType AllocateLogicalPoolId() = 0; virtual PoolIdType AllocatePhysicalPoolId() = 0; virtual ZoneIdType AllocateZoneId() = 0; @@ -71,6 +73,7 @@ class Topology { virtual std::string AllocateToken() = 0; + virtual int AddPoolset(const Poolset &data) = 0; virtual int AddLogicalPool(const LogicalPool &data) = 0; virtual int AddPhysicalPool(const PhysicalPool &data) = 0; virtual int AddZone(const Zone &data) = 0; @@ -78,6 +81,7 @@ class Topology { virtual int AddChunkServer(const ChunkServer &data) = 0; virtual int AddCopySet(const CopySetInfo &data) = 0; + virtual int RemovePoolset(PoolsetIdType id) = 0; virtual int RemoveLogicalPool(PoolIdType id) = 0; virtual int RemovePhysicalPool(PoolIdType id) = 0; virtual int RemoveZone(ZoneIdType id) = 0; @@ -155,6 +159,17 @@ class Topology { virtual int UpdateChunkServerDiskStatus(const ChunkServerState &state, ChunkServerIdType id) = 0; + /** + * @brief update chunkserver version + * + * @param version disk status + * @param id chunkserverid + * @return error code + * kTopoErrCodeSuccess: success + * kTopoErrCodeChunkServerNotFound: no this chunkserver + */ + virtual int UpdateChunkServerVersion(const std::string &version, + ChunkServerIdType id) = 0; /** * @brief update chunkserver start up time @@ -183,11 +198,20 @@ class Topology { virtual int SetCopySetAvalFlag(const CopySetKey &key, bool aval) = 0; - virtual PoolIdType - FindLogicalPool(const std::string &logicalPoolName, - const std::string &physicalPoolName) const = 0; + virtual PoolsetIdType FindPoolset(const std::string &poolsetName) const = 0; + + virtual PoolIdType FindLogicalPool(const std::string &logicalPoolName, + const std::string &physicalPoolName) const = 0; + virtual PoolIdType FindPhysicalPool( const std::string &physicalPoolName) const = 0; + + virtual PoolIdType FindPhysicalPool(const std::string &physicalPoolName, + const std::string &poolsetName) const = 0; + + virtual PoolIdType FindPhysicalPool(const std::string &physicalPoolName, + PoolsetIdType poolsetid) const = 0; + virtual ZoneIdType FindZone(const std::string &zoneName, const std::string &physicalPoolName) const = 0; virtual ZoneIdType FindZone(const std::string &zoneName, @@ -200,10 +224,10 @@ class Topology { const std::string &hostIp, uint32_t port) const = 0; + virtual bool GetPoolset(PoolsetIdType poolsetId, Poolset *out) const = 0; + virtual bool GetLogicalPool(PoolIdType poolId, LogicalPool *out) const = 0; - virtual bool GetPhysicalPool(PoolIdType poolId, - PhysicalPool *out) const = 0; virtual bool GetZone(ZoneIdType zoneId, Zone *out) const = 0; virtual bool GetServer(ServerIdType serverId, @@ -213,11 +237,24 @@ class Topology { virtual bool GetCopySet(CopySetKey key, CopySetInfo *out) const = 0; + virtual bool GetPoolset(const std::string &poolsetName, + Poolset *out) const = 0; + virtual bool GetLogicalPool(const std::string &logicalPoolName, - const std::string 
&physicalPoolName, - LogicalPool *out) const = 0; + const std::string &physicalPoolName, + LogicalPool *out) const = 0; + virtual bool GetPhysicalPool(PoolIdType poolId, + PhysicalPool *out) const = 0; virtual bool GetPhysicalPool(const std::string &physicalPoolName, + PhysicalPool *out) const = 0; + + virtual bool GetPhysicalPool(const std::string &poolName, + const std::string &poolsetName, + PhysicalPool *out) const = 0; + + virtual bool GetPhysicalPool(const std::string &poolName, + PoolsetIdType poolsetId, PhysicalPool *out) const = 0; virtual bool GetZone(const std::string &zoneName, @@ -262,6 +299,15 @@ class Topology { LogicalPoolFilter filter = [](const LogicalPool&) { return true;}) const = 0; + virtual std::vector GetPoolsetInCluster( + PoolsetFilter filter = [](const Poolset&) { + return true;}) const = 0; + + virtual std::vector GetPoolsetNameInCluster( + PoolsetFilter filter = [](const Poolset&) { + return true; + }) const = 0; + virtual std::vector GetCopySetsInCluster( CopySetFilter filter = [](const CopySetInfo&) { return true;}) const = 0; @@ -297,6 +343,13 @@ class Topology { ServerFilter filter = [](const Server&) { return true;}) const = 0; + // get physicalPool List + virtual std::list GetPhysicalPoolInPoolset( + PoolsetIdType id, + PhysicalPoolFilter filter = [](const PhysicalPool&) { + return true; + }) const = 0; + // get zone list virtual std::list GetZoneInPhysicalPool( PoolIdType id, @@ -354,6 +407,7 @@ class TopologyImpl : public Topology { bool GetClusterInfo(ClusterInformation *info) override; + PoolIdType AllocatePoolsetId() override; PoolIdType AllocateLogicalPoolId() override; PoolIdType AllocatePhysicalPoolId() override; ZoneIdType AllocateZoneId() override; @@ -363,8 +417,10 @@ class TopologyImpl : public Topology { std::string AllocateToken() override; + int AddPoolset(const Poolset &data) override; int AddLogicalPool(const LogicalPool &data) override; int AddPhysicalPool(const PhysicalPool &data) override; + // int AddPhysicalPoolJustForTest(const PhysicalPool &data) override; int AddZone(const Zone &data) override; int AddServer(const Server &data) override; int AddChunkServer(const ChunkServer &data) override; @@ -372,6 +428,7 @@ class TopologyImpl : public Topology { int RemoveLogicalPool(PoolIdType id) override; int RemovePhysicalPool(PoolIdType id) override; + int RemovePoolset(PoolsetIdType id) override; int RemoveZone(ZoneIdType id) override; int RemoveServer(ServerIdType id) override; int RemoveChunkServer(ChunkServerIdType id) override; @@ -397,15 +454,26 @@ class TopologyImpl : public Topology { ChunkServerIdType id) override; int UpdateChunkServerStartUpTime(uint64_t time, ChunkServerIdType id) override; + int UpdateChunkServerVersion(const std::string &version, + ChunkServerIdType id) override; int UpdateCopySetTopo(const CopySetInfo &data) override; int SetCopySetAvalFlag(const CopySetKey &key, bool aval) override; + PoolsetIdType FindPoolset(const std::string &poolsetName) const override; + PoolIdType FindLogicalPool(const std::string &logicalPoolName, const std::string &physicalPoolName) const override; + PoolIdType FindPhysicalPool( const std::string &physicalPoolName) const override; + PoolIdType FindPhysicalPool(const std::string &physicalPoolName, + const std::string &poolsetName) const override; + + PoolIdType FindPhysicalPool(const std::string &physicalPoolName, + PoolsetIdType poolsetid) const override; + ZoneIdType FindZone(const std::string &zoneName, const std::string &physicalPoolName) const override; ZoneIdType FindZone(const 
std::string &zoneName, @@ -417,8 +485,8 @@ class TopologyImpl : public Topology { ChunkServerIdType FindChunkServerNotRetired(const std::string &hostIp, uint32_t port) const override; + bool GetPoolset(PoolsetIdType poolsetId, Poolset *out) const override; bool GetLogicalPool(PoolIdType poolId, LogicalPool *out) const override; - bool GetPhysicalPool(PoolIdType poolId, PhysicalPool *out) const override; bool GetZone(ZoneIdType zoneId, Zone *out) const override; bool GetServer(ServerIdType serverId, Server *out) const override; bool GetChunkServer(ChunkServerIdType chunkserverId, @@ -426,16 +494,34 @@ class TopologyImpl : public Topology { bool GetCopySet(CopySetKey key, CopySetInfo *out) const override; + bool GetPoolset(const std::string &poolsetName, + Poolset *out) const override { + return GetPoolset(FindPoolset(poolsetName), out); + } bool GetLogicalPool(const std::string &logicalPoolName, const std::string &physicalPoolName, LogicalPool *out) const override { return GetLogicalPool( FindLogicalPool(logicalPoolName, physicalPoolName), out); } + + bool GetPhysicalPool(PoolIdType poolId, PhysicalPool *out) const override; + bool GetPhysicalPool(const std::string &physicalPoolName, PhysicalPool *out) const override { return GetPhysicalPool(FindPhysicalPool(physicalPoolName), out); } + + bool GetPhysicalPool(const std::string &poolName, + const std::string &poolsetName, PhysicalPool *out) const override { + return GetPhysicalPool(FindPhysicalPool(poolName, poolsetName), out); + } + + bool GetPhysicalPool(const std::string &poolName, + PoolsetIdType poolsetId, PhysicalPool *out) const override { + return GetPhysicalPool(FindPhysicalPool(poolName, poolsetId), out); + } + bool GetZone(const std::string &zoneName, const std::string &physicalPoolName, Zone *out) const override { @@ -481,6 +567,15 @@ class TopologyImpl : public Topology { LogicalPoolFilter filter = [](const LogicalPool&) { return true;}) const override; + std::vector GetPoolsetInCluster( + PoolsetFilter filter = [](const Poolset&) { + return true;}) const override; + + std::vector GetPoolsetNameInCluster( + PoolsetFilter filter = [](const Poolset&) { + return true; + }) const override; + std::vector GetCopySetsInCluster( CopySetFilter filter = [](const CopySetInfo&) { return true;}) const override; @@ -517,6 +612,13 @@ class TopologyImpl : public Topology { ServerFilter filter = [](const Server&) { return true;}) const override; + // get physicalPool list + std::list GetPhysicalPoolInPoolset( + PoolsetIdType id, + PhysicalPoolFilter filter = [](const PhysicalPool&) { + return true; + }) const override; + // get zone list std::list GetZoneInPhysicalPool(PoolIdType id, @@ -586,7 +688,10 @@ class TopologyImpl : public Topology { void SetChunkServerExternalIp(); + bool CreateDefaultPoolset(); + private: + std::unordered_map poolsetMap_; std::unordered_map logicalPoolMap_; std::unordered_map physicalPoolMap_; std::unordered_map zoneMap_; @@ -603,6 +708,7 @@ class TopologyImpl : public Topology { std::shared_ptr storage_; // fetch lock in the order below to avoid deadlock + mutable curve::common::RWLock poolsetMutex_; mutable curve::common::RWLock logicalPoolMutex_; mutable curve::common::RWLock physicalPoolMutex_; mutable curve::common::RWLock zoneMutex_; diff --git a/src/mds/topology/topology_chunk_allocator.cpp b/src/mds/topology/topology_chunk_allocator.cpp index bf07557172..72447a5785 100644 --- a/src/mds/topology/topology_chunk_allocator.cpp +++ b/src/mds/topology/topology_chunk_allocator.cpp @@ -38,24 +38,23 @@ namespace 
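
A minimal usage sketch of the poolset lookups declared above: resolve a poolset by name, then walk the physical pools registered under it. This is illustrative only, not code from the patch; it assumes an initialized Topology from this header, and the element type returned by GetPhysicalPoolInPoolset is left to auto.

#include <glog/logging.h>
#include <memory>
#include <string>
#include "src/mds/topology/topology.h"

using ::curve::mds::topology::PhysicalPool;
using ::curve::mds::topology::Poolset;
using ::curve::mds::topology::Topology;

// Hypothetical helper: list the physical pools that belong to one poolset.
void ListPoolsOfPoolset(const std::shared_ptr<Topology> &topo,
                        const std::string &poolsetName) {
    Poolset poolset;
    if (!topo->GetPoolset(poolsetName, &poolset)) {  // name -> FindPoolset -> id lookup
        LOG(WARNING) << "poolset not found: " << poolsetName;
        return;
    }
    for (auto poolId : topo->GetPhysicalPoolInPoolset(poolset.GetId())) {
        PhysicalPool pool;
        if (topo->GetPhysicalPool(poolId, &pool)) {
            LOG(INFO) << poolset.GetName() << " -> " << pool.GetName();
        }
    }
}
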
topology { // logical pool is not designated when calling this function. When executing, // a logical will be chosen following the policy (randomly or weighted) bool TopologyChunkAllocatorImpl::AllocateChunkRandomInSingleLogicalPool( - curve::mds::FileType fileType, - uint32_t chunkNumber, - ChunkSizeType chunkSize, + curve::mds::FileType fileType, const std::string& pstName, + uint32_t chunkNumber, ChunkSizeType chunkSize, std::vector *infos) { + (void)chunkSize; if (fileType != INODE_PAGEFILE) { - LOG(ERROR) << "Invalid FileType, fileType = " - << fileType; + LOG(ERROR) << "Invalid FileType, fileType = " << fileType; return false; } PoolIdType logicalPoolChosenId = 0; - bool ret = ChooseSingleLogicalPool(fileType, &logicalPoolChosenId); + bool ret = ChooseSingleLogicalPool(fileType, pstName, &logicalPoolChosenId); if (!ret) { LOG(ERROR) << "ChooseSingleLogicalPool fail, ret = " << ret; return false; } - CopySetFilter filter = [](const CopySetInfo& copyset) { - return copyset.IsAvailable(); + CopySetFilter filter = [](const CopySetInfo ©set) { + return copyset.IsAvailable(); }; std::vector copySetIds = topology_->GetCopySetsInLogicalPool(logicalPoolChosenId, filter); @@ -67,32 +66,28 @@ bool TopologyChunkAllocatorImpl::AllocateChunkRandomInSingleLogicalPool( return false; } ret = AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( - copySetIds, - logicalPoolChosenId, - chunkNumber, - infos); + copySetIds, logicalPoolChosenId, chunkNumber, infos); return ret; } bool TopologyChunkAllocatorImpl::AllocateChunkRoundRobinInSingleLogicalPool( - curve::mds::FileType fileType, - uint32_t chunkNumber, - ChunkSizeType chunkSize, + curve::mds::FileType fileType, const std::string& pstName, + uint32_t chunkNumber, ChunkSizeType chunkSize, std::vector *infos) { + (void)chunkSize; if (fileType != INODE_PAGEFILE) { - LOG(ERROR) << "Invalid FileType, fileType = " - << fileType; + LOG(ERROR) << "Invalid FileType, fileType = " << fileType; return false; } PoolIdType logicalPoolChosenId = 0; - bool ret = ChooseSingleLogicalPool(fileType, &logicalPoolChosenId); + bool ret = ChooseSingleLogicalPool(fileType, pstName, &logicalPoolChosenId); if (!ret) { LOG(ERROR) << "ChooseSingleLogicalPool fail, ret = false."; return false; } - CopySetFilter filter = [](const CopySetInfo& copyset) { - return copyset.IsAvailable(); + CopySetFilter filter = [](const CopySetInfo ©set) { + return copyset.IsAvailable(); }; std::vector copySetIds = topology_->GetCopySetsInLogicalPool(logicalPoolChosenId, filter); @@ -120,11 +115,7 @@ bool TopologyChunkAllocatorImpl::AllocateChunkRoundRobinInSingleLogicalPool( } ret = AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( - copySetIds, - logicalPoolChosenId, - &nextIndex, - chunkNumber, - infos); + copySetIds, logicalPoolChosenId, &nextIndex, chunkNumber, infos); if (ret) { nextIndexMap_[logicalPoolChosenId] = nextIndex; } @@ -132,7 +123,7 @@ bool TopologyChunkAllocatorImpl::AllocateChunkRoundRobinInSingleLogicalPool( } bool TopologyChunkAllocatorImpl::ChooseSingleLogicalPool( - curve::mds::FileType fileType, + curve::mds::FileType fileType, const std::string& pstName, PoolIdType *poolOut) { std::vector logicalPools; @@ -149,12 +140,11 @@ bool TopologyChunkAllocatorImpl::ChooseSingleLogicalPool( break; } - auto logicalPoolFilter = - [poolType, this] (const LogicalPool &pool) { + auto logicalPoolFilter = [poolType, this](const LogicalPool &pool) { return pool.GetLogicalPoolAvaliableFlag() && - (!this->enableLogicalPoolStatus_ || - AllocateStatus::ALLOW == 
pool.GetStatus()) && - pool.GetLogicalPoolType() == poolType; + (!this->enableLogicalPoolStatus_ || + AllocateStatus::ALLOW == pool.GetStatus()) && + pool.GetLogicalPoolType() == poolType; }; logicalPools = topology_->GetLogicalPoolInCluster(logicalPoolFilter); @@ -167,7 +157,7 @@ bool TopologyChunkAllocatorImpl::ChooseSingleLogicalPool( std::map poolWeightMap; std::vector poolToChoose; std::map poolsEnough; - GetRemainingSpaceInLogicalPool(logicalPools, &poolsEnough); + GetRemainingSpaceInLogicalPool(logicalPools, &poolsEnough, pstName); for (auto pool : poolsEnough) { // choose logical pool according to its weight if (ChoosePoolPolicy::kWeight == policy_) { @@ -181,15 +171,16 @@ bool TopologyChunkAllocatorImpl::ChooseSingleLogicalPool( return AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight( poolWeightMap, poolOut); } else { - return AllocateChunkPolicy::ChooseSingleLogicalPoolRandom( - poolToChoose, poolOut); + return AllocateChunkPolicy::ChooseSingleLogicalPoolRandom(poolToChoose, + poolOut); } } void TopologyChunkAllocatorImpl::GetRemainingSpaceInLogicalPool( - const std::vector& logicalPools, - std::map* enoughSpacePools) { - for (auto pid : logicalPools) { + const std::vector &logicalPools, + std::map *enoughSpacePools, + const std::string& pstName) { + for (auto pid : logicalPools) { LogicalPool lPool; if (!topology_->GetLogicalPool(pid, &lPool)) { continue; @@ -198,13 +189,24 @@ void TopologyChunkAllocatorImpl::GetRemainingSpaceInLogicalPool( if (!topology_->GetPhysicalPool(lPool.GetPhysicalPoolId(), &pPool)) { continue; } + + PoolsetIdType poolsetId = pPool.GetPoolsetId(); + Poolset poolset; + if (!topology_->GetPoolset(poolsetId, &poolset)) { + LOG(WARNING) << "Get poolset fail , poolset is null"; + continue; + } + if (pstName != poolset.GetName()) { + continue; + } + uint64_t diskCapacity = 0; double available = available_; if (chunkFilePoolAllocHelp_->GetUseChunkFilepool()) { topoStat_->GetChunkPoolSize(lPool.GetPhysicalPoolId(), - &diskCapacity); - available = available * - chunkFilePoolAllocHelp_->GetAvailable() / 100; + &diskCapacity); + available = + available * chunkFilePoolAllocHelp_->GetAvailable() / 100; diskCapacity = diskCapacity * available / 100; } else { diskCapacity = pPool.GetDiskCapacity(); @@ -221,24 +223,21 @@ void TopologyChunkAllocatorImpl::GetRemainingSpaceInLogicalPool( alloc *= lPool.GetReplicaNum(); // calculate remaining capacity - uint64_t diskRemainning = - (diskCapacity > alloc) ? diskCapacity - alloc : 0; + uint64_t diskRemainning = (static_cast(diskCapacity) > alloc) + ? 
diskCapacity - alloc + : 0; LOG(INFO) << "ChooseSingleLogicalPool find pool {" - << "diskCapacity:" << diskCapacity - << ", diskAlloc:" << alloc - << ", diskRemainning:" << diskRemainning - << "}"; + << "diskCapacity:" << diskCapacity << ", diskAlloc:" << alloc + << ", diskRemainning:" << diskRemainning << "}"; if (diskRemainning > 0) { (*enoughSpacePools)[pid] = diskRemainning; } } } bool AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( - std::vector copySetIds, - PoolIdType logicalPoolId, - uint32_t chunkNumber, - std::vector *infos) { + std::vector copySetIds, PoolIdType logicalPoolId, + uint32_t chunkNumber, std::vector *infos) { infos->clear(); static std::random_device rd; // generating seed for random number engine @@ -256,10 +255,8 @@ bool AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( } bool AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( - std::vector copySetIds, - PoolIdType logicalPoolId, - uint32_t *nextIndex, - uint32_t chunkNumber, + std::vector copySetIds, PoolIdType logicalPoolId, + uint32_t *nextIndex, uint32_t chunkNumber, std::vector *infos) { if (copySetIds.empty()) { return false; @@ -279,8 +276,7 @@ bool AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( } bool AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight( - const std::map &poolWeightMap, - PoolIdType *poolIdOut) { + const std::map &poolWeightMap, PoolIdType *poolIdOut) { if (poolWeightMap.empty()) { LOG(ERROR) << "ChooseSingleLogicalPoolByWeight, " << "poolWeightMap is empty."; @@ -316,8 +312,7 @@ bool AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight( } bool AllocateChunkPolicy::ChooseSingleLogicalPoolRandom( - const std::vector &pools, - PoolIdType *poolIdOut) { + const std::vector &pools, PoolIdType *poolIdOut) { if (pools.empty()) { LOG(ERROR) << "ChooseSingleLogicalPoolRandom, " << "pools is empty."; diff --git a/src/mds/topology/topology_chunk_allocator.h b/src/mds/topology/topology_chunk_allocator.h index 9f9771f5fd..911393f879 100644 --- a/src/mds/topology/topology_chunk_allocator.h +++ b/src/mds/topology/topology_chunk_allocator.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include "src/mds/topology/topology.h" @@ -49,18 +50,18 @@ enum class ChoosePoolPolicy { class ChunkFilePoolAllocHelp { public: ChunkFilePoolAllocHelp() - : ChunkFilePoolPoolWalReserve(0), - useChunkFilepool(false), - useChunkFilePoolAsWalPool(false) {} + : useChunkFilepool(false), useChunkFilePoolAsWalPool(false), + ChunkFilePoolPoolWalReserve(0) {} ~ChunkFilePoolAllocHelp() {} - void UpdateChunkFilePoolAllocConfig(bool useChunkFilepool_, - bool useChunkFilePoolAsWalPool_, - uint32_t useChunkFilePoolAsWalPoolReserve_) { + void + UpdateChunkFilePoolAllocConfig(bool useChunkFilepool_, + bool useChunkFilePoolAsWalPool_, + uint32_t useChunkFilePoolAsWalPoolReserve_) { useChunkFilepool.store(useChunkFilepool_, std::memory_order_release); useChunkFilePoolAsWalPool.store(useChunkFilePoolAsWalPool_, - std::memory_order_release); + std::memory_order_release); ChunkFilePoolPoolWalReserve.store(useChunkFilePoolAsWalPoolReserve_, - std::memory_order_release); + std::memory_order_release); } bool GetUseChunkFilepool() { return useChunkFilepool.load(std::memory_order_acquire); @@ -68,8 +69,8 @@ class ChunkFilePoolAllocHelp { // After removing the reserved space, the remaining percentage uint32_t GetAvailable() { if (useChunkFilePoolAsWalPool.load(std::memory_order_acquire)) { - return 100 - ChunkFilePoolPoolWalReserve.load( - std::memory_order_acquire); + return 
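
The check added to GetRemainingSpaceInLogicalPool above is what restricts allocation to one poolset: a logical pool only counts if its physical pool's poolset name equals pstName. A hedged standalone helper with the same chain of lookups (the function name is hypothetical; the Topology calls are the ones used in the patch):

#include <memory>
#include <string>
#include "src/mds/topology/topology.h"

using ::curve::mds::topology::LogicalPool;
using ::curve::mds::topology::PhysicalPool;
using ::curve::mds::topology::PoolIdType;
using ::curve::mds::topology::Poolset;
using ::curve::mds::topology::Topology;

// Returns true when the logical pool's physical pool belongs to poolset pstName.
bool LogicalPoolInPoolset(const std::shared_ptr<Topology> &topo,
                          PoolIdType logicalPoolId,
                          const std::string &pstName) {
    LogicalPool lPool;
    PhysicalPool pPool;
    Poolset poolset;
    return topo->GetLogicalPool(logicalPoolId, &lPool) &&
           topo->GetPhysicalPool(lPool.GetPhysicalPoolId(), &pPool) &&
           topo->GetPoolset(pPool.GetPoolsetId(), &poolset) &&
           poolset.GetName() == pstName;
}
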
100 - + ChunkFilePoolPoolWalReserve.load(std::memory_order_acquire); } else { return 100; } @@ -80,7 +81,7 @@ class ChunkFilePoolAllocHelp { std::atomic useChunkFilepool; std::atomic useChunkFilePoolAsWalPool; // Reserve extra space for walpool - std::atomic ChunkFilePoolPoolWalReserve; + std::atomic ChunkFilePoolPoolWalReserve; }; class TopologyChunkAllocator { @@ -89,17 +90,21 @@ class TopologyChunkAllocator { virtual ~TopologyChunkAllocator() {} virtual bool AllocateChunkRandomInSingleLogicalPool( ::curve::mds::FileType fileType, + const std::string& pstName, uint32_t chunkNumer, ChunkSizeType chunkSize, std::vector *infos) = 0; virtual bool AllocateChunkRoundRobinInSingleLogicalPool( ::curve::mds::FileType fileType, + const std::string &pstName, uint32_t chunkNumer, ChunkSizeType chunkSize, std::vector *infos) = 0; virtual void GetRemainingSpaceInLogicalPool( const std::vector& logicalPools, - std::map* remianingSpace) = 0; + std::map* remianingSpace, + const std::string& pstName) = 0; + virtual void UpdateChunkFilePoolAllocConfig( bool useChunkFilepool_, bool useChunkFilePoolAsWalPool_, uint32_t useChunkFilePoolAsWalPoolReserve_) = 0; @@ -107,18 +112,18 @@ class TopologyChunkAllocator { class TopologyChunkAllocatorImpl : public TopologyChunkAllocator { public: - TopologyChunkAllocatorImpl(std::shared_ptr topology, + TopologyChunkAllocatorImpl( + std::shared_ptr topology, std::shared_ptr allocStatistic, std::shared_ptr topologyStat, std::shared_ptr ChunkFilePoolAllocHelp, const TopologyOption &option) - : topology_(topology), - allocStatistic_(allocStatistic), - topoStat_(topologyStat), - chunkFilePoolAllocHelp_(ChunkFilePoolAllocHelp), - available_(option.PoolUsagePercentLimit), - policy_(static_cast(option.choosePoolPolicy)), - enableLogicalPoolStatus_(option.enableLogicalPoolStatus) { + : topology_(topology), allocStatistic_(allocStatistic), + available_(option.PoolUsagePercentLimit), + topoStat_(topologyStat), + chunkFilePoolAllocHelp_(ChunkFilePoolAllocHelp), + policy_(static_cast(option.choosePoolPolicy)), + enableLogicalPoolStatus_(option.enableLogicalPoolStatus) { std::srand(std::time(nullptr)); } ~TopologyChunkAllocatorImpl() {} @@ -137,6 +142,7 @@ class TopologyChunkAllocatorImpl : public TopologyChunkAllocator { */ bool AllocateChunkRandomInSingleLogicalPool( curve::mds::FileType fileType, + const std::string& pstName, uint32_t chunkNumber, ChunkSizeType chunkSize, std::vector *infos) override; @@ -154,12 +160,15 @@ class TopologyChunkAllocatorImpl : public TopologyChunkAllocator { */ bool AllocateChunkRoundRobinInSingleLogicalPool( curve::mds::FileType fileType, + const std::string &pstName, uint32_t chunkNumber, ChunkSizeType chunkSize, std::vector *infos) override; void GetRemainingSpaceInLogicalPool( const std::vector& logicalPools, - std::map* remianingSpace) override; + std::map* remianingSpace, + const std::string& pstName) override; + void UpdateChunkFilePoolAllocConfig(bool useChunkFilepool_, bool useChunkFilePoolAsWalPool_, uint32_t useChunkFilePoolAsWalPoolReserve_) override { @@ -179,6 +188,7 @@ class TopologyChunkAllocatorImpl : public TopologyChunkAllocator { * @retval false if failed */ bool ChooseSingleLogicalPool(curve::mds::FileType fileType, + const std::string& pstName, PoolIdType *poolOut); private: @@ -230,10 +240,8 @@ class AllocateChunkPolicy { * @retval false if failed */ static bool AllocateChunkRandomInSingleLogicalPool( - std::vector copySetIds, - PoolIdType logicalPoolId, - uint32_t chunkNumber, - std::vector *infos); + std::vector copySetIds, 
PoolIdType logicalPoolId, + uint32_t chunkNumber, std::vector *infos); /** * @brief allocate chunks by round robin in a single logical pool @@ -253,10 +261,8 @@ class AllocateChunkPolicy { * @retval false if failed */ static bool AllocateChunkRoundRobinInSingleLogicalPool( - std::vector copySetIds, - PoolIdType logicalPoolId, - uint32_t *nextIndex, - uint32_t chunkNumber, + std::vector copySetIds, PoolIdType logicalPoolId, + uint32_t *nextIndex, uint32_t chunkNumber, std::vector *infos); /** @@ -281,13 +287,12 @@ class AllocateChunkPolicy { * @retval true if succeeded * @retval false if failed */ - static bool ChooseSingleLogicalPoolRandom( - const std::vector &pools, - PoolIdType *poolIdOut); + static bool + ChooseSingleLogicalPoolRandom(const std::vector &pools, + PoolIdType *poolIdOut); }; - } // namespace topology } // namespace mds } // namespace curve diff --git a/src/mds/topology/topology_id_generator.cpp b/src/mds/topology/topology_id_generator.cpp index c964d33627..46d42a24b6 100644 --- a/src/mds/topology/topology_id_generator.cpp +++ b/src/mds/topology/topology_id_generator.cpp @@ -24,6 +24,11 @@ namespace curve { namespace mds { namespace topology { + +void DefaultIdGenerator::initPoolsetIdGenerator(PoolsetIdType idMax) { + poolsetIdGentor_.init(idMax); +} + void DefaultIdGenerator::initLogicalPoolIdGenerator(PoolIdType idMax) { logicPoolIdGentor_.init(idMax); } @@ -51,6 +56,9 @@ void DefaultIdGenerator::initCopySetIdGenerator( copySetIdGentor_[it.first].init(it.second); } } +PoolsetIdType DefaultIdGenerator::GenPoolsetId() { + return poolsetIdGentor_.GenId(); +} PoolIdType DefaultIdGenerator::GenLogicalPoolId() { return logicPoolIdGentor_.GenId(); @@ -79,4 +87,3 @@ CopySetIdType DefaultIdGenerator::GenCopySetId(PoolIdType logicalPoolId) { } // namespace topology } // namespace mds } // namespace curve - diff --git a/src/mds/topology/topology_id_generator.h b/src/mds/topology/topology_id_generator.h index 23971dec22..640dbdf5b0 100644 --- a/src/mds/topology/topology_id_generator.h +++ b/src/mds/topology/topology_id_generator.h @@ -40,6 +40,7 @@ class TopologyIdGenerator { TopologyIdGenerator() {} virtual ~TopologyIdGenerator() {} + virtual void initPoolsetIdGenerator(PoolsetIdType idMax) = 0; virtual void initLogicalPoolIdGenerator(PoolIdType idMax) = 0; virtual void initPhysicalPoolIdGenerator(PoolIdType idMax) = 0; virtual void initZoneIdGenerator(ZoneIdType idMax) = 0; @@ -48,6 +49,7 @@ class TopologyIdGenerator { virtual void initCopySetIdGenerator( const std::map &idMaxMap) = 0; + virtual PoolsetIdType GenPoolsetId() = 0; virtual PoolIdType GenLogicalPoolId() = 0; virtual PoolIdType GenPhysicalPoolId() = 0; virtual ZoneIdType GenZoneId() = 0; @@ -62,6 +64,7 @@ class DefaultIdGenerator : public TopologyIdGenerator { DefaultIdGenerator() {} ~DefaultIdGenerator() {} + virtual void initPoolsetIdGenerator(PoolsetIdType idMax); virtual void initLogicalPoolIdGenerator(PoolIdType idMax); virtual void initPhysicalPoolIdGenerator(PoolIdType idMax); virtual void initZoneIdGenerator(ZoneIdType idMax); @@ -69,7 +72,7 @@ class DefaultIdGenerator : public TopologyIdGenerator { virtual void initChunkServerIdGenerator(ChunkServerIdType idMax); virtual void initCopySetIdGenerator(const std::map &idMaxMap); - + virtual PoolsetIdType GenPoolsetId(); virtual PoolIdType GenLogicalPoolId(); virtual PoolIdType GenPhysicalPoolId(); virtual ZoneIdType GenZoneId(); @@ -97,6 +100,7 @@ class DefaultIdGenerator : public TopologyIdGenerator { std::atomic idMax_; }; + IdGenerator poolsetIdGentor_; 
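
The poolset id generator added above follows the same pattern as the existing per-type generators: init() seeds the counter with the largest id found in storage, and GenId() atomically hands out the next one. A self-contained sketch of that pattern (SimpleIdGenerator is an illustrative stand-in, not the nested class in this header):

#include <atomic>
#include <cstdint>

template <typename T>
class SimpleIdGenerator {
 public:
    void init(T idMax) { idMax_.store(idMax); }
    T GenId() { return ++idMax_; }  // first call after init(N) returns N + 1

 private:
    std::atomic<T> idMax_{0};
};

// Usage mirroring initPoolsetIdGenerator() / GenPoolsetId():
//   SimpleIdGenerator<uint32_t> poolsetIds;
//   poolsetIds.init(maxPoolsetIdLoadedFromStorage);
//   uint32_t newId = poolsetIds.GenId();
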
IdGenerator logicPoolIdGentor_; IdGenerator physicalPoolIdGentor_; IdGenerator zoneIdGentor_; diff --git a/src/mds/topology/topology_item.cpp b/src/mds/topology/topology_item.cpp index 951ca03d23..9eac140870 100644 --- a/src/mds/topology/topology_item.cpp +++ b/src/mds/topology/topology_item.cpp @@ -24,6 +24,7 @@ #include #include +#include #include "json/json.h" #include "src/common/string_util.h" @@ -47,51 +48,57 @@ bool ClusterInformation::ParseFromString(const std::string &value) { } bool LogicalPool::TransRedundanceAndPlaceMentPolicyFromJsonStr( - const std::string &jsonStr, - LogicalPoolType type, + const std::string &jsonStr, LogicalPoolType type, RedundanceAndPlaceMentPolicy *rap) { - Json::Reader reader; + Json::CharReaderBuilder builder; + std::unique_ptr reader(builder.newCharReader()); Json::Value rapJson; - if (!reader.parse(jsonStr, rapJson)) { + JSONCPP_STRING errormsg; + if (!reader->parse(jsonStr.data(), jsonStr.data() + jsonStr.length(), + &rapJson, &errormsg)) { return false; } switch (type) { - case LogicalPoolType::PAGEFILE: { - if (!rapJson["replicaNum"].isNull()) { - rap->pageFileRAP.replicaNum = rapJson["replicaNum"].asInt(); - } else { - return false; - } - if (!rapJson["copysetNum"].isNull()) { - rap->pageFileRAP.copysetNum = rapJson["copysetNum"].asInt(); - } else { - return false; - } - if (!rapJson["zoneNum"].isNull()) { - rap->pageFileRAP.zoneNum = rapJson["zoneNum"].asInt(); - } else { - return false; - } - break; - } - case LogicalPoolType::APPENDFILE: { - // TODO(xuchaojie): it is not done. + case LogicalPoolType::PAGEFILE: { + if (!rapJson["replicaNum"].isNull()) { + rap->pageFileRAP.replicaNum = rapJson["replicaNum"].asInt(); + } else { return false; } - case LogicalPoolType::APPENDECFILE: { - // TODO(xuchaojie): it is not done. + if (!rapJson["copysetNum"].isNull()) { + rap->pageFileRAP.copysetNum = rapJson["copysetNum"].asInt(); + } else { return false; } - default: { + if (!rapJson["zoneNum"].isNull()) { + rap->pageFileRAP.zoneNum = rapJson["zoneNum"].asInt(); + } else { return false; } + break; + } + case LogicalPoolType::APPENDFILE: { + // TODO(xuchaojie): it is not done. + return false; + } + case LogicalPoolType::APPENDECFILE: { + // TODO(xuchaojie): it is not done. + return false; + } + default: { + return false; + } } return true; } -bool LogicalPool::TransUserPolicyFromJsonStr( - const std::string &jsonStr, LogicalPoolType type, UserPolicy *policy) { +bool LogicalPool::TransUserPolicyFromJsonStr(const std::string &jsonStr, + LogicalPoolType type, + UserPolicy *policy) { + (void)jsonStr; + (void)type; + (void)policy; // TODO(xuchaojie): to finish it. 
return true; } @@ -99,32 +106,30 @@ bool LogicalPool::TransUserPolicyFromJsonStr( bool LogicalPool::SetRedundanceAndPlaceMentPolicyByJson( const std::string &jsonStr) { return LogicalPool::TransRedundanceAndPlaceMentPolicyFromJsonStr( - jsonStr, - GetLogicalPoolType(), - &rap_); + jsonStr, GetLogicalPoolType(), &rap_); } std::string LogicalPool::GetRedundanceAndPlaceMentPolicyJsonStr() const { std::string rapStr; Json::Value rapJson; switch (GetLogicalPoolType()) { - case LogicalPoolType::PAGEFILE : { - rapJson["replicaNum"] = rap_.pageFileRAP.replicaNum; - rapJson["copysetNum"] = rap_.pageFileRAP.copysetNum; - rapJson["zoneNum"] = rap_.pageFileRAP.zoneNum; - rapStr = rapJson.toStyledString(); - break; - } - case LogicalPoolType::APPENDFILE : { - // TODO(xuchaojie): fix it - break; - } - case LogicalPoolType::APPENDECFILE : { - // TODO(xuchaojie): fix it - break; - } - default: - break; + case LogicalPoolType::PAGEFILE: { + rapJson["replicaNum"] = rap_.pageFileRAP.replicaNum; + rapJson["copysetNum"] = rap_.pageFileRAP.copysetNum; + rapJson["zoneNum"] = rap_.pageFileRAP.zoneNum; + rapStr = rapJson.toStyledString(); + break; + } + case LogicalPoolType::APPENDFILE: { + // TODO(xuchaojie): fix it + break; + } + case LogicalPoolType::APPENDECFILE: { + // TODO(xuchaojie): fix it + break; + } + default: + break; } return rapStr; } @@ -132,9 +137,7 @@ std::string LogicalPool::GetRedundanceAndPlaceMentPolicyJsonStr() const { bool LogicalPool::SetUserPolicyByJson(const std::string &jsonStr) { return LogicalPool::TransUserPolicyFromJsonStr( - jsonStr, - GetLogicalPoolType(), - &policy_); + jsonStr, GetLogicalPoolType(), &policy_); } std::string LogicalPool::GetUserPolicyJsonStr() const { @@ -145,20 +148,20 @@ std::string LogicalPool::GetUserPolicyJsonStr() const { uint16_t LogicalPool::GetReplicaNum() const { uint16_t ret = 0; switch (GetLogicalPoolType()) { - case LogicalPoolType::PAGEFILE : { - ret = rap_.pageFileRAP.replicaNum; - break; - } - case LogicalPoolType::APPENDFILE : { - // TODO(xuchaojie): fix it - break; - } - case LogicalPoolType::APPENDECFILE : { - // TODO(xuchaojie): fix it - break; - } - default: - break; + case LogicalPoolType::PAGEFILE: { + ret = rap_.pageFileRAP.replicaNum; + break; + } + case LogicalPoolType::APPENDFILE: { + // TODO(xuchaojie): fix it + break; + } + case LogicalPoolType::APPENDECFILE: { + // TODO(xuchaojie): fix it + break; + } + default: + break; } return ret; } @@ -187,10 +190,8 @@ bool LogicalPool::ParseFromString(const std::string &value) { name_ = data.logicalpoolname(); physicalPoolId_ = data.physicalpoolid(); type_ = data.type(); - SetRedundanceAndPlaceMentPolicyByJson( - data.redundanceandplacementpolicy()); - SetUserPolicyByJson( - data.userpolicy()); + SetRedundanceAndPlaceMentPolicyByJson(data.redundanceandplacementpolicy()); + SetUserPolicyByJson(data.userpolicy()); initialScatterWidth_ = data.initialscatterwidth(); createTime_ = data.createtime(); status_ = data.status(); @@ -199,11 +200,35 @@ bool LogicalPool::ParseFromString(const std::string &value) { return ret; } +bool Poolset::SerializeToString(std::string *value) const { + PoolsetData data; + data.set_poolsetid(id_); + data.set_poolsetname(name_); + data.set_type(type_); + data.set_desc(desc_); + return data.SerializeToString(value); +} + +bool Poolset::ParseFromString(const std::string &value) { + PoolsetData data; + bool ret = data.ParseFromString(value); + id_ = data.poolsetid(); + name_ = std::move(*data.mutable_poolsetname()); + type_ = std::move(*data.mutable_type()); + desc_ = 
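
A small round-trip sketch of the Poolset serialization introduced here; it assumes the Poolset class declared in topology_item.h, and the id, name, type and description values are placeholders:

#include <glog/logging.h>
#include <string>
#include "src/mds/topology/topology_item.h"

using ::curve::mds::topology::Poolset;

void PoolsetRoundTrip() {
    Poolset in(1, "ssd_poolset", "SSD", "pools backed by SSD servers");

    std::string value;
    if (!in.SerializeToString(&value)) {  // encodes a PoolsetData protobuf
        LOG(ERROR) << "serialize poolset failed";
        return;
    }

    Poolset out;
    if (out.ParseFromString(value)) {
        LOG(INFO) << "poolset " << out.GetName()  // "ssd_poolset"
                  << ", type " << out.GetType();  // "SSD"
    }
}
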
std::move(*data.mutable_desc()); + return ret; +} + bool PhysicalPool::SerializeToString(std::string *value) const { PhysicalPoolData data; data.set_physicalpoolid(id_); data.set_physicalpoolname(name_); data.set_desc(desc_); + + if (poolsetId_ != UNINTIALIZE_ID) { + data.set_poolsetid(poolsetId_); + } + return data.SerializeToString(value); } @@ -213,6 +238,13 @@ bool PhysicalPool::ParseFromString(const std::string &value) { id_ = data.physicalpoolid(); name_ = data.physicalpoolname(); desc_ = data.desc(); + + if (data.has_poolsetid()) { + poolsetId_ = data.poolsetid(); + } else { + poolsetId_ = UNINTIALIZE_ID; + } + return ret; } @@ -278,6 +310,7 @@ bool ChunkServer::SerializeToString(std::string *value) const { data.set_mountpoint(mountPoint_); data.set_diskcapacity(state_.GetDiskCapacity()); data.set_diskused(state_.GetDiskUsed()); + data.set_version(version_); return data.SerializeToString(value); } @@ -296,6 +329,7 @@ bool ChunkServer::ParseFromString(const std::string &value) { state_.SetDiskState(data.diskstate()); state_.SetDiskCapacity(data.diskcapacity()); state_.SetDiskUsed(data.diskused()); + version_ = data.version(); return ret; } @@ -309,13 +343,17 @@ std::string CopySetInfo::GetCopySetMembersStr() const { } bool CopySetInfo::SetCopySetMembersByJson(const std::string &jsonStr) { - Json::Reader reader; + Json::CharReaderBuilder builder; + std::unique_ptr reader(builder.newCharReader()); Json::Value copysetMemJson; - if (!reader.parse(jsonStr, copysetMemJson)) { + JSONCPP_STRING errormsg; + if (!reader->parse(jsonStr.data(), jsonStr.data() + jsonStr.length(), + ©setMemJson, &errormsg)) { return false; } + peers_.clear(); - for (int i = 0; i < copysetMemJson.size(); i++) { + for (int i = 0; i < static_cast(copysetMemJson.size()); i++) { if (copysetMemJson[i].isInt()) { peers_.insert(copysetMemJson[i].asInt()); } else { @@ -355,16 +393,13 @@ bool CopySetInfo::ParseFromString(const std::string &value) { peers_.insert(data.chunkserverids(i)); } lastScanSec_ = data.has_lastscansec() ? data.lastscansec() : 0; - lastScanConsistent_ = data.has_lastscanconsistent() ? - data.lastscanconsistent() : true; + lastScanConsistent_ = + data.has_lastscanconsistent() ? 
data.lastscanconsistent() : true; return ret; } -bool SplitPeerId( - const std::string &peerId, - std::string *ip, - uint32_t *port, - uint32_t *idx) { +bool SplitPeerId(const std::string &peerId, std::string *ip, uint32_t *port, + uint32_t *idx) { std::vector items; curve::common::SplitString(peerId, ":", &items); if (3 == items.size()) { @@ -378,6 +413,22 @@ bool SplitPeerId( return false; } +void ChunkServer::ToChunkServerInfo(ChunkServerInfo *csInfo) const { + csInfo->set_chunkserverid(id_); + csInfo->set_disktype(diskType_); + csInfo->set_hostip(internalHostIp_); + csInfo->set_externalip(externalHostIp_); + csInfo->set_port(port_); + csInfo->set_status(status_); + csInfo->set_onlinestate(onlineState_); + csInfo->set_version(version_); + + csInfo->set_diskstatus(state_.GetDiskState()); + csInfo->set_mountpoint(GetMountPoint()); + csInfo->set_diskcapacity(state_.GetDiskCapacity()); + csInfo->set_diskused(state_.GetDiskUsed()); +} + } // namespace topology } // namespace mds } // namespace curve diff --git a/src/mds/topology/topology_item.h b/src/mds/topology/topology_item.h index ebe4bfb6b8..ab16be4220 100644 --- a/src/mds/topology/topology_item.h +++ b/src/mds/topology/topology_item.h @@ -230,18 +230,76 @@ class LogicalPool { bool scanEnable_; }; +class Poolset { + public: + Poolset() : id_(UNINTIALIZE_ID) {} + + Poolset(PoolsetIdType id, + const std::string& name, + const std::string& type, + const std::string& desc) + : id_(id), name_(name), type_(type), desc_(desc) {} + + PoolsetIdType GetId() const { + return id_; + } + + std::string GetName() const { + return name_; + } + + std::string GetType() const { + return type_; + } + + void SetDesc(const std::string &desc) { + desc_ = desc; + } + + std::string GetDesc() const { + return desc_; + } + + void AddPhysicalPool(PhysicalPoolIdType id) { + physicalPoolList_.push_back(id); + } + + void RemovePhysicalPool(PhysicalPoolIdType id) { + physicalPoolList_.remove(id); + } + + const std::list& GetPhysicalPoolList() const { + return physicalPoolList_; + } + + bool SerializeToString(std::string *value) const; + + bool ParseFromString(const std::string &value); + + private: + PoolsetIdType id_; + std::string name_; + std::string type_; + std::string desc_; + + std::list physicalPoolList_; +}; + class PhysicalPool { public: PhysicalPool() : id_(UNINTIALIZE_ID), name_(""), + poolsetId_(UNINTIALIZE_ID), desc_(""), diskCapacity_(0) {} PhysicalPool(PoolIdType id, const std::string &name, + const PoolsetIdType poolsetId, const std::string &desc) : id_(id), name_(name), + poolsetId_(poolsetId), desc_(desc), diskCapacity_(0) {} @@ -252,9 +310,18 @@ class PhysicalPool { return name_; } + PoolsetIdType GetPoolsetId() const { + return poolsetId_; + } + + void SetPoolsetId(PoolsetIdType id) { + poolsetId_ = id; + } + void SetDesc(const std::string &desc) { desc_ = desc; } + std::string GetDesc() const { return desc_; } @@ -283,6 +350,7 @@ class PhysicalPool { private: PoolIdType id_; std::string name_; + PoolsetIdType poolsetId_; std::string desc_; // logical total capacity @@ -498,7 +566,8 @@ class ChunkServer { const std::string &diskPath, ChunkServerStatus status = READWRITE, OnlineState onlineState = OnlineState::OFFLINE, - const std::string &externalHostIp = "") + const std::string &externalHostIp = "", + std::string version = "") : id_(id), token_(token), diskType_(diskType), @@ -510,6 +579,7 @@ class ChunkServer { startUpTime_(0), status_(status), onlineState_(onlineState), + version_(std::move(version)), dirty_(false) {} ChunkServer(const 
ChunkServer& v) : @@ -524,6 +594,7 @@ class ChunkServer { status_(v.status_), onlineState_(v.onlineState_), state_(v.state_), + version_(v.version_), dirty_(v.dirty_) {} ChunkServer& operator= (const ChunkServer& v) { @@ -543,6 +614,7 @@ class ChunkServer { onlineState_ = v.onlineState_; state_ = v.state_; dirty_ = v.dirty_; + version_ = v.version_; return *this; } @@ -628,6 +700,8 @@ class ChunkServer { dirty_ = dirty; } + void SetVersion(const std::string &version) { version_ = version; } + ::curve::common::RWLock& GetRWLockRef() const { return mutex_; } @@ -636,6 +710,8 @@ class ChunkServer { bool ParseFromString(const std::string &value); + void ToChunkServerInfo(ChunkServerInfo *csinfo) const; + private: ChunkServerIdType id_; std::string token_; @@ -656,6 +732,8 @@ class ChunkServer { ChunkServerState state_; + std::string version_; // chunk server version + /** * @brief to mark whether data is dirty, for writing to storage regularly */ diff --git a/src/mds/topology/topology_service.cpp b/src/mds/topology/topology_service.cpp index fdb4818970..2671b5d8d5 100644 --- a/src/mds/topology/topology_service.cpp +++ b/src/mds/topology/topology_service.cpp @@ -576,6 +576,39 @@ void TopologyServiceImpl::GetPhysicalPool( } } +void TopologyServiceImpl::ListPhysicalPoolsInPoolset( + google::protobuf::RpcController *cntl_base, + const ListPhysicalPoolsInPoolsetRequest *request, + ListPhysicalPoolResponse *response, + google::protobuf::Closure *done) { + brpc::ClosureGuard done_guard(done); + + brpc::Controller* cntl = + static_cast(cntl_base); + + LOG(INFO) << "Received request[log_id=" << cntl->log_id() + << "] from " << cntl->remote_side() + << " to " << cntl->local_side() + << ". [ListPhysicalPoolInPoolsetRequest] " + << request->DebugString(); + + topology_->ListPhysicalPoolsInPoolset(request, response); + + if (kTopoErrCodeSuccess != response->statuscode()) { + LOG(ERROR) << "Send response[log_id=" << cntl->log_id() + << "] from " << cntl->local_side() + << " to " << cntl->remote_side() + << ". [ListPhysicalPoolResponse] " + << response->DebugString(); + } else { + LOG(INFO) << "Send response[log_id=" << cntl->log_id() + << "] from " << cntl->local_side() + << " to " << cntl->remote_side() + << ". [ListPhysicalPoolResponse] " + << response->DebugString(); + } +} + void TopologyServiceImpl::ListPhysicalPool( google::protobuf::RpcController* cntl_base, const ListPhysicalPoolRequest* request, @@ -609,6 +642,115 @@ void TopologyServiceImpl::ListPhysicalPool( } } +void TopologyServiceImpl::CreatePoolset( + google::protobuf::RpcController* cntl_base, + const PoolsetRequest* request, + PoolsetResponse* response, + google::protobuf::Closure* done) { + brpc::ClosureGuard done_guard(done); + + brpc::Controller* cntl = static_cast(cntl_base); + + LOG(INFO) << "Received request[log_id=" << cntl->log_id() << "] from " + << cntl->remote_side() << " to " << cntl->local_side() + << ". [CreatePoolset_PoolsetRequest] " << request->DebugString(); + + topology_->CreatePoolset(request, response); + + if (kTopoErrCodeSuccess != response->statuscode()) { + LOG(ERROR) << "Send response[log_id=" << cntl->log_id() << "] from " + << cntl->local_side() << " to " << cntl->remote_side() + << ". [CreatePoolset_PoolsetResponse] " + << response->DebugString(); + } else { + LOG(INFO) << "Send response[log_id=" << cntl->log_id() << "] from " + << cntl->local_side() << " to " << cntl->remote_side() + << ". 
[CreatePoolset_PoolsetResponse] " + << response->DebugString(); + } +} + +void TopologyServiceImpl::ListPoolset( + google::protobuf::RpcController* cntl_base, + const ListPoolsetRequest* request, + ListPoolsetResponse* response, + google::protobuf::Closure* done) { + brpc::ClosureGuard done_guard(done); + + brpc::Controller* cntl = static_cast(cntl_base); + + LOG(INFO) << "Received request[log_id=" << cntl->log_id() << "] from " + << cntl->remote_side() << " to " << cntl->local_side() + << ". [ListPoolsetRequest] " << request->DebugString(); + + topology_->ListPoolset(request, response); + + if (kTopoErrCodeSuccess != response->statuscode()) { + LOG(ERROR) << "Send response[log_id=" << cntl->log_id() << "] from " + << cntl->local_side() << " to " << cntl->remote_side() + << ". [ListPoolsetResponse] " << response->DebugString(); + } else { + LOG(INFO) << "Send response[log_id=" << cntl->log_id() << "] from " + << cntl->local_side() << " to " << cntl->remote_side() + << ". [ListPoolsetResponse] " << response->DebugString(); + } +} + +void TopologyServiceImpl::GetPoolset(google::protobuf::RpcController* cntl_base, + const PoolsetRequest* request, + PoolsetResponse* response, + google::protobuf::Closure* done) { + brpc::ClosureGuard done_guard(done); + + brpc::Controller* cntl = static_cast(cntl_base); + + LOG(INFO) << "Received request[log_id=" << cntl->log_id() << "] from " + << cntl->remote_side() << " to " << cntl->local_side() + << ". [GetPoolset_PoolsetRequest] " << request->DebugString(); + + topology_->GetPoolset(request, response); + + if (kTopoErrCodeSuccess != response->statuscode()) { + LOG(ERROR) << "Send response[log_id=" << cntl->log_id() << "] from " + << cntl->local_side() << " to " << cntl->remote_side() + << ". [GetPoolset_PoolsetResponse] " + << response->DebugString(); + } else { + LOG(INFO) << "Send response[log_id=" << cntl->log_id() << "] from " + << cntl->local_side() << " to " << cntl->remote_side() + << ". [GetPoolset_PoolsetResponse] " + << response->DebugString(); + } +} + +void TopologyServiceImpl::DeletePoolset( + google::protobuf::RpcController* cntl_base, + const PoolsetRequest* request, + PoolsetResponse* response, + google::protobuf::Closure* done) { + brpc::ClosureGuard done_guard(done); + + brpc::Controller* cntl = static_cast(cntl_base); + + LOG(INFO) << "Received request[log_id=" << cntl->log_id() << "] from " + << cntl->remote_side() << " to " << cntl->local_side() + << ". [DeletePoolset_PoolsetRequest] " << request->DebugString(); + + topology_->DeletePoolset(request, response); + + if (kTopoErrCodeSuccess != response->statuscode()) { + LOG(ERROR) << "Send response[log_id=" << cntl->log_id() << "] from " + << cntl->local_side() << " to " << cntl->remote_side() + << ". [DeletePoolset_PoolsetResponse] " + << response->DebugString(); + } else { + LOG(INFO) << "Send response[log_id=" << cntl->log_id() << "] from " + << cntl->local_side() << " to " << cntl->remote_side() + << ". 
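
The handlers above all follow the same shape: guard the closure, log the request, delegate to the service manager, then log the response at ERROR or INFO depending on the status code. For context, a hedged sketch of how a client-side tool might call the new CreatePoolset RPC over brpc; the stub class name and the proto include path are assumptions based on the generated topology service, not verified against the patch:

#include <brpc/channel.h>
#include <brpc/controller.h>
#include <string>
#include "proto/topology.pb.h"

// Hypothetical client helper; returns the topology status code, or -1 on RPC failure.
int CreatePoolsetRpc(const std::string &mdsAddr, const std::string &poolsetName) {
    brpc::Channel channel;
    if (channel.Init(mdsAddr.c_str(), nullptr) != 0) {
        return -1;
    }
    curve::mds::topology::TopologyService_Stub stub(&channel);

    curve::mds::topology::PoolsetRequest request;
    request.set_poolsetname(poolsetName);
    request.set_type("SSD");
    request.set_desc("created by tool");

    curve::mds::topology::PoolsetResponse response;
    brpc::Controller cntl;
    stub.CreatePoolset(&cntl, &request, &response, nullptr);  // synchronous call
    if (cntl.Failed()) {
        return -1;
    }
    return response.statuscode();  // kTopoErrCodeSuccess on success
}
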
[DeletePoolset_PoolsetlResponse] " + << response->DebugString(); + } +} + void TopologyServiceImpl::CreateLogicalPool( google::protobuf::RpcController* cntl_base, const CreateLogicalPoolRequest* request, @@ -1015,21 +1157,3 @@ void TopologyServiceImpl::ListUnAvailCopySets( } // namespace topology } // namespace mds } // namespace curve - - - - - - - - - - - - - - - - - - diff --git a/src/mds/topology/topology_service.h b/src/mds/topology/topology_service.h index 6ec7e0fe90..a3eeaed257 100644 --- a/src/mds/topology/topology_service.h +++ b/src/mds/topology/topology_service.h @@ -135,6 +135,32 @@ class TopologyServiceImpl : public TopologyService { ListPhysicalPoolResponse* response, google::protobuf::Closure* done); + virtual void ListPhysicalPoolsInPoolset( + google::protobuf::RpcController* cntl_base, + const ListPhysicalPoolsInPoolsetRequest* request, + ListPhysicalPoolResponse* response, + google::protobuf::Closure* done); + + virtual void CreatePoolset(google::protobuf::RpcController* cntl_base, + const PoolsetRequest* request, + PoolsetResponse* response, + google::protobuf::Closure* done); + + virtual void ListPoolset(google::protobuf::RpcController* cntl_base, + const ListPoolsetRequest* request, + ListPoolsetResponse* response, + google::protobuf::Closure* done); + + virtual void DeletePoolset(google::protobuf::RpcController* cntl_base, + const PoolsetRequest* request, + PoolsetResponse* response, + google::protobuf::Closure* done); + + virtual void GetPoolset(google::protobuf::RpcController* cntl_base, + const PoolsetRequest* request, + PoolsetResponse* response, + google::protobuf::Closure* done); + virtual void CreateLogicalPool(google::protobuf::RpcController* cntl_base, const CreateLogicalPoolRequest* request, CreateLogicalPoolResponse* response, diff --git a/src/mds/topology/topology_service_manager.cpp b/src/mds/topology/topology_service_manager.cpp index 1abd39f85e..c96edc446c 100644 --- a/src/mds/topology/topology_service_manager.cpp +++ b/src/mds/topology/topology_service_manager.cpp @@ -32,6 +32,7 @@ #include //NOLINT #include //NOLINT #include +#include #include "brpc/channel.h" #include "brpc/controller.h" @@ -280,20 +281,7 @@ void TopologyServiceManager::ListChunkServer( for (ChunkServerIdType id : chunkserverList) { ChunkServer cs; if (topology_->GetChunkServer(id, &cs)) { - ChunkServerInfo *csInfo = response->add_chunkserverinfos(); - csInfo->set_chunkserverid(cs.GetId()); - csInfo->set_disktype(cs.GetDiskType()); - csInfo->set_hostip(cs.GetHostIp()); - csInfo->set_externalip(cs.GetExternalHostIp()); - csInfo->set_port(cs.GetPort()); - csInfo->set_status(cs.GetStatus()); - csInfo->set_onlinestate(cs.GetOnlineState()); - - ChunkServerState st = cs.GetChunkServerState(); - csInfo->set_diskstatus(st.GetDiskState()); - csInfo->set_mountpoint(cs.GetMountPoint()); - csInfo->set_diskcapacity(st.GetDiskCapacity()); - csInfo->set_diskused(st.GetDiskUsed()); + cs.ToChunkServerInfo(response->add_chunkserverinfos()); } else { LOG(ERROR) << "Topology has counter an internal error: " << "[func:] ListChunkServer, " @@ -347,6 +335,7 @@ void TopologyServiceManager::GetChunkServer( void TopologyServiceManager::GetChunkServerInCluster( const GetChunkServerInClusterRequest *request, GetChunkServerInClusterResponse *response) { + (void)request; response->set_statuscode(kTopoErrCodeSuccess); auto chunkserverIds = topology_->GetChunkServerInCluster(); for (const auto id : chunkserverIds) { @@ -355,20 +344,7 @@ void TopologyServiceManager::GetChunkServerInCluster( 
response->set_statuscode(kTopoErrCodeChunkServerNotFound); return; } - auto *csInfo = response->add_chunkserverinfos(); - csInfo->set_chunkserverid(cs.GetId()); - csInfo->set_disktype(cs.GetDiskType()); - csInfo->set_hostip(cs.GetHostIp()); - csInfo->set_externalip(cs.GetExternalHostIp()); - csInfo->set_port(cs.GetPort()); - csInfo->set_status(cs.GetStatus()); - csInfo->set_onlinestate(cs.GetOnlineState()); - - ChunkServerState st = cs.GetChunkServerState(); - csInfo->set_diskstatus(st.GetDiskState()); - csInfo->set_mountpoint(cs.GetMountPoint()); - csInfo->set_diskcapacity(st.GetDiskCapacity()); - csInfo->set_diskused(st.GetDiskUsed()); + cs.ToChunkServerInfo(response->add_chunkserverinfos()); } } @@ -780,11 +756,73 @@ void TopologyServiceManager::ListPoolZone(const ListPoolZoneRequest* request, } } +void TopologyServiceManager::CreatePoolset(const PoolsetRequest* request, + PoolsetResponse* response) { + if ((request->has_poolsetname()) && (request->has_type())) { + PoolsetIdType pid = topology_->FindPoolset(request->poolsetname()); + if (pid != static_cast(UNINTIALIZE_ID)) { + LOG(WARNING) << "Poolset name conflict with existing poolset, id: " + << pid; + response->set_statuscode(kTopoErrCodeNameDuplicated); + return; + } + + pid = topology_->AllocatePoolsetId(); + if (pid == static_cast(UNINTIALIZE_ID)) { + response->set_statuscode(kTopoErrCodeAllocateIdFail); + return; + } + + Poolset poolset(pid, request->poolsetname(), request->type(), + request->desc()); + int errcode = topology_->AddPoolset(poolset); + if (kTopoErrCodeSuccess == errcode) { + response->set_statuscode(kTopoErrCodeSuccess); + PoolsetInfo *info = response->mutable_poolsetinfo(); + info->set_poolsetid(pid); + info->set_poolsetname(request->poolsetname()); + info->set_type(request->type()); + info->set_desc(request->desc()); + } else { + response->set_statuscode(errcode); + } + } else { + response->set_statuscode(kTopoErrCodeInvalidParam); + } +} + +void TopologyServiceManager::DeletePoolset(const PoolsetRequest* request, + PoolsetResponse* response) { + Poolset poolset; + if (request->has_poolsetid()) { + if (!topology_->GetPoolset(request->poolsetid(), &poolset)) { + response->set_statuscode(kTopoErrCodePoolsetNotFound); + return; + } + } else if (request->has_poolsetname()) { + if (!topology_->GetPoolset(request->poolsetname(), &poolset)) { + response->set_statuscode(kTopoErrCodePoolsetNotFound); + return; + } + } else { + response->set_statuscode(kTopoErrCodeInvalidParam); + return; + } + + int errcode = topology_->RemovePoolset(poolset.GetId()); + response->set_statuscode(errcode); +} + void TopologyServiceManager::CreatePhysicalPool( const PhysicalPoolRequest *request, PhysicalPoolResponse *response) { - if ((request->has_physicalpoolname()) && - (request->has_desc())) { + if ((request->has_physicalpoolname()) && (request->has_desc()) && + (request->has_poolsetname())) { + Poolset poolset; + if (!topology_->GetPoolset(request->poolsetname(), &poolset)) { + response->set_statuscode(kTopoErrCodePoolsetNotFound); + return; + } PoolIdType pid = topology_->AllocatePhysicalPoolId(); if (pid == static_cast(UNINTIALIZE_ID)) { @@ -793,6 +831,7 @@ void TopologyServiceManager::CreatePhysicalPool( } PhysicalPool pool(pid, request->physicalpoolname(), + poolset.GetId(), request->desc()); int errcode = topology_->AddPhysicalPool(pool); @@ -802,6 +841,8 @@ void TopologyServiceManager::CreatePhysicalPool( info->set_physicalpoolid(pid); info->set_physicalpoolname(request->physicalpoolname()); info->set_desc(request->desc()); + 
info->set_poolsetid(pool.GetPoolsetId()); + info->set_poolsetname(poolset.GetName()); response->set_allocated_physicalpoolinfo(info); } else { response->set_statuscode(errcode); @@ -820,8 +861,9 @@ void TopologyServiceManager::DeletePhysicalPool( response->set_statuscode(kTopoErrCodePhysicalPoolNotFound); return; } - } else if (request->has_physicalpoolname()) { - if (!topology_->GetPhysicalPool(request->physicalpoolname(), &pool)) { + } else if (request->has_physicalpoolname() && request->has_poolsetname()) { + if (!topology_->GetPhysicalPool(request->physicalpoolname(), + request->poolsetname(), &pool)) { response->set_statuscode(kTopoErrCodePhysicalPoolNotFound); return; } @@ -856,13 +898,71 @@ void TopologyServiceManager::GetPhysicalPool(const PhysicalPoolRequest *request, PhysicalPoolInfo *info = new PhysicalPoolInfo(); info->set_physicalpoolid(pool.GetId()); info->set_physicalpoolname(pool.GetName()); + + assert(pool.GetPoolsetId() != UNINTIALIZE_ID); + Poolset poolset; + if (!topology_->GetPoolset(pool.GetPoolsetId(), &poolset)) { + response->set_statuscode(kTopoErrCodeInternalError); + return; + } + info->set_poolsetid(pool.GetPoolsetId()); + info->set_poolsetname(poolset.GetName()); info->set_desc(pool.GetDesc()); response->set_allocated_physicalpoolinfo(info); } +void TopologyServiceManager::GetPoolset(const PoolsetRequest *request, + PoolsetResponse *response) { + Poolset poolset; + if (request->has_poolsetid()) { + if (!topology_->GetPoolset(request->poolsetid(), &poolset)) { + response->set_statuscode(kTopoErrCodePoolsetNotFound); + return; + } + } else if (request->has_poolsetname()) { + if (!topology_->GetPoolset(request->poolsetname(), &poolset)) { + response->set_statuscode(kTopoErrCodePoolsetNotFound); + return; + } + } else { + response->set_statuscode(kTopoErrCodeInvalidParam); + return; + } + + response->set_statuscode(kTopoErrCodeSuccess); + PoolsetInfo *info = response->mutable_poolsetinfo(); + info->set_poolsetid(poolset.GetId()); + info->set_poolsetname(poolset.GetName()); + info->set_type(poolset.GetType()); + info->set_desc(poolset.GetDesc()); +} + +void TopologyServiceManager::ListPoolset(const ListPoolsetRequest* /*request*/, + ListPoolsetResponse* response) { + response->set_statuscode(kTopoErrCodeSuccess); + auto poolsetList = topology_->GetPoolsetInCluster(); + for (PoolsetIdType id : poolsetList) { + Poolset poolset; + if (topology_->GetPoolset(id, &poolset)) { + PoolsetInfo* info = response->add_poolsetinfos(); + info->set_poolsetid(poolset.GetId()); + info->set_poolsetname(poolset.GetName()); + info->set_type(poolset.GetType()); + info->set_desc(poolset.GetDesc()); + } else { + LOG(ERROR) << "Topology has counter an internal error: " + << "[func:] ListPoolset, " + << "[msg:] Poolset not found, id = " << id; + response->set_statuscode(kTopoErrCodeInternalError); + return; + } + } +} + void TopologyServiceManager::ListPhysicalPool( const ListPhysicalPoolRequest *request, ListPhysicalPoolResponse *response) { + (void)request; response->set_statuscode(kTopoErrCodeSuccess); auto poolList = topology_->GetPhysicalPoolInCluster(); for (PoolIdType id : poolList) { @@ -871,6 +971,19 @@ void TopologyServiceManager::ListPhysicalPool( PhysicalPoolInfo *info = response->add_physicalpoolinfos(); info->set_physicalpoolid(pool.GetId()); info->set_physicalpoolname(pool.GetName()); + assert(pool.GetPoolsetId() != UNINTIALIZE_ID); + Poolset poolset; + if (!topology_->GetPoolset(pool.GetPoolsetId(), &poolset)) { + LOG(WARNING) + << "Failed to get poolset, id: " << 
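
A short sketch of exercising the manager-level CreatePoolset shown above, for example from a test or an admin path; manager is assumed to be constructed with a working Topology, and the field values are placeholders:

#include <glog/logging.h>
#include "src/mds/topology/topology_service_manager.h"

using namespace ::curve::mds::topology;  // sketch only; avoid in real code

void CreatePoolsetExample(TopologyServiceManager *manager) {
    PoolsetRequest request;
    request.set_poolsetname("hdd_poolset");
    request.set_type("HDD");
    request.set_desc("pools backed by HDD servers");

    PoolsetResponse response;
    manager->CreatePoolset(&request, &response);

    if (response.statuscode() == kTopoErrCodeSuccess) {
        // On success the response carries the freshly allocated id.
        LOG(INFO) << "created poolset, id = " << response.poolsetinfo().poolsetid();
    } else if (response.statuscode() == kTopoErrCodeNameDuplicated) {
        LOG(WARNING) << "poolset name already exists";
    }
}
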
pool.GetPoolsetId(); + response->clear_physicalpoolinfos(); + response->set_statuscode(kTopoErrCodeInternalError); + return; + } + info->set_poolsetid(pool.GetPoolsetId()); + info->set_poolsetname(poolset.GetName()); + + info->set_poolsetid(pool.GetPoolsetId()); info->set_desc(pool.GetDesc()); } else { LOG(ERROR) << "Topology has counter an internal error: " @@ -883,6 +996,44 @@ void TopologyServiceManager::ListPhysicalPool( } } +void TopologyServiceManager::ListPhysicalPoolsInPoolset( + const ListPhysicalPoolsInPoolsetRequest* request, + ListPhysicalPoolResponse* response) { + response->set_statuscode(kTopoErrCodeSuccess); + int sz = request->poolsetid_size(); + if (sz <= 0) { + response->set_statuscode(kTopoErrCodeInvalidParam); + return; + } + for (int i = 0; i < sz; ++i) { + Poolset poolset; + PoolsetIdType psId = request->poolsetid(i); + if (!topology_->GetPoolset(psId, &poolset)) { + response->set_statuscode(kTopoErrCodePoolsetNotFound); + return; + } + + const auto& pidList = poolset.GetPhysicalPoolList(); + for (PhysicalPoolIdType id : pidList) { + PhysicalPool pool; + if (topology_->GetPhysicalPool(id, &pool)) { + PhysicalPoolInfo* info = response->add_physicalpoolinfos(); + info->set_physicalpoolid(pool.GetId()); + info->set_physicalpoolname(pool.GetName()); + info->set_desc(pool.GetDesc()); + info->set_poolsetid(pool.GetPoolsetId()); + info->set_poolsetname(poolset.GetName()); + } else { + LOG(ERROR) << "Topology has counter an internal error: " + << "[func:] ListPhysicalPoolsInPoolset, " + << "[msg:] physicalpool not found, id = " << id; + response->set_statuscode(kTopoErrCodeInternalError); + return; + } + } + } +} + int TopologyServiceManager::CreateCopysetForLogicalPool( const LogicalPool &lPool, uint32_t *scatterWidth, @@ -1570,6 +1721,7 @@ void TopologyServiceManager::GetCopyset(const GetCopysetRequest* request, void TopologyServiceManager::GetClusterInfo( const GetClusterInfoRequest* request, GetClusterInfoResponse* response) { + (void)request; ClusterInformation info; if (topology_->GetClusterInfo(&info)) { response->set_statuscode(kTopoErrCodeSuccess); @@ -1599,6 +1751,7 @@ void TopologyServiceManager::SetCopysetsAvailFlag( void TopologyServiceManager::ListUnAvailCopySets( const ListUnAvailCopySetsRequest* request, ListUnAvailCopySetsResponse* response) { + (void)request; std::vector copysets = topology_->GetCopySetsInCluster(); for (const CopySetKey& copyset : copysets) { diff --git a/src/mds/topology/topology_service_manager.h b/src/mds/topology/topology_service_manager.h index e67e3848c5..2de2ab5c1d 100644 --- a/src/mds/topology/topology_service_manager.h +++ b/src/mds/topology/topology_service_manager.h @@ -124,6 +124,22 @@ class TopologyServiceManager { virtual void ListPhysicalPool(const ListPhysicalPoolRequest *request, ListPhysicalPoolResponse *response); + virtual void ListPhysicalPoolsInPoolset( + const ListPhysicalPoolsInPoolsetRequest* request, + ListPhysicalPoolResponse* response); + + virtual void CreatePoolset(const PoolsetRequest *request, + PoolsetResponse *response); + + virtual void GetPoolset(const PoolsetRequest *request, + PoolsetResponse *response); + + virtual void ListPoolset(const ListPoolsetRequest *request, + ListPoolsetResponse *response); + + virtual void DeletePoolset(const PoolsetRequest *request, + PoolsetResponse *response); + virtual void CreateLogicalPool(const CreateLogicalPoolRequest *request, CreateLogicalPoolResponse *response); diff --git a/src/mds/topology/topology_storage_codec.cpp b/src/mds/topology/topology_storage_codec.cpp 
index 2b0636eda3..eae5a0abc0 100644 --- a/src/mds/topology/topology_storage_codec.cpp +++ b/src/mds/topology/topology_storage_codec.cpp @@ -26,11 +26,29 @@ #include "src/common/namespace_define.h" #include "src/common/encode.h" - namespace curve { namespace mds { namespace topology { +std::string TopologyStorageCodec::EncodePoolsetKey(PoolsetIdType id) { + std::string key = POOLSETKEYPREFIX; + size_t prefixLen = key.size(); + key.resize(prefixLen + sizeof(uint64_t)); + ::curve::common::EncodeBigEndian(&(key[prefixLen]), id); + return key; +} + +bool TopologyStorageCodec::EncodePoolsetData( + const Poolset &data, std::string *value) { + return data.SerializeToString(value); +} + +bool TopologyStorageCodec::DecodePoolsetData( + const std::string &value, Poolset *data) { + return data->ParseFromString(value); +} + + std::string TopologyStorageCodec::EncodeLogicalPoolKey( LogicalPoolIdType id) { std::string key = LOGICALPOOLKEYPREFIX; @@ -160,4 +178,3 @@ bool TopologyStorageCodec::DecodeCluserInfoData(const std::string &value, } // namespace topology } // namespace mds } // namespace curve - diff --git a/src/mds/topology/topology_storage_codec.h b/src/mds/topology/topology_storage_codec.h index 0ce7e1ad84..ea28b5e895 100644 --- a/src/mds/topology/topology_storage_codec.h +++ b/src/mds/topology/topology_storage_codec.h @@ -41,6 +41,8 @@ using ::curve::common::CHUNKSERVERKEYEND; using ::curve::common::CLUSTERINFOKEY; using ::curve::common::COPYSETKEYPREFIX; using ::curve::common::COPYSETKEYEND; +using ::curve::common::POOLSETKEYPREFIX; +using ::curve::common::POOLSETKEYEND; namespace curve { namespace mds { @@ -52,6 +54,11 @@ class TopologyStorageCodec { // Encode__Key: attach item id to item prefix // Encode__Data: convert data structure to a string // Decode__Data: convert a string to data structure + + std::string EncodePoolsetKey(PoolsetIdType id); + bool EncodePoolsetData(const Poolset &data, std::string *value); + bool DecodePoolsetData(const std::string &value, Poolset *data); + std::string EncodeLogicalPoolKey( LogicalPoolIdType id); bool EncodeLogicalPoolData( diff --git a/src/mds/topology/topology_storge.h b/src/mds/topology/topology_storge.h index 01225ebf3c..05c168fecd 100644 --- a/src/mds/topology/topology_storge.h +++ b/src/mds/topology/topology_storge.h @@ -42,6 +42,9 @@ class TopologyStorage { TopologyStorage() {} virtual ~TopologyStorage() {} + virtual bool LoadPoolset( + std::unordered_map *poolsetMap, + PoolsetIdType *maxPoolsetId) = 0; virtual bool LoadLogicalPool( std::unordered_map *logicalPoolMap, PoolIdType *maxLogicalPoolId) = 0; @@ -61,6 +64,7 @@ class TopologyStorage { std::map *copySetMap, std::map *copySetIdMaxMap) = 0; + virtual bool StoragePoolset(const Poolset &data) = 0; virtual bool StorageLogicalPool(const LogicalPool &data) = 0; virtual bool StoragePhysicalPool(const PhysicalPool &data) = 0; virtual bool StorageZone(const Zone &data) = 0; @@ -68,6 +72,7 @@ class TopologyStorage { virtual bool StorageChunkServer(const ChunkServer &data) = 0; virtual bool StorageCopySet(const CopySetInfo &data) = 0; + virtual bool DeletePoolset(PoolsetIdType id) = 0; virtual bool DeleteLogicalPool(PoolIdType id) = 0; virtual bool DeletePhysicalPool(PoolIdType id) = 0; virtual bool DeleteZone(ZoneIdType id) = 0; diff --git a/src/mds/topology/topology_storge_etcd.cpp b/src/mds/topology/topology_storge_etcd.cpp index 51f15e4929..5b26f13c31 100644 --- a/src/mds/topology/topology_storge_etcd.cpp +++ b/src/mds/topology/topology_storge_etcd.cpp @@ -31,6 +31,42 @@ namespace curve { 
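
EncodePoolsetKey above appends the 64-bit id in big-endian order, so lexicographic key order in etcd matches numeric id order and the whole set can be read back with a single [POOLSETKEYPREFIX, POOLSETKEYEND) range scan. A self-contained sketch of the resulting key layout (MakePoolsetKey is illustrative, not the codec method):

#include <cstdint>
#include <string>

std::string MakePoolsetKey(const std::string &prefix, uint64_t id) {
    std::string key = prefix;
    for (int shift = 56; shift >= 0; shift -= 8) {
        key.push_back(static_cast<char>((id >> shift) & 0xFF));  // most significant byte first
    }
    return key;  // prefix + 8-byte big-endian id
}
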
namespace mds { namespace topology { +bool TopologyStorageEtcd::LoadPoolset( + std::unordered_map* poolsetMap, + PoolsetIdType* maxPoolsetId) { + std::vector out; + poolsetMap->clear(); + *maxPoolsetId = 0; + int errCode = client_->List(POOLSETKEYPREFIX, POOLSETKEYEND, &out); + if (errCode == EtcdErrCode::EtcdKeyNotExist) { + return true; + } + if (errCode != EtcdErrCode::EtcdOK) { + LOG(ERROR) << "etcd list err:" << errCode; + return false; + } + for (size_t i = 0; i < out.size(); i++) { + Poolset data; + errCode = codec_->DecodePoolsetData(out[i], &data); + if (!errCode) { + LOG(ERROR) << "DecodePoolsetData err"; + return false; + } + PoolsetIdType id = data.GetId(); + auto ret = poolsetMap->emplace(id, std::move(data)); + if (!ret.second) { + LOG(ERROR) << "LoadPoolset: " + << "PoolsetId duplicated, PoolsetId = " + << id; + return false; + } + if (*maxPoolsetId < id) { + *maxPoolsetId = id; + } + } + return true; +} + bool TopologyStorageEtcd::LoadLogicalPool( std::unordered_map *logicalPoolMap, PoolIdType *maxLogicalPoolId) { @@ -45,7 +81,7 @@ bool TopologyStorageEtcd::LoadLogicalPool( LOG(ERROR) << "etcd list err:" << errCode; return false; } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { LogicalPool data; errCode = codec_->DecodeLogicalPoolData(out[i], &data); if (!errCode) { @@ -82,7 +118,7 @@ bool TopologyStorageEtcd::LoadPhysicalPool( LOG(ERROR) << "etcd list err:" << errCode; return false; } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { PhysicalPool data; errCode = codec_->DecodePhysicalPoolData(out[i], &data); if (!errCode) { @@ -118,7 +154,7 @@ bool TopologyStorageEtcd::LoadZone( LOG(ERROR) << "etcd list err:" << errCode; return false; } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { Zone data; errCode = codec_->DecodeZoneData(out[i], &data); if (!errCode) { @@ -154,7 +190,7 @@ bool TopologyStorageEtcd::LoadServer( LOG(ERROR) << "etcd list err:" << errCode; return false; } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { Server data; errCode = codec_->DecodeServerData(out[i], &data); if (!errCode) { @@ -190,7 +226,7 @@ bool TopologyStorageEtcd::LoadChunkServer( LOG(ERROR) << "etcd list err:" << errCode; return false; } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { ChunkServer data; errCode = codec_->DecodeChunkServerData(out[i], &data); if (!errCode) { @@ -228,7 +264,7 @@ bool TopologyStorageEtcd::LoadCopySet( LOG(ERROR) << "etcd list err:" << errCode; return false; } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { CopySetInfo data; errCode = codec_->DecodeCopySetData(out[i], &data); if (!errCode) { @@ -254,6 +290,25 @@ bool TopologyStorageEtcd::LoadCopySet( return true; } +bool TopologyStorageEtcd::StoragePoolset(const Poolset &data) { + std::string key = codec_->EncodePoolsetKey(data.GetId()); + std::string value; + bool ret = codec_->EncodePoolsetData(data, &value); + if (!ret) { + LOG(ERROR) << "EncodePoolsetData err" + << ", poolsetId = " << data.GetId(); + return false; + } + int errCode = client_->Put(key, value); + if (errCode != EtcdErrCode::EtcdOK) { + LOG(ERROR) << "Put Poolset into etcd err" + << ", errcode = " << errCode + << ", poolsetId = " << data.GetId(); + return false; + } + return true; +} + bool TopologyStorageEtcd::StorageLogicalPool(const LogicalPool &data) { std::string key = codec_->EncodeLogicalPoolKey(data.GetId()); std::string 
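
LoadPoolset above pairs with initPoolsetIdGenerator from earlier in this patch: the poolsets are loaded into the in-memory map and the largest id seeds the generator. A hedged sketch of that glue as it might appear in topology init; the map's template arguments and the wiring are assumptions following the pattern used for the other resource types, not code from the patch:

#include <glog/logging.h>
#include <unordered_map>
#include "src/mds/topology/topology_id_generator.h"
#include "src/mds/topology/topology_item.h"
#include "src/mds/topology/topology_storge.h"

using ::curve::mds::topology::Poolset;
using ::curve::mds::topology::PoolsetIdType;
using ::curve::mds::topology::TopologyIdGenerator;
using ::curve::mds::topology::TopologyStorage;

bool LoadPoolsetsOnInit(TopologyStorage *storage, TopologyIdGenerator *idGen,
                        std::unordered_map<PoolsetIdType, Poolset> *poolsetMap) {
    PoolsetIdType maxPoolsetId = 0;
    if (!storage->LoadPoolset(poolsetMap, &maxPoolsetId)) {
        LOG(ERROR) << "LoadPoolset from storage failed";
        return false;
    }
    // Seed the generator so newly created poolsets get ids above the loaded ones.
    idGen->initPoolsetIdGenerator(maxPoolsetId);
    return true;
}
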
value; @@ -371,6 +426,18 @@ bool TopologyStorageEtcd::StorageCopySet(const CopySetInfo &data) { return true; } +bool TopologyStorageEtcd::DeletePoolset(PoolsetIdType id) { + std::string key = codec_->EncodePoolsetKey(id); + int errCode = client_->Delete(key); + if (errCode != EtcdErrCode::EtcdOK) { + LOG(ERROR) << "delete Poolset from etcd err" + << ", errcode = " << errCode + << ", poolsetId = " << id; + return false; + } + return true; +} + bool TopologyStorageEtcd::DeleteLogicalPool(PoolIdType id) { std::string key = codec_->EncodeLogicalPoolKey(id); int errCode = client_->Delete(key); @@ -510,4 +577,3 @@ bool TopologyStorageEtcd::StorageClusterInfo(const ClusterInformation &info) { } // namespace topology } // namespace mds } // namespace curve - diff --git a/src/mds/topology/topology_storge_etcd.h b/src/mds/topology/topology_storge_etcd.h index 9f3ab4e56a..ca6d8f9739 100644 --- a/src/mds/topology/topology_storge_etcd.h +++ b/src/mds/topology/topology_storge_etcd.h @@ -47,7 +47,9 @@ class TopologyStorageEtcd : public TopologyStorage { std::shared_ptr codec) : client_(client), codec_(codec) {} - + bool LoadPoolset( + std::unordered_map *poolsetMap, + PoolsetIdType *maxPoolsetId) override; bool LoadLogicalPool( std::unordered_map *logicalPoolMap, PoolIdType *maxLogicalPoolId) override; @@ -67,6 +69,7 @@ class TopologyStorageEtcd : public TopologyStorage { std::map *copySetMap, std::map *copySetIdMaxMap) override; + bool StoragePoolset(const Poolset &data) override; bool StorageLogicalPool(const LogicalPool &data) override; bool StoragePhysicalPool(const PhysicalPool &data) override; bool StorageZone(const Zone &data) override; @@ -74,6 +77,7 @@ class TopologyStorageEtcd : public TopologyStorage { bool StorageChunkServer(const ChunkServer &data) override; bool StorageCopySet(const CopySetInfo &data) override; + bool DeletePoolset(PoolsetIdType id) override; bool DeleteLogicalPool(PoolIdType id) override; bool DeletePhysicalPool(PoolIdType id) override; bool DeleteZone(ZoneIdType id) override; diff --git a/src/snapshotcloneserver/clone/clone_closure.h b/src/snapshotcloneserver/clone/clone_closure.h index 68a9199c4c..65847d109e 100644 --- a/src/snapshotcloneserver/clone/clone_closure.h +++ b/src/snapshotcloneserver/clone/clone_closure.h @@ -34,60 +34,38 @@ #include "src/common/concurrent/name_lock.h" #include "src/common/concurrent/dlock.h" -using ::google::protobuf::RpcController; -using ::google::protobuf::Closure; -using ::curve::common::NameLockGuard; using ::curve::common::DLock; +using ::curve::common::NameLockGuard; +using ::google::protobuf::Closure; +using ::google::protobuf::RpcController; namespace curve { namespace snapshotcloneserver { class CloneClosure : public Closure { public: - CloneClosure(brpc::Controller* bcntl = nullptr, - Closure* done = nullptr) - : bcntl_(bcntl), - done_(done), - requestId_(""), - taskId_(""), - dlock_(nullptr), - retCode_(kErrCodeInternalError) {} - - brpc::Controller * GetController() { - return bcntl_; - } + explicit CloneClosure(brpc::Controller *bcntl = nullptr, + Closure *done = nullptr) + : dlock_(nullptr), bcntl_(bcntl), done_(done), requestId_(""), + taskId_(""), retCode_(kErrCodeInternalError) {} - void SetRequestId(const UUID &requestId) { - requestId_ = requestId; - } + brpc::Controller *GetController() { return bcntl_; } - void SetTaskId(const TaskIdType &taskId) { - taskId_ = taskId; - } + void SetRequestId(const UUID &requestId) { requestId_ = requestId; } - TaskIdType GetTaskId() { - return taskId_; - } + void SetTaskId(const 
TaskIdType &taskId) { taskId_ = taskId; } - void SetErrCode(int retCode) { - retCode_ = retCode; - } + TaskIdType GetTaskId() { return taskId_; } - int GetErrCode() { - return retCode_; - } + void SetErrCode(int retCode) { retCode_ = retCode; } - void SetDestFileLock(std::shared_ptr lock) { - lock_ = lock; - } + int GetErrCode() { return retCode_; } - void SetDLock(std::shared_ptr lock) { - dlock_ = lock; - } + void SetDestFileLock(std::shared_ptr lock) { lock_ = lock; } - std::shared_ptr GetDLock() { - return dlock_; - } + void SetDLock(std::shared_ptr lock) { dlock_ = lock; } + + std::shared_ptr GetDLock() { return dlock_; } void SetDestFileName(const std::string &destFileName) { destFileName_ = destFileName; @@ -100,9 +78,8 @@ class CloneClosure : public Closure { bcntl_->http_response().set_status_code( brpc::HTTP_STATUS_INTERNAL_SERVER_ERROR); butil::IOBufBuilder os; - std::string msg = BuildErrorMessage(retCode_, - requestId_, - taskId_); + std::string msg = + BuildErrorMessage(retCode_, requestId_, taskId_); os << msg; os.move_to(bcntl_->response_attachment()); } else { @@ -137,7 +114,7 @@ class CloneClosure : public Closure { std::shared_ptr dlock_; std::string destFileName_; brpc::Controller *bcntl_; - Closure* done_; + Closure *done_; UUID requestId_; TaskIdType taskId_; int retCode_; diff --git a/src/snapshotcloneserver/clone/clone_core.cpp b/src/snapshotcloneserver/clone/clone_core.cpp index e2843f6cd9..2974ed06c8 100644 --- a/src/snapshotcloneserver/clone/clone_core.cpp +++ b/src/snapshotcloneserver/clone/clone_core.cpp @@ -56,6 +56,7 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, const std::string &destination, bool lazyFlag, CloneTaskType taskType, + std::string poolset, CloneInfo *cloneInfo) { // 查询数据库中是否有任务正在执行 std::vector cloneInfoList; @@ -67,6 +68,7 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, << ", source = " << source << ", user = " << user << ", destination = " << destination + << ", poolset = " << poolset << ", Exist CloneInfo : " << info; // is clone if (taskType == CloneTaskType::kClone) { @@ -144,7 +146,8 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, << "but task not match! 
" << "source = " << source << ", user = " << user - << ", destination = " << destination; + << ", destination = " << destination + << ", poolset = " << poolset; return kErrCodeFileExist; } } else { @@ -152,9 +155,15 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, LOG(ERROR) << "Clone dest file must not exist" << ", source = " << source << ", user = " << user - << ", destination = " << destination; + << ", destination = " << destination + << ", poolset = " << poolset; return kErrCodeFileExist; } + } else if (CloneTaskType::kRecover == taskType) { + // recover任务,卷的poolset信息不变 + poolset = destFInfo.poolset; + } else { + assert(false); } break; case -LIBCURVE_ERROR::NOTEXIST: @@ -174,7 +183,7 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, return kErrCodeInternalError; } - //是否为快照 + // 是否为快照 SnapshotInfo snapInfo; CloneFileType fileType; @@ -198,6 +207,7 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, << ", source = " << source << ", user = " << user << ", destination = " << destination + << ", poolset = " << poolset << ", snapshot.user = " << snapInfo.GetUser(); return kErrCodeInvalidUser; } @@ -217,7 +227,8 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, LOG(ERROR) << "Clone source file not exist" << ", source = " << source << ", user = " << user - << ", destination = " << destination; + << ", destination = " << destination + << ", poolset = " << poolset; return kErrCodeFileNotExist; default: LOG(ERROR) << "GetFileInfo encounter an error" @@ -239,7 +250,7 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, UUID uuid = UUIDGenerator().GenerateUUID(); CloneInfo info(uuid, user, taskType, - source, destination, fileType, lazyFlag); + source, destination, poolset, fileType, lazyFlag); if (CloneTaskType::kClone == taskType) { info.SetStatus(CloneStatus::cloning); } else { @@ -255,7 +266,8 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, << ", taskId = " << uuid << ", user = " << user << ", source = " << source - << ", destination = " << destination; + << ", destination = " << destination + << ", poolset = " << poolset; if (CloneFileType::kSnapshot == fileType) { snapshotRef_->DecrementSnapshotRef(source); } @@ -290,6 +302,7 @@ int CloneCoreImpl::FlattenPre( const std::string &user, const TaskIdType &taskId, CloneInfo *cloneInfo) { + (void)user; int ret = metaStore_->GetCloneInfo(taskId, cloneInfo); if (ret < 0) { return kErrCodeFileNotExist; @@ -457,6 +470,16 @@ int CloneCoreImpl::BuildFileInfoFromSnapshot( newFileInfo->length = snapInfo.GetFileLength(); newFileInfo->stripeUnit = snapInfo.GetStripeUnit(); newFileInfo->stripeCount = snapInfo.GetStripeCount(); + + if (task->GetCloneInfo().GetTaskType() == CloneTaskType::kRecover && + task->GetCloneInfo().GetPoolset().empty()) { + LOG(ERROR) << "Recover task's poolset should not be empty"; + return kErrCodeInternalError; + } + newFileInfo->poolset = !task->GetCloneInfo().GetPoolset().empty() + ? task->GetCloneInfo().GetPoolset() + : snapInfo.GetPoolset(); + if (IsRecover(task)) { FInfo fInfo; std::string destination = task->GetCloneInfo().GetDest(); @@ -562,6 +585,15 @@ int CloneCoreImpl::BuildFileInfoFromFile( newFileInfo->stripeUnit = fInfo.stripeUnit; newFileInfo->stripeCount = fInfo.stripeCount; + if (task->GetCloneInfo().GetTaskType() == CloneTaskType::kRecover && + task->GetCloneInfo().GetPoolset().empty()) { + LOG(ERROR) << "Recover task's poolset should not be empty"; + return kErrCodeInternalError; + } + newFileInfo->poolset = !task->GetCloneInfo().GetPoolset().empty() + ? 
task->GetCloneInfo().GetPoolset() + : fInfo.poolset; + uint64_t fileLength = fInfo.length; uint64_t segmentSize = fInfo.segmentsize; uint64_t chunkSize = fInfo.chunksize; @@ -622,6 +654,7 @@ int CloneCoreImpl::CreateCloneFile( uint32_t chunkSize = fInfo.chunksize; uint64_t stripeUnit = fInfo.stripeUnit; uint64_t stripeCount = fInfo.stripeCount; + const auto& poolset = fInfo.poolset; std::string source = ""; // 只有从文件克隆才带clone source @@ -632,7 +665,7 @@ int CloneCoreImpl::CreateCloneFile( FInfo fInfoOut; int ret = client_->CreateCloneFile(source, fileName, mdsRootUser_, fileLength, seqNum, chunkSize, - stripeUnit, stripeCount, &fInfoOut); + stripeUnit, stripeCount, poolset, &fInfoOut); if (ret == LIBCURVE_ERROR::OK) { // nothing } else if (ret == -LIBCURVE_ERROR::EXISTS) { @@ -868,6 +901,8 @@ int CloneCoreImpl::CompleteCloneMeta( std::shared_ptr task, const FInfo &fInfo, const CloneSegmentMap &segInfos) { + (void)fInfo; + (void)segInfos; std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); std::string user = task->GetCloneInfo().GetUser(); @@ -1077,6 +1112,7 @@ int CloneCoreImpl::ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( int CloneCoreImpl::ChangeOwner( std::shared_ptr task, const FInfo &fInfo) { + (void)fInfo; std::string user = task->GetCloneInfo().GetUser(); std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); @@ -1171,6 +1207,8 @@ int CloneCoreImpl::CompleteCloneFile( std::shared_ptr task, const FInfo &fInfo, const CloneSegmentMap &segInfos) { + (void)fInfo; + (void)segInfos; std::string fileName; if (IsLazy(task)) { fileName = task->GetCloneInfo().GetDest(); @@ -1542,7 +1580,7 @@ void CloneCoreImpl::HandleCleanCloneOrRecoverTask( if (CloneStatus::errorCleaning == task->GetCloneInfo().GetStatus()) { // 错误情况下可能未清除镜像被克隆标志 if (IsFile(task)) { - //重新发送 + // 重新发送 std::string source = task->GetCloneInfo().GetSrc(); NameLockGuard lockGuard(cloneRef_->GetLock(), source); if (cloneRef_->GetRef(source) == 0) { @@ -1681,4 +1719,3 @@ int CloneCoreImpl::HandleDeleteCloneInfo(const CloneInfo &cloneInfo) { } // namespace snapshotcloneserver } // namespace curve - diff --git a/src/snapshotcloneserver/clone/clone_core.h b/src/snapshotcloneserver/clone/clone_core.h index 67332b5b89..19c1c20c9d 100644 --- a/src/snapshotcloneserver/clone/clone_core.h +++ b/src/snapshotcloneserver/clone/clone_core.h @@ -58,6 +58,7 @@ class CloneCore { * @param destination 克隆或恢复的目标文件名 * @param lazyFlag 是否lazy * @param taskType 克隆或恢复 + * @param poolset 克隆时目标文件的poolset * @param[out] info 克隆或恢复任务信息 * * @return 错误码 @@ -67,6 +68,7 @@ class CloneCore { const std::string &destination, bool lazyFlag, CloneTaskType taskType, + std::string poolset, CloneInfo *info) = 0; /** @@ -249,6 +251,7 @@ class CloneCoreImpl : public CloneCore { const std::string &destination, bool lazyFlag, CloneTaskType taskType, + std::string poolset, CloneInfo *info) override; void HandleCloneOrRecoverTask(std::shared_ptr task) override; diff --git a/src/snapshotcloneserver/clone/clone_service_manager.cpp b/src/snapshotcloneserver/clone/clone_service_manager.cpp index 2fe123e8cd..9b7439fecf 100644 --- a/src/snapshotcloneserver/clone/clone_service_manager.cpp +++ b/src/snapshotcloneserver/clone/clone_service_manager.cpp @@ -63,6 +63,7 @@ void CloneServiceManager::Stop() { int CloneServiceManager::CloneFile(const UUID &source, const std::string &user, const std::string &destination, + const std::string &poolset, bool lazyFlag, std::shared_ptr closure, TaskIdType *taskId) { @@ -75,7 +76,7 @@ int 
CloneServiceManager::CloneFile(const UUID &source, CloneInfo cloneInfo; int ret = cloneCore_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kClone, &cloneInfo); + CloneTaskType::kClone, poolset, &cloneInfo); if (ret < 0) { if (kErrCodeTaskExist == ret) { // 任务已存在的情况下返回成功,使接口幂等 @@ -89,7 +90,8 @@ int CloneServiceManager::CloneFile(const UUID &source, << ", source = " << source << ", user = " << user << ", destination = " << destination - << ", lazyFlag = " << lazyFlag; + << ", lazyFlag = " << lazyFlag + << ", poolset = " << poolset; closure->SetErrCode(ret); return ret; } @@ -119,7 +121,7 @@ int CloneServiceManager::RecoverFile(const UUID &source, CloneInfo cloneInfo; int ret = cloneCore_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kRecover, &cloneInfo); + CloneTaskType::kRecover, "", &cloneInfo); if (ret < 0) { if (kErrCodeTaskExist == ret) { // 任务已存在的情况下返回成功,使接口幂等 diff --git a/src/snapshotcloneserver/clone/clone_service_manager.h b/src/snapshotcloneserver/clone/clone_service_manager.h index 367bbd74be..0cd66e9d09 100644 --- a/src/snapshotcloneserver/clone/clone_service_manager.h +++ b/src/snapshotcloneserver/clone/clone_service_manager.h @@ -249,6 +249,7 @@ class CloneServiceManager { virtual int CloneFile(const UUID &source, const std::string &user, const std::string &destination, + const std::string &poolset, bool lazyFlag, std::shared_ptr closure, TaskIdType *taskId); diff --git a/src/snapshotcloneserver/common/curvefs_client.cpp b/src/snapshotcloneserver/common/curvefs_client.cpp index e2dec3591b..00f27de524 100644 --- a/src/snapshotcloneserver/common/curvefs_client.cpp +++ b/src/snapshotcloneserver/common/curvefs_client.cpp @@ -182,13 +182,15 @@ int CurveFsClientImpl::CreateCloneFile( uint32_t chunkSize, uint64_t stripeUnit, uint64_t stripeCount, + const std::string& poolset, FInfo* fileInfo) { UserInfo userInfo = GetUserInfo(user); RetryMethod method = [this, &source, &filename, - userInfo, size, sn, chunkSize, stripeUnit, stripeCount, fileInfo] () { + userInfo, size, sn, chunkSize, stripeUnit, stripeCount, fileInfo, + poolset] () { return snapClient_->CreateCloneFile(source, filename, userInfo, size, - sn, chunkSize, stripeUnit, stripeCount, fileInfo); + sn, chunkSize, stripeUnit, stripeCount, poolset, fileInfo); }; RetryCondition condition = [] (int ret) { return ret != LIBCURVE_ERROR::OK && @@ -402,4 +404,3 @@ int CurveFsClientImpl::ChangeOwner(const std::string& filename, } // namespace snapshotcloneserver } // namespace curve - diff --git a/src/snapshotcloneserver/common/curvefs_client.h b/src/snapshotcloneserver/common/curvefs_client.h index 712c661595..131f01659c 100644 --- a/src/snapshotcloneserver/common/curvefs_client.h +++ b/src/snapshotcloneserver/common/curvefs_client.h @@ -234,6 +234,7 @@ class CurveFsClient { uint32_t chunkSize, uint64_t stripeUnit, uint64_t stripeCount, + const std::string& poolset, FInfo* fileInfo) = 0; /** @@ -458,6 +459,7 @@ class CurveFsClientImpl : public CurveFsClient { uint32_t chunkSize, uint64_t stripeUnit, uint64_t stripeCount, + const std::string& poolset, FInfo* fileInfo) override; int CreateCloneChunk( @@ -540,4 +542,3 @@ class CurveFsClientImpl : public CurveFsClient { } // namespace snapshotcloneserver } // namespace curve #endif // SRC_SNAPSHOTCLONESERVER_COMMON_CURVEFS_CLIENT_H_ - diff --git a/src/snapshotcloneserver/common/snapshotclone_info.cpp b/src/snapshotcloneserver/common/snapshotclone_info.cpp index dd5bf3653c..575a1c5790 100644 --- 
a/src/snapshotcloneserver/common/snapshotclone_info.cpp +++ b/src/snapshotcloneserver/common/snapshotclone_info.cpp @@ -43,6 +43,7 @@ bool CloneInfo::SerializeToString(std::string *value) const { data.set_islazy(isLazy_); data.set_nextstep(static_cast(nextStep_)); data.set_status(static_cast(status_)); + data.set_poolset(poolset_); return data.SerializeToString(value); } @@ -61,6 +62,7 @@ bool CloneInfo::ParseFromString(const std::string &value) { isLazy_ = data.islazy(); nextStep_ = static_cast(data.nextstep()); status_ = static_cast(data.status()); + poolset_ = data.poolset(); return ret; } @@ -71,6 +73,7 @@ std::ostream& operator<<(std::ostream& os, const CloneInfo &cloneInfo) { << static_cast (cloneInfo.GetTaskType()); os << ", source : " << cloneInfo.GetSrc(); os << ", destination : " << cloneInfo.GetDest(); + os << ", poolset : " << cloneInfo.GetPoolset(); os << ", originId : " << cloneInfo.GetOriginId(); os << ", destId : " << cloneInfo.GetDestId(); os << ", time : " << cloneInfo.GetTime(); @@ -94,6 +97,7 @@ bool SnapshotInfo::SerializeToString(std::string *value) const { data.set_filelength(fileLength_); data.set_stripeunit(stripeUnit_); data.set_stripecount(stripeCount_); + data.set_poolset(poolset_); data.set_time(time_); data.set_status(static_cast(status_)); return data.SerializeToString(value); @@ -120,6 +124,7 @@ bool SnapshotInfo::ParseFromString(const std::string &value) { } else { stripeCount_ = 0; } + poolset_ = data.poolset(); time_ = data.time(); status_ = static_cast(data.status()); return ret; @@ -136,6 +141,7 @@ std::ostream& operator<<(std::ostream& os, const SnapshotInfo &snapshotInfo) { os << ", fileLength : " << snapshotInfo.GetFileLength(); os << ", stripeUnit :" << snapshotInfo.GetStripeUnit(); os << ", stripeCount :" << snapshotInfo.GetStripeCount(); + os << ", poolset: " << snapshotInfo.GetPoolset(); os << ", time : " << snapshotInfo.GetCreateTime(); os << ", status : " << static_cast(snapshotInfo.GetStatus()); os << " }"; diff --git a/src/snapshotcloneserver/common/snapshotclone_info.h b/src/snapshotcloneserver/common/snapshotclone_info.h index 00433695d2..766ae00e05 100644 --- a/src/snapshotcloneserver/common/snapshotclone_info.h +++ b/src/snapshotcloneserver/common/snapshotclone_info.h @@ -79,6 +79,7 @@ class CloneInfo { CloneTaskType type, const std::string &source, const std::string &destination, + const std::string &poolset, CloneFileType fileType, bool isLazy) : taskId_(taskId), @@ -86,6 +87,7 @@ class CloneInfo { type_(type), source_(source), destination_(destination), + poolset_(poolset), originId_(0), destinationId_(0), time_(0), @@ -99,6 +101,7 @@ class CloneInfo { CloneTaskType type, const std::string &source, const std::string &destination, + const std::string &poolset, uint64_t originId, uint64_t destinationId, uint64_t time, @@ -111,6 +114,7 @@ class CloneInfo { type_(type), source_(source), destination_(destination), + poolset_(poolset), originId_(originId), destinationId_(destinationId), time_(time), @@ -159,6 +163,14 @@ class CloneInfo { destination_ = dest; } + std::string GetPoolset() const { + return poolset_; + } + + void SetPoolset(const std::string &poolset) { + poolset_ = poolset; + } + uint64_t GetOriginId() const { return originId_; } @@ -229,6 +241,8 @@ class CloneInfo { std::string source_; // 目标文件名 std::string destination_; + // 目标文件所在的poolset + std::string poolset_; // 被恢复的原始文件id, 仅用于恢复 uint64_t originId_; // 目标文件id @@ -247,7 +261,7 @@ class CloneInfo { std::ostream& operator<<(std::ostream& os, const CloneInfo &cloneInfo); 
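The clone_core.cpp hunks earlier in this patch reduce to one placement rule for the new poolset field: a recover always keeps the existing destination volume's poolset, while a clone uses the poolset passed in the request and only falls back to the poolset recorded for the source (snapshot or file) when none was given. A minimal standalone sketch of that rule, using stand-in types rather than the real CloneTaskType/CloneInfo:

#include <cassert>
#include <string>

enum class TaskType { kClone, kRecover };  // stand-in for CloneTaskType

// requested:   poolset carried in the clone request (may be empty)
// destPoolset: poolset of the already existing destination volume (recover case)
// srcPoolset:  poolset recorded in the snapshot / source file info
std::string ChoosePoolset(TaskType type, const std::string& requested,
                          const std::string& destPoolset,
                          const std::string& srcPoolset) {
    if (type == TaskType::kRecover) {
        return destPoolset;  // recover never moves the volume to another poolset
    }
    return requested.empty() ? srcPoolset : requested;
}

int main() {
    assert(ChoosePoolset(TaskType::kClone, "ssd_poolset", "", "hdd") == "ssd_poolset");
    assert(ChoosePoolset(TaskType::kClone, "", "", "hdd") == "hdd");
    assert(ChoosePoolset(TaskType::kRecover, "", "orig", "hdd") == "orig");
    return 0;
}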
-//快照处理状态 +// 快照处理状态 enum class Status{ done = 0, pending, @@ -257,7 +271,7 @@ enum class Status{ error }; -//快照信息 +// 快照信息 class SnapshotInfo { public: SnapshotInfo() @@ -297,6 +311,7 @@ class SnapshotInfo { uint64_t filelength, uint64_t stripeUnit, uint64_t stripeCount, + const std::string& poolset, uint64_t time, Status status) :uuid_(uuid), @@ -309,6 +324,7 @@ class SnapshotInfo { fileLength_(filelength), stripeUnit_(stripeUnit), stripeCount_(stripeCount), + poolset_(poolset), time_(time), status_(status) {} @@ -392,6 +408,14 @@ class SnapshotInfo { return stripeCount_; } + void SetPoolset(const std::string& poolset) { + poolset_ = poolset; + } + + const std::string& GetPoolset() const { + return poolset_; + } + void SetCreateTime(uint64_t createTime) { time_ = createTime; } @@ -427,12 +451,14 @@ class SnapshotInfo { uint32_t chunkSize_; // 文件的segment大小 uint64_t segmentSize_; - //文件大小 + // 文件大小 uint64_t fileLength_; // stripe size uint64_t stripeUnit_; // stripe count uint64_t stripeCount_; + // poolset + std::string poolset_; // 快照创建时间 uint64_t time_; // 快照处理的状态 diff --git a/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.cpp b/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.cpp index 9533ded57e..fc1edd783b 100644 --- a/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.cpp +++ b/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.cpp @@ -304,7 +304,7 @@ int SnapshotCloneMetaStoreEtcd::LoadSnapshotInfos() { LOG(ERROR) << "etcd list err:" << errCode; return -1; } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { SnapshotInfo data; errCode = codec_->DecodeSnapshotData(out[i], &data); if (!errCode) { @@ -327,7 +327,7 @@ int SnapshotCloneMetaStoreEtcd::LoadCloneInfos() { LOG(ERROR) << "etcd list err:" << errCode; return -1; } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { CloneInfo data; errCode = codec_->DecodeCloneInfoData(out[i], &data); if (!errCode) { diff --git a/src/snapshotcloneserver/common/task_tracker.h b/src/snapshotcloneserver/common/task_tracker.h index 809bcf5aa4..121fb01b30 100644 --- a/src/snapshotcloneserver/common/task_tracker.h +++ b/src/snapshotcloneserver/common/task_tracker.h @@ -23,9 +23,12 @@ #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_TASK_TRACKER_H_ #define SRC_SNAPSHOTCLONESERVER_COMMON_TASK_TRACKER_H_ -#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/common/task_tracker.h" +#include + +#include "src/common/snapshotclone/snapshotclone_define.h" + using ::curve::common::TaskTracker; using ::curve::common::ContextTaskTracker; diff --git a/src/snapshotcloneserver/snapshot/snapshot_core.cpp b/src/snapshotcloneserver/snapshot/snapshot_core.cpp index 9cac841b11..6abb94b5e9 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_core.cpp +++ b/src/snapshotcloneserver/snapshot/snapshot_core.cpp @@ -73,7 +73,7 @@ int SnapshotCoreImpl::CreateSnapshotPre(const std::string &file, snapshotNum--; } } - if (snapshotNum >= maxSnapshotLimit_) { + if (snapshotNum >= static_cast(maxSnapshotLimit_)) { LOG(ERROR) << "Snapshot count reach the max limit."; return kErrCodeSnapshotCountReachLimit; } @@ -112,6 +112,7 @@ int SnapshotCoreImpl::CreateSnapshotPre(const std::string &file, UUID uuid = UUIDGenerator().GenerateUUID(); SnapshotInfo info(uuid, user, file, snapshotName); + info.SetPoolset(fInfo.poolset); info.SetStatus(Status::pending); ret = metaStore_->AddSnapshot(info); if (ret < 0) { @@ -555,6 +556,7 @@ int 
SnapshotCoreImpl::CreateSnapshotOnCurvefs( info->SetFileLength(snapInfo.length); info->SetStripeUnit(snapInfo.stripeUnit); info->SetStripeCount(snapInfo.stripeCount); + info->SetPoolset(snapInfo.poolset); info->SetCreateTime(snapInfo.ctime); auto compareAndSet = [&](SnapshotInfo* snapinfo) { @@ -1187,4 +1189,3 @@ int SnapshotCoreImpl::HandleCancelScheduledSnapshotTask( } // namespace snapshotcloneserver } // namespace curve - diff --git a/src/snapshotcloneserver/snapshotclone_service.cpp b/src/snapshotcloneserver/snapshotclone_service.cpp index 0a7e379d2b..72f6b04683 100644 --- a/src/snapshotcloneserver/snapshotclone_service.cpp +++ b/src/snapshotcloneserver/snapshotclone_service.cpp @@ -41,6 +41,8 @@ void SnapshotCloneServiceImpl::default_method(RpcController* cntl, const HttpRequest* req, HttpResponse* resp, Closure* done) { + (void)req; + (void)resp; brpc::ClosureGuard done_guard(done); brpc::Controller* bcntl = static_cast(cntl); @@ -347,6 +349,8 @@ void SnapshotCloneServiceImpl::HandleCloneAction( bcntl->http_request().uri().GetQuery(kDestinationStr); const std::string *lazy = bcntl->http_request().uri().GetQuery(kLazyStr); + const std::string *poolset = + bcntl->http_request().uri().GetQuery(kPoolset); if ((version == nullptr) || (user == nullptr) || (source == nullptr) || @@ -356,7 +360,9 @@ void SnapshotCloneServiceImpl::HandleCloneAction( (user->empty()) || (source->empty()) || (destination->empty()) || - (lazy->empty())) { + (lazy->empty()) || + // poolset is optional, but if it exists, it should not be empty + (poolset != nullptr && poolset->empty())) { HandleBadRequestError(bcntl, requestId); LOG(INFO) << "SnapshotCloneServiceImpl Return : " << "action = Clone" @@ -380,14 +386,16 @@ void SnapshotCloneServiceImpl::HandleCloneAction( << ", Source = " << *source << ", Destination = " << *destination << ", Lazy = " << *lazy + << ", Poolset = " << (poolset != nullptr ? *poolset : "") << ", requestId = " << requestId; TaskIdType taskId; auto closure = std::make_shared(bcntl, done); closure->SetRequestId(requestId); - cloneManager_->CloneFile( - *source, *user, *destination, lazyFlag, closure, &taskId); + cloneManager_->CloneFile(*source, *user, *destination, + (poolset != nullptr ? 
*poolset : ""), lazyFlag, + closure, &taskId); done_guard.release(); return; } @@ -913,7 +921,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneRefStatusAction( if (refStatus == CloneRefStatus::kNeedCheck) { mainObj[kTotalCountStr] = cloneInfos.size(); Json::Value listObj; - for (int i = 0; i < cloneInfos.size(); i++) { + for (size_t i = 0; i < cloneInfos.size(); i++) { Json::Value cloneTaskObj; cloneTaskObj[kUserStr] = cloneInfos[i].GetUser(); cloneTaskObj[kFileStr] = cloneInfos[i].GetDest(); @@ -955,5 +963,3 @@ void SnapshotCloneServiceImpl::HandleBadRequestError(brpc::Controller* bcntl, } // namespace snapshotcloneserver } // namespace curve - - diff --git a/src/tools/BUILD b/src/tools/BUILD index bdbd703516..4a635c3379 100644 --- a/src/tools/BUILD +++ b/src/tools/BUILD @@ -99,5 +99,6 @@ cc_library( "//src/common:curve_common", "//src/mds/common:mds_common", "//src/mds/nameserver2", + "@com_google_absl//absl/strings:str_format", ], ) diff --git a/src/tools/copyset_check_core.cpp b/src/tools/copyset_check_core.cpp index 0906296133..f32a7a923d 100644 --- a/src/tools/copyset_check_core.cpp +++ b/src/tools/copyset_check_core.cpp @@ -19,8 +19,9 @@ * Created Date: 2019-10-30 * Author: charisu */ -#include #include "src/tools/copyset_check_core.h" +#include +#include DEFINE_uint64(margin, 1000, "The threshold of the gap between peers"); DEFINE_uint64(replicasNum, 3, "the number of replicas that required"); @@ -404,7 +405,7 @@ int CopysetCheckCore::CheckCopysetsOnServer(const ServerIdType& serverId, std::vector threadpool; std::map> queryCsResult; uint32_t index = 0; - for (int i = 0; i < FLAGS_rpcConcurrentNum; i++) { + for (uint64_t i = 0; i < FLAGS_rpcConcurrentNum; i++) { threadpool.emplace_back(Thread( &CopysetCheckCore::ConcurrentCheckCopysetsOnServer, this, std::ref(chunkservers), &index, @@ -751,7 +752,7 @@ CheckResult CopysetCheckCore::CheckPeerOnlineStatus( } if (notOnlineNum > 0) { uint32_t majority = peers.size() / 2 + 1; - if (notOnlineNum < majority) { + if (notOnlineNum < static_cast(majority)) { return CheckResult::kMinorityPeerNotOnline; } else { return CheckResult::kMajorityPeerNotOnline; @@ -819,7 +820,9 @@ CheckResult CopysetCheckCore::CheckHealthOnLeader( return CheckResult::kParseError; } } - gap = std::max(gap, lastLogId - (nextIndex - 1 - flying)); + if (lastLogId > (nextIndex - 1 - flying)) { + gap = std::max(gap, lastLogId - (nextIndex - 1 - flying)); + } } } if (gap > FLAGS_margin) { @@ -926,6 +929,7 @@ int CopysetCheckCore::ListMayBrokenVolumes( void CopysetCheckCore::GetCopysetInfos(const char* key, std::vector* copysets) { + (void)key; for (auto iter = copysets_[kMajorityPeerNotOnline].begin(); iter != copysets_[kMajorityPeerNotOnline].end(); ++iter) { std::string gid = *iter; diff --git a/src/tools/copyset_tool.cpp b/src/tools/copyset_tool.cpp index bb203e4cf4..5cfaca5d94 100644 --- a/src/tools/copyset_tool.cpp +++ b/src/tools/copyset_tool.cpp @@ -84,7 +84,7 @@ int CopysetTool::Init() { } void PrintCopysets(const std::vector& copysets) { - for (int i = 0; i < copysets.size(); ++i) { + for (size_t i = 0; i < copysets.size(); ++i) { if (i != 0) { std::cout << ","; } diff --git a/src/tools/curve_cli.cpp b/src/tools/curve_cli.cpp index 00dbeea052..0dc5dcf46e 100644 --- a/src/tools/curve_cli.cpp +++ b/src/tools/curve_cli.cpp @@ -269,6 +269,8 @@ int CurveCli::DoSnapshot() { int CurveCli::DoSnapshot(uint32_t lgPoolId, uint32_t copysetId, const curve::common::Peer& peer) { + (void)lgPoolId; + (void)copysetId; braft::cli::CliOptions opt; opt.timeout_ms = FLAGS_timeout_ms; 
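The copyset_check_core.cpp change above wraps the gap computation in "if (lastLogId > (nextIndex - 1 - flying))" to guard the subtraction: with unsigned 64-bit counters, a leader whose lastLogId is not ahead of nextIndex - 1 - flying would otherwise produce a wrapped-around, astronomically large "gap" and spuriously exceed FLAGS_margin. A self-contained illustration (the values are invented and the uint64_t types are an assumption):

#include <algorithm>
#include <cstdint>
#include <iostream>

int main() {
    uint64_t gap = 0;
    uint64_t lastLogId = 100;  // leader's last log index
    uint64_t nextIndex = 110;  // follower's next index
    uint64_t flying = 2;       // in-flight entries

    // Unguarded: 100 - 107 wraps to roughly 1.8e19 and would be taken as the gap.
    uint64_t wrapped = lastLogId - (nextIndex - 1 - flying);
    std::cout << "unguarded gap: " << wrapped << "\n";

    // Guarded, as in the patch: only take the difference when it is positive.
    if (lastLogId > (nextIndex - 1 - flying)) {
        gap = std::max(gap, lastLogId - (nextIndex - 1 - flying));
    }
    std::cout << "guarded gap: " << gap << "\n";  // stays 0
    return 0;
}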
opt.max_retry = FLAGS_max_retry; diff --git a/src/tools/curve_meta_tool.cpp b/src/tools/curve_meta_tool.cpp index fbcdb30556..5d9da78ec0 100644 --- a/src/tools/curve_meta_tool.cpp +++ b/src/tools/curve_meta_tool.cpp @@ -121,7 +121,7 @@ int CurveMetaTool::PrintChunkMeta(const std::string& chunkFileName) { memset(buf.get(), 0, FLAGS_pageSize); int rc = localFS_->Read(fd, buf.get(), 0, FLAGS_pageSize); localFS_->Close(fd); - if (rc != FLAGS_pageSize) { + if (rc != static_cast(FLAGS_pageSize)) { if (rc < 0) { std::cout << "Fail to read metaPage from " << chunkFileName << ", " << berror() << std::endl; @@ -157,7 +157,7 @@ int CurveMetaTool::PrintSnapshotMeta(const std::string& snapFileName) { memset(buf.get(), 0, FLAGS_pageSize); int rc = localFS_->Read(fd, buf.get(), 0, FLAGS_pageSize); localFS_->Close(fd); - if (rc != FLAGS_pageSize) { + if (rc != static_cast(FLAGS_pageSize)) { if (rc < 0) { std::cout << "Fail to read metaPage from " << snapFileName << ", " << berror() << std::endl; diff --git a/src/tools/curve_tool_define.h b/src/tools/curve_tool_define.h index 22755dc85b..a392b807bd 100644 --- a/src/tools/curve_tool_define.h +++ b/src/tools/curve_tool_define.h @@ -68,6 +68,7 @@ const char kExtendCmd[] = "extend"; const char kCleanRecycleCmd[] = "clean-recycle"; const char kChunkLocatitonCmd[] = "chunk-location"; const char kUpdateThrottle[] = "update-throttle"; +const char kListPoolsets[] = "list-poolsets"; // CopysetCheck相关命令 const char kCheckCopysetCmd[] = "check-copyset"; diff --git a/src/tools/curve_tool_main.cpp b/src/tools/curve_tool_main.cpp index 37059de00c..8e516dc0e7 100644 --- a/src/tools/curve_tool_main.cpp +++ b/src/tools/curve_tool_main.cpp @@ -24,7 +24,7 @@ #include "src/common/curve_version.h" #include "src/tools/curve_tool_factory.h" -const char* kHelpStr = "Usage: curve_ops_tool [Command] [OPTIONS...]\n" +static const char* kHelpStr = "Usage: curve_ops_tool [Command] [OPTIONS...]\n" "COMMANDS:\n" "space : show curve all disk type space, include total space and used space\n" //NOLINT "status : show the total status of the cluster\n" @@ -62,10 +62,10 @@ const char* kHelpStr = "Usage: curve_ops_tool [Command] [OPTIONS...]\n" "update-throttle: update file throttle params\n" "rapid-leader-schedule: rapid leader schedule in cluster in logicalpool\n" //NOLINT "set-scan-state: set scan state for specify logical pool\n" - "scan-status: show scan status\n\n" + "scan-status: show scan status\n" + "list-poolsets: list all poolsets in cluster\n\n" "You can specify the config path by -confPath to avoid typing too many options\n"; //NOLINT - DEFINE_bool(example, false, "print the example of usage"); DEFINE_string(confPath, "/etc/curve/tools.conf", "config file path of tools"); namespace brpc { @@ -133,7 +133,7 @@ bool LoadRootUserNameAndPassword(curve::common::Configuration* conf) { rc = conf->GetStringValue("rootUserPassword", &curve::tool::rootUserPassword); if (!rc) { - std::cerr << "Mising rootUserPassword in '" << FLAGS_confPath << "'\n"; + std::cerr << "Missing rootUserPassword in '" << FLAGS_confPath << "'\n"; return false; } diff --git a/src/tools/etcd_client.cpp b/src/tools/etcd_client.cpp index a9d4a8bad7..6e1821a8ba 100644 --- a/src/tools/etcd_client.cpp +++ b/src/tools/etcd_client.cpp @@ -22,10 +22,12 @@ #include "src/tools/etcd_client.h" +#include + namespace curve { namespace tool { -int EtcdClient::Init(const std::string& etcdAddr) { +int EtcdClient::Init(const std::string &etcdAddr) { curve::common::SplitString(etcdAddr, ",", &etcdAddrVec_); if (etcdAddrVec_.empty()) { 
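The etcd_client.cpp hunks just below (and the matching ones in metric_client.cpp further down) replace the deprecated Json::Reader with Json::CharReaderBuilder while keeping strict-mode parsing and surfacing the parser's error message. The same call sequence in isolation; the JSON body and key names are invented stand-ins for the real etcd status response:

#include <json/json.h>

#include <iostream>
#include <memory>
#include <string>

int main() {
    // Stand-in for the etcd status response body.
    const std::string resp = R"({"header":{"member_id":1},"leader":1})";

    Json::CharReaderBuilder builder;
    // Same strictness as the old Json::Reader(Json::Features::strictMode()).
    Json::CharReaderBuilder::strictMode(&builder.settings_);
    std::unique_ptr<Json::CharReader> reader(builder.newCharReader());

    Json::Value value;
    JSONCPP_STRING errormsg;
    if (!reader->parse(resp.data(), resp.data() + resp.length(), &value,
                       &errormsg)) {
        std::cerr << "Parse the response fail! Error: " << errormsg << std::endl;
        return 1;
    }
    if (!value["leader"].isNull() &&
        value["leader"] == value["header"]["member_id"]) {
        std::cout << "this member is the etcd leader" << std::endl;
    }
    return 0;
}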
std::cout << "Split etcd address fail!" << std::endl; @@ -34,8 +36,8 @@ int EtcdClient::Init(const std::string& etcdAddr) { return 0; } -int EtcdClient::GetEtcdClusterStatus(std::vector* leaderAddrVec, - std::map* onlineState) { +int EtcdClient::GetEtcdClusterStatus(std::vector *leaderAddrVec, + std::map *onlineState) { if (!leaderAddrVec || !onlineState) { std::cout << "The argument is a null pointer!" << std::endl; return -1; @@ -43,7 +45,7 @@ int EtcdClient::GetEtcdClusterStatus(std::vector* leaderAddrVec, brpc::Channel httpChannel; brpc::ChannelOptions options; options.protocol = brpc::PROTOCOL_HTTP; - for (const auto& addr : etcdAddrVec_) { + for (const auto &addr : etcdAddrVec_) { int res = httpChannel.Init(addr.c_str(), &options); if (res != 0) { (*onlineState)[addr] = false; @@ -59,12 +61,18 @@ int EtcdClient::GetEtcdClusterStatus(std::vector* leaderAddrVec, } (*onlineState)[addr] = true; std::string resp = cntl.response_attachment().to_string(); - Json::Reader reader(Json::Features::strictMode()); + Json::CharReaderBuilder builder; + Json::CharReaderBuilder::strictMode(&builder.settings_); + std::unique_ptr reader(builder.newCharReader()); Json::Value value; - if (!reader.parse(resp, value)) { - std::cout << "Parse the response fail!" << std::endl; + JSONCPP_STRING errormsg; + if (!reader->parse(resp.data(), resp.data() + resp.length(), &value, + &errormsg)) { + std::cout << "Parse the response fail! Error: " << errormsg + << std::endl; return -1; } + if (!value[kEtcdLeader].isNull()) { if (value[kEtcdLeader] == value[kEtcdHeader][kEtcdMemberId]) { leaderAddrVec->emplace_back(addr); @@ -74,13 +82,13 @@ int EtcdClient::GetEtcdClusterStatus(std::vector* leaderAddrVec, return 0; } -int EtcdClient::GetAndCheckEtcdVersion(std::string* version, - std::vector* failedList) { +int EtcdClient::GetAndCheckEtcdVersion(std::string *version, + std::vector *failedList) { brpc::Channel httpChannel; brpc::ChannelOptions options; options.protocol = brpc::PROTOCOL_HTTP; VersionMapType versionMap; - for (const auto& addr : etcdAddrVec_) { + for (const auto &addr : etcdAddrVec_) { int res = httpChannel.Init(addr.c_str(), &options); if (res != 0) { std::cout << "Init channel to " << addr << " failed" << std::endl; @@ -98,10 +106,16 @@ int EtcdClient::GetAndCheckEtcdVersion(std::string* version, continue; } std::string resp = cntl.response_attachment().to_string(); - Json::Reader reader(Json::Features::strictMode()); + + Json::CharReaderBuilder builder; + Json::CharReaderBuilder::strictMode(&builder.settings_); + std::unique_ptr reader(builder.newCharReader()); Json::Value value; - if (!reader.parse(resp, value)) { - std::cout << "Parse the response fail!" << std::endl; + JSONCPP_STRING errormsg; + if (!reader->parse(resp.data(), resp.data() + resp.length(), &value, + &errormsg)) { + std::cout << "Parse the response fail! 
Error: " << errormsg + << std::endl; return -1; } if (value[kEtcdCluster].isNull()) { diff --git a/src/tools/mds_client.cpp b/src/tools/mds_client.cpp index 502d58ccd5..21f9ded49d 100644 --- a/src/tools/mds_client.cpp +++ b/src/tools/mds_client.cpp @@ -271,18 +271,18 @@ int MDSClient::DeleteFile(const std::string& fileName, bool forcedelete) { return -1; } -int MDSClient::CreateFile(const std::string& fileName, uint64_t length, - bool normalFile, uint64_t stripeUnit, - uint64_t stripeCount) { +int MDSClient::CreateFile(const CreateFileContext& context) { curve::mds::CreateFileRequest request; curve::mds::CreateFileResponse response; - request.set_filename(fileName); - if (normalFile) { + request.set_filename(context.name); + if (context.type == curve::mds::FileType::INODE_PAGEFILE) { request.set_filetype(curve::mds::FileType::INODE_PAGEFILE); - request.set_filelength(length); - request.set_stripeunit(stripeUnit); - request.set_stripecount(stripeCount); + request.set_filelength(context.length); + request.set_poolset(context.poolset); + request.set_stripeunit(context.stripeUnit); + request.set_stripecount(context.stripeCount); } else { + assert(context.type == curve::mds::FileType::INODE_DIRECTORY); request.set_filetype(curve::mds::FileType::INODE_DIRECTORY); } @@ -300,7 +300,7 @@ int MDSClient::CreateFile(const std::string& fileName, uint64_t length, return 0; } std::cout << "CreateFile fail with errCode: " - << response.statuscode() << std::endl; + << StatusCode_Name(response.statuscode()) << std::endl; return -1; } @@ -960,7 +960,7 @@ int MDSClient::GetMetric(const std::string& metricName, std::string* value) { bool MDSClient::ChangeMDServer() { currentMdsIndex_++; - if (currentMdsIndex_ > mdsAddrVec_.size() - 1) { + if (currentMdsIndex_ > static_cast(mdsAddrVec_.size() - 1)) { currentMdsIndex_ = 0; } if (channel_.Init(mdsAddrVec_[currentMdsIndex_].c_str(), @@ -1152,5 +1152,30 @@ void MDSClient::FillUserInfo(T* request) { request->set_signature(sig); } } + +int MDSClient::ListPoolset(std::vector* poolsets) { + assert(poolsets != nullptr); + curve::mds::topology::ListPoolsetRequest request; + curve::mds::topology::ListPoolsetResponse response; + curve::mds::topology::TopologyService_Stub stub(&channel_); + + auto fp = &curve::mds::topology::TopologyService_Stub::ListPoolset; + if (0 != SendRpcToMds(&request, &response, &stub, fp)) { + std::cout << "ListPoolset fail" << std::endl; + return -1; + } + + if (response.statuscode() == curve::mds::topology::kTopoErrCodeSuccess) { + auto* mut = response.mutable_poolsetinfos(); + poolsets->insert(poolsets->end(), std::make_move_iterator(mut->begin()), + std::make_move_iterator(mut->end())); + return 0; + } + + std::cout << "ListPoolset fail with errCode: " << response.statuscode() + << std::endl; + return -1; +} + } // namespace tool } // namespace curve diff --git a/src/tools/mds_client.h b/src/tools/mds_client.h index 49e142046e..05bac69cd5 100644 --- a/src/tools/mds_client.h +++ b/src/tools/mds_client.h @@ -76,6 +76,8 @@ using curve::common::Authenticator; namespace curve { namespace tool { +using curve::mds::topology::PoolsetInfo; + enum class GetSegmentRes { kOK = 0, // 获取segment成功 kSegmentNotAllocated = -1, // segment不存在 @@ -85,6 +87,15 @@ enum class GetSegmentRes { using AllocMap = std::unordered_map; +struct CreateFileContext { + curve::mds::FileType type; + std::string name; + uint64_t length; + uint64_t stripeUnit; + uint64_t stripeCount; + std::string poolset; +}; + class MDSClient { public: MDSClient() : currentMdsIndex_(0), 
userName_(""), @@ -175,11 +186,7 @@ class MDSClient { * @param stripeCount the amount of stripes * @return 成功返回0,失败返回-1 */ - virtual int CreateFile(const std::string& fileName, - uint64_t length = 0, - bool normalFile = true, - uint64_t stripeUnit = 0, - uint64_t stripeCount = 0); + virtual int CreateFile(const CreateFileContext& context); /** * @brief List all volumes on copysets @@ -471,6 +478,8 @@ class MDSClient { virtual int UpdateFileThrottleParams( const std::string& fileName, const curve::mds::ThrottleParams& params); + int ListPoolset(std::vector* poolsets); + private: /** * @brief 切换mds diff --git a/src/tools/metric_client.cpp b/src/tools/metric_client.cpp index df3b2713dd..776347f738 100644 --- a/src/tools/metric_client.cpp +++ b/src/tools/metric_client.cpp @@ -22,23 +22,24 @@ #include "src/tools/metric_client.h" +#include + DECLARE_uint64(rpcTimeout); DECLARE_uint64(rpcRetryTimes); namespace curve { namespace tool { -MetricRet MetricClient::GetMetric(const std::string& addr, - const std::string& metricName, - std::string* value) { +MetricRet MetricClient::GetMetric(const std::string &addr, + const std::string &metricName, + std::string *value) { brpc::Channel httpChannel; brpc::ChannelOptions options; brpc::Controller cntl; options.protocol = brpc::PROTOCOL_HTTP; int res = httpChannel.Init(addr.c_str(), &options); if (res != 0) { - std::cout << "Init httpChannel to " << addr << " fail!" - << std::endl; + std::cout << "Init httpChannel to " << addr << " fail!" << std::endl; return MetricRet::kOtherErr; } @@ -46,17 +47,15 @@ MetricRet MetricClient::GetMetric(const std::string& addr, cntl.set_timeout_ms(FLAGS_rpcTimeout); httpChannel.CallMethod(NULL, &cntl, NULL, NULL, NULL); if (!cntl.Failed()) { - std::string attachment = - cntl.response_attachment().to_string(); + std::string attachment = cntl.response_attachment().to_string(); res = GetValueFromAttachment(attachment, value); return (res == 0) ? MetricRet::kOK : MetricRet::kOtherErr; } - bool needRetry = (cntl.Failed() && - cntl.ErrorCode() != EHOSTDOWN && - cntl.ErrorCode() != ETIMEDOUT && - cntl.ErrorCode() != brpc::ELOGOFF && - cntl.ErrorCode() != brpc::ERPCTIMEDOUT); + bool needRetry = + (cntl.Failed() && cntl.ErrorCode() != EHOSTDOWN && + cntl.ErrorCode() != ETIMEDOUT && cntl.ErrorCode() != brpc::ELOGOFF && + cntl.ErrorCode() != brpc::ERPCTIMEDOUT); uint64_t retryTimes = 0; while (needRetry && retryTimes < FLAGS_rpcRetryTimes) { cntl.Reset(); @@ -67,8 +66,7 @@ MetricRet MetricClient::GetMetric(const std::string& addr, retryTimes++; continue; } - std::string attachment = - cntl.response_attachment().to_string(); + std::string attachment = cntl.response_attachment().to_string(); res = GetValueFromAttachment(attachment, value); return (res == 0) ? MetricRet::kOK : MetricRet::kOtherErr; } @@ -78,14 +76,13 @@ MetricRet MetricClient::GetMetric(const std::string& addr, return notExist ? 
MetricRet::kNotFound : MetricRet::kOtherErr; } -MetricRet MetricClient::GetMetricUint(const std::string& addr, - const std::string& metricName, - uint64_t* value) { +MetricRet MetricClient::GetMetricUint(const std::string &addr, + const std::string &metricName, + uint64_t *value) { std::string str; MetricRet res = GetMetric(addr, metricName, &str); if (res != MetricRet::kOK) { - std::cout << "get metric " << metricName << " from " - << addr << " fail"; + std::cout << "get metric " << metricName << " from " << addr << " fail"; return res; } if (!curve::common::StringToUll(str, value)) { @@ -95,31 +92,37 @@ MetricRet MetricClient::GetMetricUint(const std::string& addr, return MetricRet::kOK; } -MetricRet MetricClient::GetConfValueFromMetric(const std::string& addr, - const std::string& metricName, - std::string* confValue) { +MetricRet MetricClient::GetConfValueFromMetric(const std::string &addr, + const std::string &metricName, + std::string *confValue) { std::string jsonString; brpc::Controller cntl; MetricRet res = GetMetric(addr, metricName, &jsonString); if (res != MetricRet::kOK) { return res; } - Json::Reader reader(Json::Features::strictMode()); + + Json::CharReaderBuilder builder; + Json::CharReaderBuilder::strictMode(&builder.settings_); + std::unique_ptr reader(builder.newCharReader()); Json::Value value; - if (!reader.parse(jsonString, value)) { - std::cout << "Parse metric as json fail" << std::endl; + JSONCPP_STRING errormsg; + if (!reader->parse(jsonString.data(), + jsonString.data() + jsonString.length(), &value, + &errormsg)) { + std::cout << "Parse metric as json fail: " << errormsg << std::endl; return MetricRet::kOtherErr; } + *confValue = value[kConfValue].asString(); return MetricRet::kOK; } -int MetricClient::GetValueFromAttachment(const std::string& attachment, - std::string* value) { +int MetricClient::GetValueFromAttachment(const std::string &attachment, + std::string *value) { auto pos = attachment.find(":"); if (pos == std::string::npos) { - std::cout << "parse response attachment fail!" - << std::endl; + std::cout << "parse response attachment fail!" 
<< std::endl; return -1; } *value = attachment.substr(pos + 1); diff --git a/src/tools/namespace_tool.cpp b/src/tools/namespace_tool.cpp index ca69e258ec..8d6119b75d 100644 --- a/src/tools/namespace_tool.cpp +++ b/src/tools/namespace_tool.cpp @@ -22,6 +22,10 @@ */ #include "src/tools/namespace_tool.h" +#include + +#include "absl/strings/str_format.h" + DEFINE_string(fileName, "", "file name"); DEFINE_string(dirName, "", "directory name"); DEFINE_string(expireTime, "7d", "Time for file in recyclebin exceed expire time " // NOLINT @@ -29,6 +33,7 @@ DEFINE_string(expireTime, "7d", "Time for file in recyclebin exceed expire time DEFINE_bool(forcedelete, false, "force delete file or not"); DEFINE_uint64(fileLength, 20, "file length (GB)"); DEFINE_uint64(newSize, 30, "the new size of expanded volume(GB)"); +DEFINE_string(poolset, "", "specify the poolset name"); DEFINE_bool(isTest, false, "is unit test or not"); DEFINE_uint64(offset, 0, "offset to query chunk location"); DEFINE_uint64(rpc_timeout, 3000, "millisecond for rpc timeout"); @@ -68,7 +73,8 @@ bool NameSpaceTool::SupportCommand(const std::string& command) { || command == kExtendCmd || command == kCleanRecycleCmd || command == kChunkLocatitonCmd - || command == kUpdateThrottle); + || command == kUpdateThrottle + || command == kListPoolsets); } // 根据命令行参数选择对应的操作 @@ -129,10 +135,18 @@ int NameSpaceTool::RunCommand(const std::string &cmd) { return -1; } bool normalFile = FLAGS_dirName.empty(); - std::string name = normalFile ? FLAGS_fileName : FLAGS_dirName; - return core_->CreateFile(name, FLAGS_fileLength * mds::kGB, - normalFile, FLAGS_stripeUnit, - FLAGS_stripeCount); + CreateFileContext context; + context.type = normalFile ? curve::mds::FileType::INODE_PAGEFILE + : curve::mds::FileType::INODE_DIRECTORY; + context.name = normalFile ? FLAGS_fileName : FLAGS_dirName; + if (normalFile) { + context.length = FLAGS_fileLength * mds::kGB; + context.stripeUnit = FLAGS_stripeUnit; + context.stripeCount = FLAGS_stripeCount; + context.poolset = FLAGS_poolset; + } + + return core_->CreateFile(context); } else if (cmd == kExtendCmd) { return core_->ExtendVolume(fileName, FLAGS_newSize * mds::kGB); } else if (cmd == kChunkLocatitonCmd) { @@ -141,6 +155,8 @@ int NameSpaceTool::RunCommand(const std::string &cmd) { return core_->UpdateFileThrottle(fileName, FLAGS_throttleType, FLAGS_limit, FLAGS_burst, FLAGS_burstLength); + } else if (cmd == kListPoolsets) { + return PrintPoolsets(); } else { std::cout << "Command not support!" 
<< std::endl; return -1; @@ -159,7 +175,7 @@ void NameSpaceTool::PrintHelp(const std::string &cmd) { std::cout << "If -fileName is specified, delete the files in recyclebin that the original directory is fileName" << std::endl; // NOLINT std::cout << "expireTime: s=second, m=minute, h=hour, d=day, M=month, y=year" << std::endl; // NOLINT } else if (cmd == kCreateCmd) { - std::cout << "curve_ops_tool " << cmd << " -fileName=/test -userName=test -password=123 -fileLength=20 [-stripeUnit=32768] [-stripeCount=32] [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT + std::cout << "curve_ops_tool " << cmd << " -fileName=/test -userName=test -password=123 -fileLength=20 [--poolset=default] [-stripeUnit=32768] [-stripeCount=32] [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT std::cout << "curve_ops_tool " << cmd << " -dirName=/dir -userName=test -password=123 [-mdsAddr=127.0.0.1:6666] [-confPath=/etc/curve/tools.conf]" << std::endl; // NOLINT std::cout << "The first example can create a volume and the second create a directory." << std::endl; // NOLINT } else if (cmd == kExtendCmd) { @@ -285,6 +301,28 @@ int NameSpaceTool::PrintListDir(const std::string& dirName) { return ret; } +int NameSpaceTool::PrintPoolsets() { + std::vector poolsets; + if (core_->ListPoolset(&poolsets) != 0) { + std::cout << "List poolset fail!" << std::endl; + return -1; + } + + std::sort(poolsets.begin(), poolsets.end(), + [](const PoolsetInfo& a, const PoolsetInfo& b) { + return a.poolsetid() < b.poolsetid(); + }); + + for (const auto& poolset : poolsets) { + const std::string str = absl::StrFormat( + "id: %3d, name: %s, type: %s, desc: `%s`", poolset.poolsetid(), + poolset.poolsetname(), poolset.type(), poolset.desc()); + std::cout << str << std::endl; + } + + return 0; +} + int NameSpaceTool::PrintSegmentInfo(const std::string &fileName) { std::vector segments; if (core_->GetFileSegments(fileName, &segments) != 0) { diff --git a/src/tools/namespace_tool.h b/src/tools/namespace_tool.h index 718bbd95ad..1af7f8ca8f 100644 --- a/src/tools/namespace_tool.h +++ b/src/tools/namespace_tool.h @@ -111,6 +111,8 @@ class NameSpaceTool : public CurveTool { // 目前curve mds不支持/test/格式的文件名,需要把末尾的/去掉 void TrimEndingSlash(std::string* fileName); + int PrintPoolsets(); + private: // 核心逻辑 std::shared_ptr core_; diff --git a/src/tools/namespace_tool_core.cpp b/src/tools/namespace_tool_core.cpp index 2cf644bcf4..b69a6ecacc 100644 --- a/src/tools/namespace_tool_core.cpp +++ b/src/tools/namespace_tool_core.cpp @@ -60,14 +60,10 @@ int NameSpaceToolCore::DeleteFile(const std::string& fileName, return client_->DeleteFile(fileName, forcedelete); } -int NameSpaceToolCore::CreateFile(const std::string& fileName, - uint64_t length, - bool normalFile, - uint64_t stripeUnit, - uint64_t stripeCount) { - return client_->CreateFile(fileName, length, normalFile, - stripeUnit, stripeCount); +int NameSpaceToolCore::CreateFile(const CreateFileContext& ctx) { + return client_->CreateFile(ctx); } + int NameSpaceToolCore::ExtendVolume(const std::string& fileName, uint64_t newSize) { return client_->ExtendVolume(fileName, newSize); @@ -203,7 +199,7 @@ int NameSpaceToolCore::UpdateFileThrottle(const std::string& fileName, params.set_limit(limit); params.set_type(type); if (burst >= 0) { - if (burst < limit) { + if (burst < static_cast(limit)) { std::cout << "burst should greater equal to limit" << std::endl; return -1; } @@ -253,7 +249,7 @@ int NameSpaceToolCore::QueryChunkCopyset(const 
std::string& fileName, return -1; } uint64_t chunkIndex = (offset - segOffset) / segment.chunksize(); - if (chunkIndex >= segment.chunks_size()) { + if (static_cast(chunkIndex) >= segment.chunks_size()) { std::cout << "ChunkIndex exceed chunks num in segment!" << std::endl; return -1; } @@ -264,5 +260,13 @@ int NameSpaceToolCore::QueryChunkCopyset(const std::string& fileName, *copyset = std::make_pair(logicPoolId, copysetId); return 0; } + +int NameSpaceToolCore::ListPoolset(std::vector* poolsets) { + assert(poolsets != nullptr); + poolsets->clear(); + + return client_->ListPoolset(poolsets); +} + } // namespace tool } // namespace curve diff --git a/src/tools/namespace_tool_core.h b/src/tools/namespace_tool_core.h index b72afc0b76..febf0882f8 100644 --- a/src/tools/namespace_tool_core.h +++ b/src/tools/namespace_tool_core.h @@ -52,6 +52,8 @@ using curve::common::ChunkServerLocation; namespace curve { namespace tool { +using curve::mds::topology::PoolsetInfo; + class NameSpaceToolCore { public: explicit NameSpaceToolCore(std::shared_ptr client); @@ -110,9 +112,7 @@ class NameSpaceToolCore { * @param stripeCount the amount of stripes * @return 成功返回0,失败返回-1 */ - virtual int CreateFile(const std::string& fileName, uint64_t length, - bool normalFile = true, uint64_t stripeUnit = 0, - uint64_t stripeCount = 0); + virtual int CreateFile(const CreateFileContext& ctx); /** * @brief 扩容卷 @@ -178,6 +178,8 @@ class NameSpaceToolCore { const int64_t burst, const int64_t burstLength); + virtual int ListPoolset(std::vector* poolsets); + private: /** * @brief 获取文件的segment信息并输出到segments里面 diff --git a/src/tools/snapshot_clone_client.cpp b/src/tools/snapshot_clone_client.cpp index 02e617cfea..745ea01326 100644 --- a/src/tools/snapshot_clone_client.cpp +++ b/src/tools/snapshot_clone_client.cpp @@ -104,8 +104,7 @@ std::vector SnapshotCloneClient::GetActiveAddrs() { void SnapshotCloneClient::GetOnlineStatus( std::map* onlineStatus) { onlineStatus->clear(); - int result = 0; - for (const auto item : dummyServerMap_) { + for (const auto &item : dummyServerMap_) { std::string listenAddr; int res = GetListenAddrFromDummyPort(item.second, &listenAddr); // 如果获取到的监听地址与记录的mds地址不一致,也认为不在线 diff --git a/src/tools/status_tool.cpp b/src/tools/status_tool.cpp index a2a91ed915..e6bfc116a4 100644 --- a/src/tools/status_tool.cpp +++ b/src/tools/status_tool.cpp @@ -856,7 +856,7 @@ int PrintChunkserverOnlineStatus( int i = 0; for (ChunkServerIdType csId : offlineRecover) { i++; - if (i == offlineRecover.size()) { + if (i == static_cast(offlineRecover.size())) { std::cout << csId; } else { std::cout << csId << ", "; diff --git a/src/tools/status_tool.h b/src/tools/status_tool.h index 89e7882bb0..2b54d70943 100644 --- a/src/tools/status_tool.h +++ b/src/tools/status_tool.h @@ -49,13 +49,13 @@ #include "src/tools/snapshot_clone_client.h" #include "src/common/uri_parser.h" +using curve::mds::topology::ChunkServerInfo; using curve::mds::topology::ChunkServerStatus; using curve::mds::topology::DiskState; +using curve::mds::topology::LogicalPoolInfo; using curve::mds::topology::OnlineState; using curve::mds::topology::PhysicalPoolInfo; -using curve::mds::topology::LogicalPoolInfo; using curve::mds::topology::PoolIdType; -using curve::mds::topology::ChunkServerInfo; namespace curve { namespace tool { @@ -99,14 +99,11 @@ class StatusTool : public CurveTool { std::shared_ptr copysetCheckCore, std::shared_ptr versionTool, std::shared_ptr metricClient, - std::shared_ptr snapshotClient) : - mdsClient_(mdsClient), etcdClient_(etcdClient), - 
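The constructor rewrites in status_tool.h here and in version_tool.h just below reorder the member-initializer lists, presumably so they follow the members' declaration order: C++ always initializes members in declaration order regardless of how the list is written, and a mismatched list draws a -Wreorder warning and can mask a read of a not-yet-initialized member. A standalone illustration with an invented struct:

#include <iostream>

struct Widget {
    int first;
    int second;
    // Written second-then-first, but initialization still runs in declaration
    // order (first, then second); compilers flag this with -Wreorder.
    Widget() : second(2), first(1) {}
};

int main() {
    Widget w;
    std::cout << w.first << " " << w.second << std::endl;  // prints "1 2"
    return 0;
}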
copysetCheckCore_(copysetCheckCore), - versionTool_(versionTool), - metricClient_(metricClient), - snapshotClient_(snapshotClient), - mdsInited_(false), etcdInited_(false), - noSnapshotServer_(false) {} + std::shared_ptr snapshotClient) + : mdsClient_(mdsClient), copysetCheckCore_(copysetCheckCore), + etcdClient_(etcdClient), metricClient_(metricClient), + snapshotClient_(snapshotClient), versionTool_(versionTool), + mdsInited_(false), etcdInited_(false), noSnapshotServer_(false) {} ~StatusTool() = default; /** @@ -128,7 +125,7 @@ class StatusTool : public CurveTool { * @param command:执行的命令 * @return true / false */ - static bool SupportCommand(const std::string& command); + static bool SupportCommand(const std::string &command); /** * @brief 判断集群是否健康 @@ -136,16 +133,16 @@ class StatusTool : public CurveTool { bool IsClusterHeatlhy(); private: - int Init(const std::string& command); + int Init(const std::string &command); int SpaceCmd(); int StatusCmd(); int ChunkServerListCmd(); int ServerListCmd(); int LogicalPoolListCmd(); int ChunkServerStatusCmd(); - int GetPoolsInCluster(std::vector* phyPools, - std::vector* lgPools); - int GetSpaceInfo(SpaceInfo* spaceInfo); + int GetPoolsInCluster(std::vector *phyPools, + std::vector *lgPools); + int GetSpaceInfo(SpaceInfo *spaceInfo); int PrintClusterStatus(); int PrintMdsStatus(); int PrintEtcdStatus(); @@ -153,9 +150,9 @@ class StatusTool : public CurveTool { int PrintClientStatus(); int ClientListCmd(); int ScanStatusCmd(); - void PrintCsLeftSizeStatistics(const std::string& name, - const std::map>& poolLeftSize); + void PrintCsLeftSizeStatistics( + const std::string &name, + const std::map> &poolLeftSize); int PrintSnapshotCloneStatus(); /** @@ -163,7 +160,7 @@ class StatusTool : public CurveTool { * @param command:执行的命令 * @return 需要返回true,否则返回false */ - bool CommandNeedEtcd(const std::string& command); + bool CommandNeedEtcd(const std::string &command); /** @@ -171,22 +168,22 @@ class StatusTool : public CurveTool { * @param command:执行的命令 * @return 需要返回true,否则返回false */ - bool CommandNeedMds(const std::string& command); + bool CommandNeedMds(const std::string &command); /** * @brief 判断命令是否需要snapshot clone server * @param command:执行的命令 * @return 需要返回true,否则返回false */ - bool CommandNeedSnapshotClone(const std::string& command); + bool CommandNeedSnapshotClone(const std::string &command); /** * @brief 打印在线状态 * @param name : 在线状态对应的名字 * @param onlineStatus 在线状态的map */ - void PrintOnlineStatus(const std::string& name, - const std::map& onlineStatus); + void PrintOnlineStatus(const std::string &name, + const std::map &onlineStatus); /** * @brief 获取并打印mds version信息 @@ -197,7 +194,7 @@ class StatusTool : public CurveTool { * @brief 检查服务是否健康 * @param name 服务名 */ - bool CheckServiceHealthy(const ServiceName& name); + bool CheckServiceHealthy(const ServiceName &name); private: // 向mds发送RPC的client diff --git a/src/tools/version_tool.h b/src/tools/version_tool.h index 50cfbfb497..9231d1e4fc 100644 --- a/src/tools/version_tool.h +++ b/src/tools/version_tool.h @@ -49,9 +49,8 @@ class VersionTool { explicit VersionTool(std::shared_ptr mdsClient, std::shared_ptr metricClient, std::shared_ptr snapshotClient) - : mdsClient_(mdsClient), - metricClient_(metricClient), - snapshotClient_(snapshotClient) {} + : mdsClient_(mdsClient), snapshotClient_(snapshotClient), + metricClient_(metricClient) {} virtual ~VersionTool() {} /** @@ -59,43 +58,45 @@ class VersionTool { * @param[out] version 版本 * @return 成功返回0,失败返回-1 */ - virtual int GetAndCheckMdsVersion(std::string* 
version, - std::vector* failedList); + virtual int GetAndCheckMdsVersion(std::string *version, + std::vector *failedList); /** * @brief 获取chunkserver的版本并检查版本一致性 * @param[out] version 版本 * @return 成功返回0,失败返回-1 */ - virtual int GetAndCheckChunkServerVersion(std::string* version, - std::vector* failedList); + virtual int + GetAndCheckChunkServerVersion(std::string *version, + std::vector *failedList); /** * @brief 获取snapshot clone server的版本 * @param[out] version 版本 * @return 成功返回0,失败返回-1 */ - virtual int GetAndCheckSnapshotCloneVersion(std::string* version, - std::vector* failedList); + virtual int + GetAndCheckSnapshotCloneVersion(std::string *version, + std::vector *failedList); /** * @brief 获取client的版本 * @param[out] versionMap process->版本->地址的映射表 * @return 成功返回0,失败返回-1 */ - virtual int GetClientVersion(ClientVersionMapType* versionMap); + virtual int GetClientVersion(ClientVersionMapType *versionMap); /** * @brief 打印每个version对应的地址 * @param versionMap version到地址列表的map */ - static void PrintVersionMap(const VersionMapType& versionMap); + static void PrintVersionMap(const VersionMapType &versionMap); /** * @brief 打印访问失败的地址 * @param failedList 访问失败的地址列表 */ - static void PrintFailedList(const std::vector& failedList); + static void PrintFailedList(const std::vector &failedList); private: /** @@ -104,17 +105,17 @@ class VersionTool { * @param[out] versionMap version到地址的map * @param[out] failedList 查询version失败的地址列表 */ - void GetVersionMap(const std::vector& addrVec, - VersionMapType* versionMap, - std::vector* failedList); + void GetVersionMap(const std::vector &addrVec, + VersionMapType *versionMap, + std::vector *failedList); /** * @brief 获取addrVec对应地址的version,并把version和地址对应关系存在map中 * @param addrVec 地址列表 * @param[out] processMap 不同的process对应的client的地址列表 */ - void FetchClientProcessMap(const std::vector& addrVec, - ProcessMapType* processMap); + void FetchClientProcessMap(const std::vector &addrVec, + ProcessMapType *processMap); /** * @brief 从启动server的命令行获取对应的程序的名字 @@ -129,7 +130,7 @@ class VersionTool { * @param addrVec 地址列表 * @return 进程的名字 */ - std::string GetProcessNameFromCmd(const std::string& cmd); + std::string GetProcessNameFromCmd(const std::string &cmd); private: // 向mds发送RPC的client diff --git a/test/chunkserver/chunk_service_test.cpp b/test/chunkserver/chunk_service_test.cpp index 6f6a970448..d770fd6663 100644 --- a/test/chunkserver/chunk_service_test.cpp +++ b/test/chunkserver/chunk_service_test.cpp @@ -159,7 +159,6 @@ TEST_F(ChunkserverTest, normal_read_write_test) { }; WaitpidGuard waitpidGuard(pid1, pid2, pid3); - const uint32_t kMaxChunkSize = 16 * 1024 * 1024; PeerId leader; LogicPoolID logicPoolId = 1; CopysetID copysetId = 100001; diff --git a/test/chunkserver/chunk_service_test2.cpp b/test/chunkserver/chunk_service_test2.cpp index 21a594c2d2..3422f0f314 100644 --- a/test/chunkserver/chunk_service_test2.cpp +++ b/test/chunkserver/chunk_service_test2.cpp @@ -164,7 +164,6 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { CopysetID copysetId = 100001; uint64_t chunkId = 1; uint64_t sn = 1; - char ch = 'a'; char expectData[kOpRequestAlignSize + 1]; ::memset(expectData, 'a', kOpRequestAlignSize); expectData[kOpRequestAlignSize] = '\0'; diff --git a/test/chunkserver/chunkserver_test.cpp b/test/chunkserver/chunkserver_test.cpp index 7963df4f23..f945e4e707 100644 --- a/test/chunkserver/chunkserver_test.cpp +++ b/test/chunkserver/chunkserver_test.cpp @@ -34,11 +34,9 @@ #include "test/client/fake/fakeMDS.h" -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t 
chunk_size = 16 * 1024 * 1024; // NOLINT -std::string mdsMetaServerAddr = "127.0.0.1:9301"; // NOLINT - -char* confPath = "conf/client.conf"; +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9301"; // NOLINT butil::AtExitManager atExitManager; @@ -53,17 +51,17 @@ struct ChunkServerPackage { char **argv; }; -void* run_chunkserver_thread(void *arg) { +void *run_chunkserver_thread(void *arg) { ChunkServerPackage *package = reinterpret_cast(arg); package->chunkserver->Run(package->argc, package->argv); return NULL; } -using curve::fs::LocalFsFactory; using curve::fs::FileSystemType; +using curve::fs::LocalFsFactory; -static int ExecCmd(const std::string& cmd) { +static int ExecCmd(const std::string &cmd) { LOG(INFO) << "executing command: " << cmd; return system(cmd.c_str()); @@ -73,7 +71,6 @@ class ChunkserverTest : public ::testing::Test { public: void SetUp() { std::string filename = "test.img"; - size_t filesize = 10uL * 1024 * 1024 * 1024; mds_ = new FakeMDS(filename); @@ -87,8 +84,7 @@ class ChunkserverTest : public ::testing::Test { } private: - FakeMDS* mds_; - int fd_; + FakeMDS *mds_; }; TEST(ChunkserverCommonTest, GroupIdTest) { @@ -111,25 +107,26 @@ TEST(ChunkserverCommonTest, GroupIdTest) { TEST(ChunkServerGflagTest, test_load_gflag) { int argc = 1; - char *argvv[] = {""}; + char *argvv[] = {const_cast("")}; char **argv = argvv; gflags::ParseCommandLineFlags(&argc, &argv, true); google::CommandLineFlagInfo info; ASSERT_TRUE(GetCommandLineFlagInfo("chunkservertest", &info)); ASSERT_TRUE(info.is_default); ASSERT_EQ("testdefault", FLAGS_chunkservertest); - ASSERT_FALSE( - GetCommandLineFlagInfo("chunkservertest", &info) && !info.is_default); + ASSERT_FALSE(GetCommandLineFlagInfo("chunkservertest", &info) && + !info.is_default); - char *argvj[] = {"", "-chunkservertest=test1"}; + char *argvj[] = {const_cast(""), + const_cast("-chunkservertest=test1")}; argv = argvj; argc = 2; gflags::ParseCommandLineFlags(&argc, &argv, true); ASSERT_TRUE(GetCommandLineFlagInfo("chunkservertest", &info)); ASSERT_FALSE(info.is_default); ASSERT_EQ("test1", FLAGS_chunkservertest); - ASSERT_TRUE( - GetCommandLineFlagInfo("chunkservertest", &info) && !info.is_default); + ASSERT_TRUE(GetCommandLineFlagInfo("chunkservertest", &info) && + !info.is_default); } } // namespace chunkserver } // namespace curve diff --git a/test/chunkserver/clone/clone_core_test.cpp b/test/chunkserver/clone/clone_core_test.cpp index b245b11eb9..737b8d9422 100644 --- a/test/chunkserver/clone/clone_core_test.cpp +++ b/test/chunkserver/clone/clone_core_test.cpp @@ -38,14 +38,13 @@ namespace curve { namespace chunkserver { using curve::chunkserver::CHUNK_OP_TYPE; -using curve::fs::LocalFsFactory; using curve::fs::FileSystemType; +using curve::fs::LocalFsFactory; -ACTION_TEMPLATE(SaveBraftTask, - HAS_1_TEMPLATE_PARAMS(int, k), +ACTION_TEMPLATE(SaveBraftTask, HAS_1_TEMPLATE_PARAMS(int, k), AND_1_VALUE_PARAMS(value)) { auto input = static_cast(::testing::get(args)); - auto output = static_cast(value); + auto output = static_cast(value); output->data->swap(*input.data); output->done = input.done; } @@ -68,20 +67,17 @@ class CloneCoreTest : public testing::Test { } void FakeCopysetNode() { - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); - EXPECT_CALL(*node_, GetDataStore()) - .WillRepeatedly(Return(datastore_)); + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); + EXPECT_CALL(*node_, 
GetDataStore()).WillRepeatedly(Return(datastore_)); EXPECT_CALL(*node_, GetConcurrentApplyModule()) .WillRepeatedly(Return(nullptr)); EXPECT_CALL(*node_, GetAppliedIndex()) .WillRepeatedly(Return(LAST_INDEX)); } - std::shared_ptr GenerateReadRequest(CHUNK_OP_TYPE optype, - off_t offset, - size_t length) { - ChunkRequest* readRequest = new ChunkRequest(); + std::shared_ptr + GenerateReadRequest(CHUNK_OP_TYPE optype, off_t offset, size_t length) { + ChunkRequest *readRequest = new ChunkRequest(); readRequest->set_logicpoolid(LOGICPOOL_ID); readRequest->set_copysetid(COPYSET_ID); readRequest->set_chunkid(CHUNK_ID); @@ -95,31 +91,25 @@ class CloneCoreTest : public testing::Test { closure->SetRequest(readRequest); closure->SetResponse(response); std::shared_ptr req = - std::make_shared(node_, - nullptr, - cntl, - readRequest, - response, - closure); + std::make_shared(node_, nullptr, cntl, + readRequest, response, closure); return req; } void SetCloneParam(std::shared_ptr readRequest) { - ChunkRequest* request = - const_cast(readRequest->GetChunkRequest()); + ChunkRequest *request = + const_cast(readRequest->GetChunkRequest()); request->set_clonefilesource("/test"); request->set_clonefileoffset(0); } - void CheckTask(const braft::Task& task, - off_t offset, - size_t length, - char* buf) { + void CheckTask(const braft::Task &task, off_t offset, size_t length, + char *buf) { butil::IOBuf data; ChunkRequest request; auto req = ChunkOpRequest::Decode(*task.data, &request, &data, 0, PeerId("127.0.0.1:8200:0")); - auto preq = dynamic_cast(req.get()); + auto preq = dynamic_cast(req.get()); ASSERT_TRUE(preq != nullptr); ASSERT_EQ(LOGICPOOL_ID, request.logicpoolid()); @@ -143,34 +133,28 @@ class CloneCoreTest : public testing::Test { TEST_F(CloneCoreTest, ReadChunkTest1) { off_t offset = 0; size_t length = 5 * PAGE_SIZE; - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); // 不会从源端拷贝数据 - EXPECT_CALL(*copyer_, DownloadAsync(_)) - .Times(0); + EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); // 获取chunk信息 CSChunkInfo info; info.isClone = false; info.pageSize = PAGE_SIZE; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(1); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(1); // 更新 applied index - EXPECT_CALL(*node_, UpdateAppliedIndex(_)) - .Times(1); + EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); // 不会产生PasteChunkRequest - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -196,93 +180,94 @@ TEST_F(CloneCoreTest, ReadChunkTest2) { info.pageSize = PAGE_SIZE; info.chunkSize = CHUNK_SIZE; 
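The SaveBraftTask action defined in clone_core_test.cpp captures the braft::Task that the code under test hands to Propose(), so CheckTask() can decode and verify it afterwards. For reference, a minimal, self-contained sketch of the same gmock ACTION_TEMPLATE capture technique is shown below; Record, Sink and MockSink are hypothetical names used only for illustration and are not part of this patch.

// Minimal sketch of the ACTION_TEMPLATE capture pattern (hypothetical types).
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <string>
#include <tuple>

struct Record {
    int id = 0;
    std::string payload;
};

class Sink {
 public:
    virtual ~Sink() = default;
    virtual void Submit(const Record &record) = 0;
};

class MockSink : public Sink {
 public:
    MOCK_METHOD(void, Submit, (const Record &record), (override));
};

// Copies the k-th call argument into *out, mirroring SaveBraftTask<0>(&task).
ACTION_TEMPLATE(SaveArgTo, HAS_1_TEMPLATE_PARAMS(int, k),
                AND_1_VALUE_PARAMS(out)) {
    *out = std::get<k>(args);
}

TEST(CapturePatternSketch, SavesSubmittedRecord) {
    MockSink sink;
    Record captured;
    EXPECT_CALL(sink, Submit(::testing::_)).WillOnce(SaveArgTo<0>(&captured));

    Record r{42, "hello"};
    sink.Submit(r);

    // The test can now assert on the captured argument, the way CheckTask()
    // inspects the saved braft::Task in the cases above and below.
    EXPECT_EQ(42, captured.id);
    EXPECT_EQ("hello", captured.payload);
}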
info.bitmap = std::make_shared(CHUNK_SIZE / PAGE_SIZE); - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); // case1 { info.bitmap->Set(); // 每次调HandleReadRequest后会被closure释放 - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - EXPECT_CALL(*copyer_, DownloadAsync(_)) - .Times(0); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 读chunk文件 - char chunkData[length]; // NOLINT + char *chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .WillOnce(DoAll(SetArrayArgument<2>(chunkData, - chunkData + length), + .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), Return(CSErrorCode::Success))); // 更新 applied index - EXPECT_CALL(*node_, UpdateAppliedIndex(_)) - .Times(1); + EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); // 不会产生PasteChunkRequest - EXPECT_CALL(*node_, Propose(_)) - .Times(0); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + EXPECT_CALL(*node_, Propose(_)).Times(0); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, closure->resContent_.status); - ASSERT_EQ(memcmp(chunkData, - closure->resContent_.attachment.to_string().c_str(), //NOLINT - length), 0); + ASSERT_EQ( + memcmp( + chunkData, + closure->resContent_.attachment.to_string().c_str(), // NOLINT + length), + 0); + delete[] chunkData; } // case2 { info.bitmap->Clear(); // 每次调HandleReadRequest后会被closure释放 - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - char cloneData[length]; // NOLINT + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + char *cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure* closure){ + .WillOnce(Invoke([&](DownloadClosure *closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext* context = closure->GetDownloadContext(); + AsyncDownloadContext *context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); // 更新 applied index - EXPECT_CALL(*node_, UpdateAppliedIndex(_)) - .Times(1); + EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); // 产生PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveBraftTask<0>(&task)); + EXPECT_CALL(*node_, 
Propose(_)).WillOnce(SaveBraftTask<0>(&task)); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - closure->resContent_.status); + closure->resContent_.status); CheckTask(task, offset, length, cloneData); // 正常propose后,会将closure交给并发层处理, // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 ASSERT_NE(nullptr, task.done); task.done->Run(); - ASSERT_EQ(memcmp(cloneData, - closure->resContent_.attachment.to_string().c_str(), //NOLINT - length), 0); + ASSERT_EQ( + memcmp( + cloneData, + closure->resContent_.attachment.to_string().c_str(), // NOLINT + length), + 0); + delete[] cloneData; } // case3 @@ -290,41 +275,39 @@ TEST_F(CloneCoreTest, ReadChunkTest2) { info.bitmap->Clear(); info.bitmap->Set(0, 2); // 每次调HandleReadRequest后会被closure释放 - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - char cloneData[length]; // NOLINT + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + char *cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure* closure){ + .WillOnce(Invoke([&](DownloadClosure *closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext* context = closure->GetDownloadContext(); + AsyncDownloadContext *context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 读chunk文件 char chunkData[3 * PAGE_SIZE]; memset(chunkData, 'a', 3 * PAGE_SIZE); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, 0, 3 * PAGE_SIZE)) - .WillOnce(DoAll(SetArrayArgument<2>(chunkData, - chunkData + 3 * PAGE_SIZE), - Return(CSErrorCode::Success))); + .WillOnce( + DoAll(SetArrayArgument<2>(chunkData, chunkData + 3 * PAGE_SIZE), + Return(CSErrorCode::Success))); // 更新 applied index - EXPECT_CALL(*node_, UpdateAppliedIndex(_)) - .Times(1); + EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); // 产生PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveBraftTask<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveBraftTask<0>(&task)); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -335,12 +318,18 @@ TEST_F(CloneCoreTest, ReadChunkTest2) { // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 ASSERT_NE(nullptr, task.done); task.done->Run(); - ASSERT_EQ(memcmp(chunkData, - closure->resContent_.attachment.to_string().c_str(), //NOLINT - 3 * PAGE_SIZE), 0); + ASSERT_EQ( + memcmp( + chunkData, + closure->resContent_.attachment.to_string().c_str(), 
// NOLINT + 3 * PAGE_SIZE), + 0); ASSERT_EQ(memcmp(cloneData, - closure->resContent_.attachment.to_string().c_str() + 3 * PAGE_SIZE, //NOLINT - 2 * PAGE_SIZE), 0); + closure->resContent_.attachment.to_string().c_str() + + 3 * PAGE_SIZE, // NOLINT + 2 * PAGE_SIZE), + 0); + delete[] cloneData; } // case4 { @@ -349,29 +338,25 @@ TEST_F(CloneCoreTest, ReadChunkTest2) { info.bitmap->Clear(); info.bitmap->Set(0, 2); // 每次调HandleReadRequest后会被closure释放 - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - EXPECT_CALL(*copyer_, DownloadAsync(_)) - .Times(0); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); // 更新 applied index - EXPECT_CALL(*node_, UpdateAppliedIndex(_)) - .Times(0); + EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(0); // 不产生PasteChunkRequest braft::Task task; - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); - ASSERT_EQ(-1, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(-1, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, @@ -391,57 +376,58 @@ TEST_F(CloneCoreTest, ReadChunkTest3) { info.pageSize = PAGE_SIZE; info.chunkSize = CHUNK_SIZE; info.bitmap = std::make_shared(CHUNK_SIZE / PAGE_SIZE); - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); // case1 { info.bitmap->Clear(); // 每次调HandleReadRequest后会被closure释放 - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); SetCloneParam(readRequest); - char cloneData[length]; // NOLINT + char *cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure* closure){ + .WillOnce(Invoke([&](DownloadClosure *closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext* context = closure->GetDownloadContext(); + AsyncDownloadContext *context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) .WillRepeatedly(Return(CSErrorCode::ChunkNotExistError)); // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); // 更新 applied index - EXPECT_CALL(*node_, UpdateAppliedIndex(_)) - .Times(1); + EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); // 产生PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveBraftTask<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveBraftTask<0>(&task)); - ASSERT_EQ(0, 
core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - closure->resContent_.status); + closure->resContent_.status); CheckTask(task, offset, length, cloneData); // 正常propose后,会将closure交给并发层处理, // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 ASSERT_NE(nullptr, task.done); task.done->Run(); - ASSERT_EQ(memcmp(cloneData, - closure->resContent_.attachment.to_string().c_str(), //NOLINT - length), 0); + ASSERT_EQ( + memcmp( + cloneData, + closure->resContent_.attachment.to_string().c_str(), // NOLINT + length), + 0); + delete[] cloneData; } } @@ -464,26 +450,23 @@ TEST_F(CloneCoreTest, ReadChunkErrorTest) { info.bitmap = std::make_shared(CHUNK_SIZE / PAGE_SIZE); info.bitmap->Clear(); info.bitmap->Set(0, 2); - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); // case1 { - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::InternalError)); - EXPECT_CALL(*copyer_, DownloadAsync(_)) - .Times(0); - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); - - ASSERT_EQ(-1, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); + + ASSERT_EQ(-1, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, @@ -491,61 +474,60 @@ TEST_F(CloneCoreTest, ReadChunkErrorTest) { } // case2 { - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - char cloneData[length]; // NOLINT + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + char *cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure* closure){ + .WillOnce(Invoke([&](DownloadClosure *closure) { brpc::ClosureGuard guard(closure); closure->SetFailed(); })); - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); 
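The DownloadAsync expectations in these cases fake the asynchronous copy by running the closure inline with ::testing::Invoke, either filling the download buffer or flagging the request as failed via SetFailed(). A generic sketch of that Invoke-with-lambda stubbing technique follows; Downloader, MockDownloader and FetchContext are hypothetical stand-ins, not curve types.

// Generic sketch of stubbing an async callback API with Invoke (hypothetical types).
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <cstddef>
#include <cstring>
#include <functional>
#include <vector>

struct FetchContext {
    char *buf = nullptr;      // caller-owned destination buffer
    std::size_t length = 0;
    bool failed = false;
};

using FetchDone = std::function<void(FetchContext *)>;

class Downloader {
 public:
    virtual ~Downloader() = default;
    virtual void FetchAsync(FetchContext *ctx, FetchDone done) = 0;
};

class MockDownloader : public Downloader {
 public:
    MOCK_METHOD(void, FetchAsync, (FetchContext *ctx, FetchDone done),
                (override));
};

TEST(AsyncStubSketch, CompletesDownloadInline) {
    MockDownloader downloader;
    // Complete the "async" call synchronously, the way the expectations above
    // complete the DownloadClosure inside Invoke().
    EXPECT_CALL(downloader, FetchAsync(::testing::_, ::testing::_))
        .WillOnce(::testing::Invoke([](FetchContext *ctx, FetchDone done) {
            std::memset(ctx->buf, 'b', ctx->length);
            done(ctx);
        }));

    std::vector<char> buf(16, 0);
    FetchContext ctx;
    ctx.buf = buf.data();
    ctx.length = buf.size();

    bool called = false;
    downloader.FetchAsync(&ctx, [&](FetchContext *c) {
        called = true;
        EXPECT_FALSE(c->failed);
    });

    EXPECT_TRUE(called);
    EXPECT_EQ('b', buf.front());
}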
ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, closure->resContent_.status); + delete[] cloneData; } // case3 { - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - char cloneData[length]; // NOLINT + .WillRepeatedly( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + char *cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure* closure){ + .WillOnce(Invoke([&](DownloadClosure *closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext* context = closure->GetDownloadContext(); + AsyncDownloadContext *context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) .WillOnce(Return(CSErrorCode::InternalError)); - // 产生PasteChunkRequest + // 产生PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveBraftTask<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveBraftTask<0>(&task)); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, @@ -556,6 +538,7 @@ TEST_F(CloneCoreTest, ReadChunkErrorTest) { // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 ASSERT_NE(nullptr, task.done); task.done->Run(); + delete[] cloneData; } } @@ -566,31 +549,26 @@ TEST_F(CloneCoreTest, ReadChunkErrorTest) { TEST_F(CloneCoreTest, RecoverChunkTest1) { off_t offset = 0; size_t length = 5 * PAGE_SIZE; - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // 不会从源端拷贝数据 - EXPECT_CALL(*copyer_, DownloadAsync(_)) - .Times(0); + EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); // 获取chunk信息 CSChunkInfo info; info.isClone = false; info.pageSize = PAGE_SIZE; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); // 不会产生PasteChunkRequest - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); 
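The buffer changes in these tests swap variable-length stack arrays such as char buf[length] for new char[length] plus an explicit delete[], which is the form the patch uses throughout. Purely for comparison, and assuming nothing beyond the standard library and googletest, an RAII buffer would drop the manual delete[] entirely; BuildPayload below is a hypothetical helper, not code from this patch.

// RAII alternative to the new[]/delete[] buffers used in these tests
// (illustrative only; the patch itself keeps raw new/delete).
#include <gtest/gtest.h>
#include <cstddef>
#include <cstring>
#include <vector>

// Hypothetical helper: builds a length-byte buffer filled with `fill`.
std::vector<char> BuildPayload(std::size_t length, char fill) {
    std::vector<char> buf(length);
    std::memset(buf.data(), fill, buf.size());
    return buf;  // released automatically on every exit path, no delete[]
}

TEST(BufferSketch, VectorReplacesNewDelete) {
    const std::size_t length = 5 * 4096;  // e.g. 5 * PAGE_SIZE
    auto chunkData = BuildPayload(length, 'a');
    ASSERT_EQ(length, chunkData.size());
    ASSERT_EQ('a', chunkData[length - 1]);
    // chunkData.data() and chunkData.data() + chunkData.size() can be handed to
    // SetArrayArgument<2>(...) exactly like the raw pointers in the tests above.
}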
ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -612,29 +590,26 @@ TEST_F(CloneCoreTest, RecoverChunkTest2) { info.pageSize = PAGE_SIZE; info.chunkSize = CHUNK_SIZE; info.bitmap = std::make_shared(CHUNK_SIZE / PAGE_SIZE); - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); // case1 { info.bitmap->Set(); // 每次调HandleReadRequest后会被closure释放 - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); //NOLINT - EXPECT_CALL(*copyer_, DownloadAsync(_)) - .Times(0); + std::shared_ptr readRequest = GenerateReadRequest( + CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // NOLINT + EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 不会读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); // 不会产生PasteChunkRequest - EXPECT_CALL(*node_, Propose(_)) - .Times(0); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + EXPECT_CALL(*node_, Propose(_)).Times(0); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -645,33 +620,31 @@ TEST_F(CloneCoreTest, RecoverChunkTest2) { { info.bitmap->Clear(); // 每次调HandleReadRequest后会被closure释放 - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); //NOLINT - char cloneData[length]; // NOLINT + std::shared_ptr readRequest = GenerateReadRequest( + CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // NOLINT + char *cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure* closure){ + .WillOnce(Invoke([&](DownloadClosure *closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext* context = closure->GetDownloadContext(); + AsyncDownloadContext *context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 不会读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); // 产生PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveBraftTask<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveBraftTask<0>(&task)); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); // closure被转交给PasteRequest处理,这里closure还未执行 ASSERT_FALSE(closure->isDone_); @@ 
-684,6 +657,7 @@ TEST_F(CloneCoreTest, RecoverChunkTest2) { ASSERT_TRUE(closure->isDone_); ASSERT_EQ(0, closure->resContent_.appliedindex); ASSERT_EQ(0, closure->resContent_.status); + delete[] cloneData; } } @@ -697,79 +671,75 @@ TEST_F(CloneCoreTest, DisablePasteTest) { info.pageSize = PAGE_SIZE; info.chunkSize = CHUNK_SIZE; info.bitmap = std::make_shared(CHUNK_SIZE / PAGE_SIZE); - std::shared_ptr core - = std::make_shared(SLICE_SIZE, false, copyer_); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, false, copyer_); // case1 { info.bitmap->Clear(); // 每次调HandleReadRequest后会被closure释放 - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - char cloneData[length]; // NOLINT + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + char *cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure* closure){ + .WillOnce(Invoke([&](DownloadClosure *closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext* context = closure->GetDownloadContext(); + AsyncDownloadContext *context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); // 更新 applied index - EXPECT_CALL(*node_, UpdateAppliedIndex(_)) - .Times(1); + EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); // 不会产生paste chunk请求 - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - closure->resContent_.status); + closure->resContent_.status); + delete[] cloneData; } // case2 { info.bitmap->Clear(); // 每次调HandleReadRequest后会被closure释放 - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); //NOLINT - char cloneData[length]; // NOLINT + std::shared_ptr readRequest = GenerateReadRequest( + CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // NOLINT + char *cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure* closure){ + .WillOnce(Invoke([&](DownloadClosure *closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext* context = closure->GetDownloadContext(); + AsyncDownloadContext *context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 不会读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); // 
产生PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveBraftTask<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveBraftTask<0>(&task)); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); // closure被转交给PasteRequest处理,这里closure还未执行 ASSERT_FALSE(closure->isDone_); @@ -782,6 +752,7 @@ TEST_F(CloneCoreTest, DisablePasteTest) { ASSERT_TRUE(closure->isDone_); ASSERT_EQ(0, closure->resContent_.appliedindex); ASSERT_EQ(0, closure->resContent_.status); + delete[] cloneData; } } diff --git a/test/chunkserver/clone/op_request_test.cpp b/test/chunkserver/clone/op_request_test.cpp index 20e821132f..8126bdc959 100644 --- a/test/chunkserver/clone/op_request_test.cpp +++ b/test/chunkserver/clone/op_request_test.cpp @@ -668,7 +668,7 @@ TEST_F(OpRequestTest, ReadChunkTest) { .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - char chunkData[length]; // NOLINT + char *chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), @@ -687,6 +687,7 @@ TEST_F(OpRequestTest, ReadChunkTest) { } ASSERT_TRUE(closure->isDone_); + delete[] chunkData; } /** @@ -704,7 +705,7 @@ TEST_F(OpRequestTest, ReadChunkTest) { .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 读chunk文件 - char chunkData[length]; // NOLINT + char *chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), @@ -723,6 +724,7 @@ TEST_F(OpRequestTest, ReadChunkTest) { cntl->response_attachment().to_string().c_str(), // NOLINT length), 0); + delete[] chunkData; } /** @@ -741,7 +743,7 @@ TEST_F(OpRequestTest, ReadChunkTest) { .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 读chunk文件 - char chunkData[length]; // NOLINT + char *chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), @@ -762,6 +764,7 @@ TEST_F(OpRequestTest, ReadChunkTest) { .c_str(), // NOLINT length), 0); + delete[] chunkData; } /** @@ -879,7 +882,7 @@ TEST_F(OpRequestTest, ReadChunkTest) { .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 读chunk文件 - char chunkData[length]; // NOLINT + char *chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, @@ -899,6 +902,7 @@ TEST_F(OpRequestTest, ReadChunkTest) { ASSERT_EQ(memcmp(chunkData, closure->cntl_->response_attachment().to_string().c_str(), //NOLINT length), 0); + delete[] chunkData; } /** * 测试OnApply diff --git a/test/chunkserver/conf_epoch_file_test.cpp b/test/chunkserver/conf_epoch_file_test.cpp index a6a1fc53b8..a1ee08ace3 100644 --- a/test/chunkserver/conf_epoch_file_test.cpp +++ b/test/chunkserver/conf_epoch_file_test.cpp @@ -33,20 +33,20 @@ namespace curve { namespace chunkserver { using ::testing::_; -using ::testing::Invoke; -using ::testing::Return; using ::testing::AnyNumber; -using 
::testing::Matcher; +using ::testing::AtLeast; using ::testing::DoAll; -using ::testing::SetArgPointee; -using ::testing::SetArrayArgument; -using ::testing::SetArgReferee; using ::testing::InSequence; -using ::testing::AtLeast; +using ::testing::Invoke; +using ::testing::Matcher; +using ::testing::Return; using ::testing::SaveArgPointee; +using ::testing::SetArgPointee; +using ::testing::SetArgReferee; +using ::testing::SetArrayArgument; -using curve::fs::MockLocalFileSystem; using curve::fs::FileSystemType; +using curve::fs::MockLocalFileSystem; TEST(ConfEpochFileTest, load_save) { LogicPoolID logicPoolID = 123; @@ -65,9 +65,7 @@ TEST(ConfEpochFileTest, load_save) { LogicPoolID loadLogicPoolID; CopysetID loadCopysetID; uint64_t loadEpoch; - ASSERT_EQ(0, confEpochFile.Load(path, - &loadLogicPoolID, - &loadCopysetID, + ASSERT_EQ(0, confEpochFile.Load(path, &loadLogicPoolID, &loadCopysetID, &loadEpoch)); ASSERT_EQ(logicPoolID, loadLogicPoolID); ASSERT_EQ(copysetID, loadCopysetID); @@ -78,22 +76,20 @@ TEST(ConfEpochFileTest, load_save) { // load: open failed { - std::shared_ptr fs - = std::make_shared(); + std::shared_ptr fs = + std::make_shared(); ConfEpochFile confEpochFile(fs); LogicPoolID loadLogicPoolID; CopysetID loadCopysetID; uint64_t loadEpoch; EXPECT_CALL(*fs, Open(_, _)).Times(1).WillOnce(Return(-1)); - ASSERT_EQ(-1, confEpochFile.Load(path, - &loadLogicPoolID, - &loadCopysetID, - &loadEpoch)); + ASSERT_EQ(-1, confEpochFile.Load(path, &loadLogicPoolID, &loadCopysetID, + &loadEpoch)); } // load: open success, read failed { - std::shared_ptr fs - = std::make_shared(); + std::shared_ptr fs = + std::make_shared(); ConfEpochFile confEpochFile(fs); LogicPoolID loadLogicPoolID; CopysetID loadCopysetID; @@ -101,118 +97,111 @@ TEST(ConfEpochFileTest, load_save) { EXPECT_CALL(*fs, Open(_, _)).Times(1).WillOnce(Return(10)); EXPECT_CALL(*fs, Read(_, _, _, _)).Times(1).WillOnce(Return(-1)); EXPECT_CALL(*fs, Close(_)).Times(1).WillOnce(Return(0)); - ASSERT_EQ(-1, confEpochFile.Load(path, - &loadLogicPoolID, - &loadCopysetID, + ASSERT_EQ(-1, confEpochFile.Load(path, &loadLogicPoolID, &loadCopysetID, &loadEpoch)); } // load: open success, read success, decode success, crc32c right { - char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":599727352}"; // NOLINT + const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":" + "0,\"checksum\":599727352}"; // NOLINT std::string jsonStr(json); - std::shared_ptr fs - = std::make_shared(); + std::shared_ptr fs = + std::make_shared(); ConfEpochFile confEpochFile(fs); LogicPoolID loadLogicPoolID; CopysetID loadCopysetID; uint64_t loadEpoch; EXPECT_CALL(*fs, Open(_, _)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*fs, Read(_, _, _, _)).Times(1) + EXPECT_CALL(*fs, Read(_, _, _, _)) + .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(json, json + jsonStr.size()), Return(jsonStr.size()))); EXPECT_CALL(*fs, Close(_)).Times(1).WillOnce(Return(0)); - ASSERT_EQ(0, confEpochFile.Load(path, - &loadLogicPoolID, - &loadCopysetID, - &loadEpoch)); + ASSERT_EQ(0, confEpochFile.Load(path, &loadLogicPoolID, &loadCopysetID, + &loadEpoch)); } // load: open success, read success, decode failed, crc32c right { - char *json = "{\"logicPoolId"; + const char *json = "{\"logicPoolId"; std::string jsonStr(json); - std::shared_ptr fs - = std::make_shared(); + std::shared_ptr fs = + std::make_shared(); ConfEpochFile confEpochFile(fs); LogicPoolID loadLogicPoolID; CopysetID loadCopysetID; uint64_t loadEpoch; EXPECT_CALL(*fs, Open(_, 
_)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*fs, Read(_, _, _, _)).Times(1) + EXPECT_CALL(*fs, Read(_, _, _, _)) + .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(json, json + jsonStr.size()), Return(jsonStr.size()))); EXPECT_CALL(*fs, Close(_)).Times(1).WillOnce(Return(0)); - ASSERT_EQ(-1, confEpochFile.Load(path, - &loadLogicPoolID, - &loadCopysetID, + ASSERT_EQ(-1, confEpochFile.Load(path, &loadLogicPoolID, &loadCopysetID, &loadEpoch)); } // load: open success, read success, decode success, crc32c not right { - char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":123}"; // NOLINT + const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":" + "0,\"checksum\":123}"; // NOLINT std::string jsonStr(json); - std::shared_ptr fs - = std::make_shared(); + std::shared_ptr fs = + std::make_shared(); ConfEpochFile confEpochFile(fs); LogicPoolID loadLogicPoolID; CopysetID loadCopysetID; uint64_t loadEpoch; EXPECT_CALL(*fs, Open(_, _)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*fs, Read(_, _, _, _)).Times(1) + EXPECT_CALL(*fs, Read(_, _, _, _)) + .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(json, json + jsonStr.size()), Return(jsonStr.size()))); EXPECT_CALL(*fs, Close(_)).Times(1).WillOnce(Return(0)); - ASSERT_EQ(-1, confEpochFile.Load(path, - &loadLogicPoolID, - &loadCopysetID, - &loadEpoch)); + ASSERT_EQ(-1, confEpochFile.Load(path, &loadLogicPoolID, &loadCopysetID, + &loadEpoch)); } // save: open failed { - std::shared_ptr fs - = std::make_shared(); + std::shared_ptr fs = + std::make_shared(); ConfEpochFile confEpochFile(fs); - LogicPoolID loadLogicPoolID; - CopysetID loadCopysetID; - uint64_t loadEpoch; + LogicPoolID loadLogicPoolID = 0; + CopysetID loadCopysetID = 0; + uint64_t loadEpoch = 0; EXPECT_CALL(*fs, Open(_, _)).Times(1).WillOnce(Return(-1)); - ASSERT_EQ(-1, confEpochFile.Save(path, - loadLogicPoolID, - loadCopysetID, + ASSERT_EQ(-1, confEpochFile.Save(path, loadLogicPoolID, loadCopysetID, loadEpoch)); } // save: open success, write failed { - std::shared_ptr fs - = std::make_shared(); + std::shared_ptr fs = + std::make_shared(); ConfEpochFile confEpochFile(fs); - LogicPoolID loadLogicPoolID; - CopysetID loadCopysetID; - uint64_t loadEpoch; + LogicPoolID loadLogicPoolID = 0; + CopysetID loadCopysetID = 0; + uint64_t loadEpoch = 0; EXPECT_CALL(*fs, Open(_, _)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*fs, Write(_, Matcher(_), _, _)).Times(1) + EXPECT_CALL(*fs, Write(_, Matcher(_), _, _)) + .Times(1) .WillOnce(Return(-1)); EXPECT_CALL(*fs, Close(_)).Times(1).WillOnce(Return(0)); - ASSERT_EQ(-1, confEpochFile.Save(path, - loadLogicPoolID, - loadCopysetID, + ASSERT_EQ(-1, confEpochFile.Save(path, loadLogicPoolID, loadCopysetID, loadEpoch)); } // save: open success, write success, fsync failed { - char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":599727352}"; // NOLINT + const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":" + "0,\"checksum\":599727352}"; // NOLINT std::string jsonStr(json); - std::shared_ptr fs - = std::make_shared(); + std::shared_ptr fs = + std::make_shared(); ConfEpochFile confEpochFile(fs); EXPECT_CALL(*fs, Open(_, _)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*fs, Write(_, Matcher(_), _, _)).Times(1) + EXPECT_CALL(*fs, Write(_, Matcher(_), _, _)) + .Times(1) .WillOnce(Return(jsonStr.size())); EXPECT_CALL(*fs, Close(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*fs, Fsync(_)).Times(1).WillOnce(Return(-1)); - ASSERT_EQ(-1, confEpochFile.Save(path, - logicPoolID, - 
copysetID, - epoch)); + ASSERT_EQ(-1, confEpochFile.Save(path, logicPoolID, copysetID, epoch)); } } diff --git a/test/chunkserver/copyset_node_test.cpp b/test/chunkserver/copyset_node_test.cpp index eee7a3fa02..4b7e221976 100644 --- a/test/chunkserver/copyset_node_test.cpp +++ b/test/chunkserver/copyset_node_test.cpp @@ -160,7 +160,6 @@ class CopysetNodeTest : public ::testing::Test { TEST_F(CopysetNodeTest, error_test) { std::shared_ptr fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT - const uint32_t kMaxChunkSize = 16 * 1024 * 1024; std::string rmCmd("rm -f "); rmCmd += kCurveConfEpochFilename; @@ -173,7 +172,7 @@ TEST_F(CopysetNodeTest, error_test) { files.push_back("test-1.txt"); files.push_back("test-2.txt"); - char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT + const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT std::string jsonStr(json); CopysetNode copysetNode(logicPoolID, copysetID, conf); @@ -235,7 +234,7 @@ TEST_F(CopysetNodeTest, error_test) { files.push_back("test-1.txt"); files.push_back("test-2.txt"); - char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT + const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT std::string jsonStr(json); CopysetNode copysetNode(logicPoolID, copysetID, conf); @@ -270,7 +269,7 @@ TEST_F(CopysetNodeTest, error_test) { files.push_back("test-1.txt"); files.push_back("test-2.txt"); - char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT + const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT std::string jsonStr(json); CopysetNode copysetNode(logicPoolID, copysetID, conf); @@ -590,7 +589,6 @@ TEST_F(CopysetNodeTest, error_test) { TEST_F(CopysetNodeTest, get_conf_change) { std::shared_ptr fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT - const uint32_t kMaxChunkSize = 16 * 1024 * 1024; std::string rmCmd("rm -f "); rmCmd += kCurveConfEpochFilename; @@ -794,7 +792,6 @@ TEST_F(CopysetNodeTest, get_conf_change) { TEST_F(CopysetNodeTest, get_hash) { std::shared_ptr fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT - const uint32_t kMaxChunkSize = 16 * 1024 * 1024; std::string rmCmd("rm -f "); rmCmd += kCurveConfEpochFilename; diff --git a/test/chunkserver/datastore/datastore_mock_unittest.cpp b/test/chunkserver/datastore/datastore_mock_unittest.cpp index 9d5e8444c6..f1668aa7a6 100644 --- a/test/chunkserver/datastore/datastore_mock_unittest.cpp +++ b/test/chunkserver/datastore/datastore_mock_unittest.cpp @@ -669,8 +669,8 @@ TEST_F(CSDataStore_test, WriteChunkTest1) { SequenceNum sn = 1; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // create new chunk and open it string chunk3Path = string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); @@ -733,6 +733,7 @@ TEST_F(CSDataStore_test, WriteChunkTest1) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /** @@ -751,8 +752,8 @@ TEST_F(CSDataStore_test, WriteChunkTest2) { SequenceNum sn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // snchunk.correctedsn 
EXPECT_EQ(CSErrorCode::BackwardRequestError, @@ -789,6 +790,7 @@ TEST_F(CSDataStore_test, WriteChunkTest2) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -812,8 +814,8 @@ TEST_F(CSDataStore_test, WriteChunkTest3) { SequenceNum sn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // sn>chunk.sn sn(NotNull()), 0, PAGE_SIZE)) .Times(1); @@ -982,6 +986,7 @@ TEST_F(CSDataStore_test, WriteChunkTest6) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -1000,8 +1005,8 @@ TEST_F(CSDataStore_test, WriteChunkTest7) { SequenceNum sn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // will Open snapshot file, snap sn equals 2 string snapPath = string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); @@ -1079,6 +1084,7 @@ TEST_F(CSDataStore_test, WriteChunkTest7) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /** @@ -1096,8 +1102,8 @@ TEST_F(CSDataStore_test, WriteChunkTest9) { SequenceNum sn = 2; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // will not create snapshot // will copy on write EXPECT_CALL(*lfs_, Read(1, NotNull(), PAGE_SIZE + offset, length)) @@ -1131,6 +1137,7 @@ TEST_F(CSDataStore_test, WriteChunkTest9) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -1155,8 +1162,8 @@ TEST_F(CSDataStore_test, WriteChunkTest10) { SequenceNum sn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // will update metapage EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, PAGE_SIZE)) .Times(1); @@ -1184,6 +1191,7 @@ TEST_F(CSDataStore_test, WriteChunkTest10) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -1208,8 +1216,8 @@ TEST_F(CSDataStore_test, WriteChunkTest11) { SequenceNum sn = 4; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // sn>chunk.sn, sn>chunk.correctedsn EXPECT_EQ(CSErrorCode::SnapshotConflictError, @@ -1231,6 +1239,7 @@ TEST_F(CSDataStore_test, WriteChunkTest11) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -1255,8 +1264,8 @@ TEST_F(CSDataStore_test, WriteChunkTest13) { SequenceNum correctedSn = 0; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); CSChunkInfo info; // 创建 clone chunk { @@ -1417,6 +1426,7 @@ TEST_F(CSDataStore_test, WriteChunkTest13) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /** @@ -1441,8 +1451,8 @@ TEST_F(CSDataStore_test, WriteChunkTest14) { SequenceNum correctedSn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); CSChunkInfo info; // 创建 clone chunk { @@ -1612,6 +1622,7 @@ TEST_F(CSDataStore_test, WriteChunkTest14) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /** @@ -1646,8 +1657,8 @@ TEST_F(CSDataStore_test, 
WriteChunkTest15) { SequenceNum sn = 2; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // will not create snapshot // will not copy on write EXPECT_CALL(*lfs_, Write(2, Matcher(NotNull()), _, _)) @@ -1671,6 +1682,7 @@ TEST_F(CSDataStore_test, WriteChunkTest15) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -1705,8 +1717,8 @@ TEST_F(CSDataStore_test, WriteChunkTest16) { SequenceNum sn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // will not create snapshot // will not copy on write EXPECT_CALL(*lfs_, Write(2, Matcher(NotNull()), _, _)) @@ -1733,6 +1745,7 @@ TEST_F(CSDataStore_test, WriteChunkTest16) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -1749,8 +1762,8 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest1) { SequenceNum sn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); string snapPath = string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); @@ -1817,6 +1830,7 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest1) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -1834,8 +1848,8 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest2) { SequenceNum sn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // will Open snapshot file, snap sn equals 2 string snapPath = string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); @@ -1879,6 +1893,7 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest2) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /** @@ -1896,8 +1911,8 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest3) { SequenceNum sn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // will Open snapshot file, snap sn equals 2 string snapPath = string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); @@ -2011,6 +2026,7 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest3) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /** @@ -2028,8 +2044,8 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest4) { SequenceNum sn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // will Open snapshot file, snap sn equals 2 string snapPath = string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); @@ -2093,6 +2109,7 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest4) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /** @@ -2109,8 +2126,8 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest5) { SequenceNum sn = 1; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // create new chunk and open it string chunk3Path = string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); @@ -2211,6 +2228,7 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest5) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; 
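The datastore expectations here wrap NotNull() in a Matcher cast so gmock can tell which overload of the mocked Write() an expectation refers to. A minimal sketch of that overload-disambiguation technique is shown below, with a hypothetical MockFile standing in for the real MockLocalFileSystem.

// Sketch of using Matcher<T>() to select between overloaded mock methods.
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <cstddef>
#include <string>

class File {
 public:
    virtual ~File() = default;
    virtual int Write(int fd, const char *buf, std::size_t len) = 0;
    virtual int Write(int fd, const std::string &buf, std::size_t len) = 0;
};

class MockFile : public File {
 public:
    MOCK_METHOD(int, Write, (int fd, const char *buf, std::size_t len),
                (override));
    MOCK_METHOD(int, Write, (int fd, const std::string &buf, std::size_t len),
                (override));
};

TEST(MatcherOverloadSketch, PicksTheCharPointerOverload) {
    using ::testing::_;
    using ::testing::Matcher;
    using ::testing::NotNull;
    using ::testing::Return;

    MockFile file;
    // Without the Matcher<const char *> wrapper the expectation would be
    // ambiguous between the two Write() overloads.
    EXPECT_CALL(file, Write(_, Matcher<const char *>(NotNull()), _))
        .WillOnce(Return(0));

    const char data[] = "payload";
    EXPECT_EQ(0, file.Write(3, data, sizeof(data)));
}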
} /* @@ -2233,8 +2251,8 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest6) { SequenceNum correctedSn = 0; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); CSChunkInfo info; // 创建 clone chunk { @@ -2333,6 +2351,7 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest6) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /** @@ -2349,8 +2368,8 @@ TEST_F(CSDataStore_test, ReadChunkTest1) { SequenceNum sn = 2; off_t offset = PAGE_SIZE; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // test chunk not exists EXPECT_EQ(CSErrorCode::ChunkNotExistError, dataStore->ReadChunk(id, @@ -2365,6 +2384,7 @@ TEST_F(CSDataStore_test, ReadChunkTest1) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -2381,8 +2401,8 @@ TEST_F(CSDataStore_test, ReadChunkTest2) { SequenceNum sn = 2; off_t offset = CHUNK_SIZE; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // test read out of range EXPECT_EQ(CSErrorCode::InvalidArgError, dataStore->ReadChunk(id, @@ -2415,6 +2435,7 @@ TEST_F(CSDataStore_test, ReadChunkTest2) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -2431,8 +2452,8 @@ TEST_F(CSDataStore_test, ReadChunkTest3) { SequenceNum sn = 2; off_t offset = PAGE_SIZE; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // test chunk exists EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + PAGE_SIZE, length)) .Times(1); @@ -2449,6 +2470,7 @@ TEST_F(CSDataStore_test, ReadChunkTest3) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -2502,8 +2524,8 @@ TEST_F(CSDataStore_test, ReadChunkTest4) { // case1: 读取未写过区域 off_t offset = 1 * PAGE_SIZE; size_t length = PAGE_SIZE; - char buf[2 * length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[2 * length]; + memset(buf, 0, 2 * length); EXPECT_CALL(*lfs_, Read(_, _, _, _)) .Times(0); EXPECT_EQ(CSErrorCode::PageNerverWrittenError, @@ -2561,8 +2583,8 @@ TEST_F(CSDataStore_test, ReadChunkErrorTest1) { SequenceNum sn = 2; off_t offset = PAGE_SIZE; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // test read chunk failed EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + PAGE_SIZE, length)) .WillOnce(Return(-UT_ERRNO)); @@ -2579,6 +2601,7 @@ TEST_F(CSDataStore_test, ReadChunkErrorTest1) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -2595,8 +2618,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkTest1) { SequenceNum sn = 2; off_t offset = PAGE_SIZE; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // test chunk not exists EXPECT_EQ(CSErrorCode::ChunkNotExistError, dataStore->ReadSnapshotChunk(id, @@ -2611,6 +2634,7 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkTest1) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -2627,8 +2651,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkTest2) { SequenceNum sn = 2; off_t offset = CHUNK_SIZE; size_t length = 2 * PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = 
new char[length]; + memset(buf, 0, length); // test out of range EXPECT_EQ(CSErrorCode::InvalidArgError, dataStore->ReadSnapshotChunk(id, @@ -2672,6 +2696,7 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkTest2) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -2688,8 +2713,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkTest3) { SequenceNum sn = 2; off_t offset = PAGE_SIZE; size_t length = PAGE_SIZE * 2; - char writeBuf[length]; // NOLINT - memset(writeBuf, 0, sizeof(writeBuf)); + char *writeBuf = new char[length]; + memset(writeBuf, 0, length); // data in [PAGE_SIZE, 2*PAGE_SIZE) will be cow EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + PAGE_SIZE, length)) .Times(1); @@ -2715,8 +2740,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkTest3) { sn = 1; offset = CHUNK_SIZE; length = PAGE_SIZE * 4; - char readBuf[length]; // NOLINT - memset(readBuf, 0, sizeof(readBuf)); + char *readBuf = new char[length]; + memset(readBuf, 0, length); EXPECT_EQ(CSErrorCode::InvalidArgError, dataStore->ReadSnapshotChunk(id, sn, @@ -2746,6 +2771,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkTest3) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] writeBuf; + delete[] readBuf; } /** @@ -2762,8 +2789,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkTest4) { SequenceNum sn = 3; off_t offset = PAGE_SIZE; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // test sn not exists EXPECT_EQ(CSErrorCode::ChunkNotExistError, dataStore->ReadSnapshotChunk(id, @@ -2778,6 +2805,7 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkTest4) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -2794,8 +2822,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkErrorTest1) { SequenceNum sn = 2; off_t offset = PAGE_SIZE; size_t length = PAGE_SIZE * 2; - char writeBuf[length]; // NOLINT - memset(writeBuf, 0, sizeof(writeBuf)); + char *writeBuf = new char[length]; + memset(writeBuf, 0, length); // data in [PAGE_SIZE, 2*PAGE_SIZE) will be cow EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + PAGE_SIZE, length)) .Times(1); @@ -2821,8 +2849,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkErrorTest1) { sn = 1; offset = 0; length = PAGE_SIZE * 4; - char readBuf[length]; // NOLINT - memset(readBuf, 0, sizeof(readBuf)); + char *readBuf = new char[length]; + memset(readBuf, 0, length); // read chunk failed EXPECT_CALL(*lfs_, Read(1, NotNull(), PAGE_SIZE, PAGE_SIZE)) .WillOnce(Return(-UT_ERRNO)); @@ -2853,6 +2881,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkErrorTest1) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] writeBuf; + delete[] readBuf; } /** @@ -2869,8 +2899,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkErrorTest2) { SequenceNum sn = 2; off_t offset = PAGE_SIZE; size_t length = 2 * PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // test in range offset = PAGE_SIZE; EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + PAGE_SIZE, length)) @@ -2888,6 +2918,7 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkErrorTest2) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -3040,7 +3071,6 @@ TEST_F(CSDataStore_test, DeleteChunkTest4) { EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 2; - SequenceNum sn = 2; // case1 { @@ -3811,8 +3841,8 @@ TEST_F(CSDataStore_test, PasteChunkTest1) { SequenceNum correctedSn = 2; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - 
memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); CSChunkInfo info; // 创建 clone chunk { @@ -4013,6 +4043,7 @@ TEST_F(CSDataStore_test, PasteChunkTest1) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /* @@ -4032,8 +4063,8 @@ TEST_F(CSDataStore_test, PasteChunkErrorTest1) { SequenceNum correctedSn = 2; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); CSChunkInfo info; // 创建 clone chunk { @@ -4118,6 +4149,7 @@ TEST_F(CSDataStore_test, PasteChunkErrorTest1) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /* @@ -4156,8 +4188,6 @@ TEST_F(CSDataStore_test, GetHashErrorTest2) { ChunkID id = 1; std::string hash; - off_t offset = 0; - size_t length = PAGE_SIZE + CHUNK_SIZE; // test read chunk failed EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, 4096)) .WillOnce(Return(-UT_ERRNO)); @@ -4203,10 +4233,9 @@ TEST_F(CSDataStore_test, CloneChunkUnAlignedTest) { ChunkID id = 3; SequenceNum sn = 2; SequenceNum correctedSn = 3; - off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); CSChunkInfo info; // 创建 clone chunk { @@ -4370,6 +4399,7 @@ TEST_F(CSDataStore_test, CloneChunkUnAlignedTest) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } TEST_F(CSDataStore_test, CloneChunkAlignedTest) { @@ -4382,8 +4412,8 @@ TEST_F(CSDataStore_test, CloneChunkAlignedTest) { SequenceNum correctedSn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); CSChunkInfo info; // 创建 clone chunk { @@ -4468,6 +4498,7 @@ TEST_F(CSDataStore_test, CloneChunkAlignedTest) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } TEST_F(CSDataStore_test, NormalChunkAlignmentTest) { @@ -4477,10 +4508,9 @@ TEST_F(CSDataStore_test, NormalChunkAlignmentTest) { ChunkID id = 2; SequenceNum sn = 2; - off_t offset = 0; size_t length = 512; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // write unaligned test { @@ -4546,6 +4576,7 @@ TEST_F(CSDataStore_test, NormalChunkAlignmentTest) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } } // namespace chunkserver diff --git a/test/chunkserver/heartbeat_test_common.cpp b/test/chunkserver/heartbeat_test_common.cpp index 146d354ac9..20d6b444f8 100644 --- a/test/chunkserver/heartbeat_test_common.cpp +++ b/test/chunkserver/heartbeat_test_common.cpp @@ -26,7 +26,7 @@ uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT -static char* confPath[3] = { +static const char* confPath[3] = { "./8200/chunkserver.conf", "./8201/chunkserver.conf", "./8202/chunkserver.conf", @@ -74,10 +74,6 @@ void HeartbeatTestCommon::CleanPeer( std::string peersStr = info.peers(0).address(); - if (info.has_configchangeinfo()) { - const ConfigChangeInfo& cxInfo = info.configchangeinfo(); - } - { // answer with cleaning peer response CopySetConf* conf = resp->add_needupdatecopysets(); diff --git a/test/chunkserver/heartbeat_test_main.cpp b/test/chunkserver/heartbeat_test_main.cpp index d25cc3c518..9e9f848dfa 100644 --- a/test/chunkserver/heartbeat_test_main.cpp +++ b/test/chunkserver/heartbeat_test_main.cpp @@ -31,7 +31,7 @@ #include 
"test/chunkserver/heartbeat_test_common.h" #include "test/integration/common/config_generator.h" -static char *param[3][15] = { +static const char *param[3][15] = { { "heartbeat_test", "-chunkServerIp=127.0.0.1", @@ -135,7 +135,7 @@ int main(int argc, char *argv[]) { * RunChunkServer内部会调用LOG(), 有较低概率因不兼容fork()而卡死 */ return RunChunkServer(i, sizeof(param[i]) / sizeof(char *), - param[i]); + const_cast(param[i])); } } @@ -172,8 +172,8 @@ int main(int argc, char *argv[]) { /* * RunChunkServer内部会调用LOG(), 有较低概率因不兼容fork()而卡死 */ - ret = - RunChunkServer(1, sizeof(param[1]) / sizeof(char *), param[1]); + ret = RunChunkServer(1, sizeof(param[1]) / sizeof(char *), + const_cast(param[1])); return ret; } sleep(2); diff --git a/test/chunkserver/multiple_copysets_io_test.cpp b/test/chunkserver/multiple_copysets_io_test.cpp index d8163159c3..c19d4ed505 100644 --- a/test/chunkserver/multiple_copysets_io_test.cpp +++ b/test/chunkserver/multiple_copysets_io_test.cpp @@ -145,7 +145,7 @@ int64_t chunk_size = 0; int64_t nr_chunks = 0; int64_t nr_copysets = 0; int64_t chunks_per_copyset = 0; -ThreadInfo thread_infos[8] = {0}; +ThreadInfo thread_infos[8] = {}; LogicPoolID poolId = 10000; CopysetID copysetIdBase = 100; @@ -791,7 +791,7 @@ int main(int argc, char *argv[]) { clock_gettime(CLOCK_REALTIME, &t1); - ThreadInfo total_info = {0}; + ThreadInfo total_info = {}; total_info.io_time = time_diff(t0, t1); total_info.iodepth = FLAGS_iodepth; threads_stats(FLAGS_thread_num, &total_info); diff --git a/test/chunkserver/op_request_test.cpp b/test/chunkserver/op_request_test.cpp index 6fecb3535c..ac16c16466 100644 --- a/test/chunkserver/op_request_test.cpp +++ b/test/chunkserver/op_request_test.cpp @@ -735,11 +735,7 @@ TEST(ChunkOpRequestTest, OnApplyErrorTest) { TEST(ChunkOpRequestTest, OnApplyFromLogTest) { LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; - uint64_t chunkId = 12345; - size_t offset = 0; - uint32_t size = 16; uint64_t sn = 1; - uint64_t appliedIndex = 12; uint32_t followScanRpcTimeoutMs = 1000; Configuration conf; diff --git a/test/chunkserver/raftlog/test_curve_segment.cpp b/test/chunkserver/raftlog/test_curve_segment.cpp index da2f369dee..16e8999845 100644 --- a/test/chunkserver/raftlog/test_curve_segment.cpp +++ b/test/chunkserver/raftlog/test_curve_segment.cpp @@ -135,7 +135,7 @@ TEST_F(CurveSegmentTest, open_segment) { // create and open std::string path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1L); ASSERT_EQ(0, prepare_segment(path)); ASSERT_EQ(0, seg1->create()); ASSERT_TRUE(seg1->is_open()); @@ -191,7 +191,7 @@ TEST_F(CurveSegmentTest, closed_segment) { // create and open std::string path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1L); ASSERT_EQ(0, prepare_segment(path)); ASSERT_EQ(0, seg1->create()); ASSERT_TRUE(seg1->is_open()); diff --git a/test/chunkserver/raftlog/test_curve_segment_log_storage.cpp b/test/chunkserver/raftlog/test_curve_segment_log_storage.cpp index 253b124ac3..dfc9c7ab9e 100644 --- a/test/chunkserver/raftlog/test_curve_segment_log_storage.cpp +++ b/test/chunkserver/raftlog/test_curve_segment_log_storage.cpp @@ -155,13 +155,13 @@ TEST_F(CurveSegmentLogStorageTest, basic_test) { // append entry std::string path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1); + butil::string_appendf(&path, "/" 
CURVE_SEGMENT_OPEN_PATTERN, 1L); ASSERT_EQ(0, prepare_segment(path)); path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 2049); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 2049L); ASSERT_EQ(0, prepare_segment(path)); path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 4097); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 4097L); ASSERT_EQ(0, prepare_segment(path)); append_entries(storage, 1000, 5); @@ -200,7 +200,7 @@ TEST_F(CurveSegmentLogStorageTest, basic_test) { // append path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 6145); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 6145L); ASSERT_EQ(0, prepare_segment(path)); for (int i = 5001; i <= 7000; i++) { int64_t index = i; @@ -273,10 +273,10 @@ TEST_F(CurveSegmentLogStorageTest, append_close_load_append) { // append entry std::string path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1L); ASSERT_EQ(0, prepare_segment(path)); path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 2049); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 2049L); ASSERT_EQ(0, prepare_segment(path)); append_entries(storage, 600, 5); ASSERT_EQ(countWalSegmentFile(), storage->GetStatus().walSegmentFileCount); @@ -293,7 +293,7 @@ TEST_F(CurveSegmentLogStorageTest, append_close_load_append) { // append entry path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 4097); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 4097L); ASSERT_EQ(0, prepare_segment(path)); braft::IOMetric metric; for (int i = 600; i < 1000; i++) { @@ -349,7 +349,7 @@ TEST_F(CurveSegmentLogStorageTest, data_lost) { // append entry std::string path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1L); ASSERT_EQ(0, prepare_segment(path)); append_entries(storage, 100, 5); ASSERT_EQ(countWalSegmentFile(), storage->GetStatus().walSegmentFileCount); @@ -393,7 +393,7 @@ TEST_F(CurveSegmentLogStorageTest, compatibility) { // append entry std::string path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 3001); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 3001L); ASSERT_EQ(0, prepare_segment(path)); braft::IOMetric metric; for (int i = 600; i < 1000; i++) { @@ -451,13 +451,13 @@ TEST_F(CurveSegmentLogStorageTest, basic_test_without_direct) { // append entry std::string path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1L); ASSERT_EQ(0, prepare_segment(path)); path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 2049); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 2049L); ASSERT_EQ(0, prepare_segment(path)); path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 4097); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 4097L); ASSERT_EQ(0, prepare_segment(path)); append_entries(storage, 1000, 5); @@ -496,7 +496,7 @@ TEST_F(CurveSegmentLogStorageTest, basic_test_without_direct) { // append path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 6145); + butil::string_appendf(&path, 
"/" CURVE_SEGMENT_OPEN_PATTERN, 6145L); ASSERT_EQ(0, prepare_segment(path)); for (int i = 5001; i <= 7000; i++) { int64_t index = i; diff --git a/test/chunkserver/server.cpp b/test/chunkserver/server.cpp index 94c40d6368..88de6da291 100644 --- a/test/chunkserver/server.cpp +++ b/test/chunkserver/server.cpp @@ -35,32 +35,30 @@ #include "src/common/uri_parser.h" #include "src/chunkserver/raftsnapshot/curve_snapshot_storage.h" -using curve::chunkserver::CopysetNodeOptions; +using curve::chunkserver::ConcurrentApplyModule; using curve::chunkserver::Configuration; +using curve::chunkserver::CopysetID; using curve::chunkserver::CopysetNodeManager; -using curve::chunkserver::concurrent::ConcurrentApplyModule; -using curve::chunkserver::concurrent::ConcurrentApplyOption; +using curve::chunkserver::CopysetNodeOptions; using curve::chunkserver::FilePool; +using curve::chunkserver::FilePoolHelper; using curve::chunkserver::FilePoolOptions; -using curve::chunkserver::ConcurrentApplyModule; -using curve::common::UriParser; using curve::chunkserver::LogicPoolID; -using curve::chunkserver::CopysetID; -using curve::common::Peer; using curve::chunkserver::PeerId; +using curve::chunkserver::concurrent::ConcurrentApplyModule; +using curve::chunkserver::concurrent::ConcurrentApplyOption; +using curve::common::Peer; +using curve::common::UriParser; +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; -using curve::chunkserver::FilePoolHelper; -DEFINE_string(ip, - "127.0.0.1", +DEFINE_string(ip, "127.0.0.1", "Initial configuration of the replication group"); DEFINE_int32(port, 8200, "Listen port of this peer"); DEFINE_string(copyset_dir, "local://./runlog/chunkserver_test0", - "copyset data dir"); -DEFINE_string(conf, - "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0", + "copyset data dir"); +DEFINE_string(conf, "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0", "Initial configuration of the replication group"); DEFINE_int32(election_timeout_ms, 1000, "election timeout"); DEFINE_int32(snapshot_interval_s, 5, "snapshot interval"); @@ -72,8 +70,7 @@ DEFINE_bool(create_chunkfilepool, true, "create chunkfile pool"); butil::AtExitManager atExitManager; -void CreateChunkFilePool(const std::string& dirname, - uint64_t chunksize, +void CreateChunkFilePool(const std::string &dirname, uint64_t chunksize, std::shared_ptr fsptr) { std::string datadir = dirname + "/chunkfilepool"; std::string metapath = dirname + "/chunkfilepool.meta"; @@ -83,9 +80,8 @@ void CreateChunkFilePool(const std::string& dirname, memset(data, 0, 8192); fsptr->Mkdir(datadir); while (count <= 20) { - std::string filename = dirname + - "/chunkfilepool/" + - std::to_string(count); + std::string filename = + dirname + "/chunkfilepool/" + std::to_string(count); int fd = fsptr->Open(filename.c_str(), O_RDWR | O_CREAT); if (fd < 0) { LOG(ERROR) << "Create file failed!"; @@ -93,8 +89,8 @@ void CreateChunkFilePool(const std::string& dirname, } else { LOG(INFO) << filename.c_str() << " created!"; } - for (int i = 0; i <= chunksize/4096; i++) { - fsptr->Write(fd, data, i*4096, 4096); + for (int i = 0; i <= chunksize / 4096; i++) { + fsptr->Write(fd, data, i * 4096, 4096); } fsptr->Close(fd); count++; @@ -109,12 +105,8 @@ void CreateChunkFilePool(const std::string& dirname, memcpy(cpopt.filePoolDir, datadir.c_str(), datadir.size()); memcpy(cpopt.metaPath, metapath.c_str(), metapath.size()); - int ret = FilePoolHelper::PersistEnCodeMetaInfo( - fsptr, - chunksize, - 4096, - 
datadir, - metapath); + (void)FilePoolHelper::PersistEnCodeMetaInfo(fsptr, chunksize, 4096, datadir, + metapath); } int main(int argc, char *argv[]) { @@ -131,13 +123,13 @@ int main(int argc, char *argv[]) { curve::chunkserver::CurveSnapshotStorage::set_server_addr(addr); if (server.Start(FLAGS_port, NULL) != 0) { - LOG(ERROR) << "Fail to start Server: " - << errno << ", " << strerror(errno); + LOG(ERROR) << "Fail to start Server: " << errno << ", " + << strerror(errno); return -1; } - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); const uint32_t kMaxChunkSize = 16 * 1024 * 1024; // TODO(yyk) 这部分实现不太优雅,后续进行重构 std::string copysetUri = FLAGS_copyset_dir + "/copysets"; @@ -161,8 +153,8 @@ int main(int argc, char *argv[]) { copysetNodeOptions.localFileSystem = fs; std::string chunkDataDir; - std::string - protocol = UriParser::ParseUri(FLAGS_copyset_dir, &chunkDataDir); + std::string protocol = + UriParser::ParseUri(FLAGS_copyset_dir, &chunkDataDir); if (protocol.empty()) { LOG(FATAL) << "not support chunk data uri's protocol" << " error chunkDataDir is: " << chunkDataDir; @@ -197,7 +189,7 @@ int main(int argc, char *argv[]) { ConcurrentApplyOption opt{2, 1, 2, 1}; LOG_IF(FATAL, false == copysetNodeOptions.concurrentapply->Init(opt)) - << "Failed to init concurrent apply module"; + << "Failed to init concurrent apply module"; curve::chunkserver::Configuration conf; if (conf.parse_from(FLAGS_conf) != 0) { @@ -215,16 +207,15 @@ int main(int argc, char *argv[]) { CopysetNodeManager::GetInstance().Init(copysetNodeOptions); CopysetNodeManager::GetInstance().Run(); - CopysetNodeManager::GetInstance().CreateCopysetNode(FLAGS_logic_pool_id, - FLAGS_copyset_id, - peers); + CopysetNodeManager::GetInstance().CreateCopysetNode( + FLAGS_logic_pool_id, FLAGS_copyset_id, peers); /* Wait until 'CTRL-C' is pressed. 
then Stop() and Join() the service */ server.RunUntilAskedToQuit(); LOG(INFO) << "server test service is going to quit"; - CopysetNodeManager::GetInstance().DeleteCopysetNode( - FLAGS_logic_pool_id, FLAGS_copyset_id); + CopysetNodeManager::GetInstance().DeleteCopysetNode(FLAGS_logic_pool_id, + FLAGS_copyset_id); return 0; } diff --git a/test/chunkserver/trash_test.cpp b/test/chunkserver/trash_test.cpp index 3c7975d571..3ddf32f27e 100644 --- a/test/chunkserver/trash_test.cpp +++ b/test/chunkserver/trash_test.cpp @@ -512,7 +512,7 @@ TEST_F(TrashTest, recycle_wal_failed) { "curve_log_inprogress_10088")) .WillOnce(Return(-1)); - //失败的情况下不应删除 + // 失败的情况下不应删除 EXPECT_CALL(*lfs, Delete("./runlog/trash_test0/trash/4294967493.55555")) .Times(0); diff --git a/test/client/client_mdsclient_metacache_unittest.cpp b/test/client/client_mdsclient_metacache_unittest.cpp index 4296630965..de0e96d04c 100644 --- a/test/client/client_mdsclient_metacache_unittest.cpp +++ b/test/client/client_mdsclient_metacache_unittest.cpp @@ -55,10 +55,11 @@ uint32_t chunk_size = 4 * 1024 * 1024; uint32_t segment_size = 1 * 1024 * 1024 * 1024; -std::string mdsMetaServerAddr = "127.0.0.1:29104"; // NOLINT -std::string configpath = "./test/client/configs/client_mdsclient_metacache.conf"; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:29104"; // NOLINT +std::string configpath = // NOLINT + "./test/client/configs/client_mdsclient_metacache.conf"; // NOLINT -extern curve::client::FileClient* globalclient; +extern curve::client::FileClient *globalclient; namespace curve { namespace client { @@ -190,7 +191,6 @@ TEST_F(MDSClientTest, Createfile) { TEST_F(MDSClientTest, MkDir) { std::string dirpath = "/1"; - size_t len = 4 * 1024 * 1024; // set response file exist ::curve::mds::CreateFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileExists); @@ -244,7 +244,6 @@ TEST_F(MDSClientTest, MkDir) { TEST_F(MDSClientTest, Closefile) { std::string filename = "/1_userinfo_"; - size_t len = 4 * 1024 * 1024; // file not exist ::curve::mds::CloseFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileNotExists); @@ -289,7 +288,6 @@ TEST_F(MDSClientTest, Closefile) { TEST_F(MDSClientTest, Openfile) { std::string filename = "/1_userinfo_"; - size_t len = 4 * 1024 * 1024; /** * set openfile response */ @@ -635,7 +633,6 @@ TEST_F(MDSClientTest, Extendfile) { TEST_F(MDSClientTest, Deletefile) { LOG(INFO) << "Deletefile======================================="; std::string filename1 = "/1_userinfo_"; - uint64_t newsize = 10 * 1024 * 1024 * 1024ul; // set response file exist ::curve::mds::DeleteFileResponse response; @@ -723,7 +720,6 @@ TEST_F(MDSClientTest, Deletefile) { TEST_F(MDSClientTest, Rmdir) { std::string filename1 = "/1/"; - uint64_t newsize = 10 * 1024 * 1024 * 1024ul; // set response dir not exist ::curve::mds::DeleteFileResponse response; @@ -895,9 +891,9 @@ TEST_F(MDSClientTest, GetFileInfo) { curvefsservice.SetGetFileInfoFakeReturn(fakeret2); curvefsservice.CleanRetryTimes(); - ASSERT_EQ(LIBCURVE_ERROR::FAILED, - mdsclient_.GetFileInfo(filename.c_str(), userinfo, - finfo, &fEpoch)); + ASSERT_EQ( + LIBCURVE_ERROR::FAILED, + mdsclient_.GetFileInfo(filename.c_str(), userinfo, finfo, &fEpoch)); delete fakeret; delete fakeret2; @@ -912,16 +908,17 @@ TEST_F(MDSClientTest, GetOrAllocateSegment) { fi.chunksize = 4 * 1024 * 1024; fi.segmentsize = 1 * 1024 * 1024 * 1024ul; - std::chrono::system_clock::time_point start, end; - auto startTimer = [&start]() { start = 
std::chrono::system_clock::now(); }; - auto endTimer = [&end]() { end = std::chrono::system_clock::now(); }; - auto checkTimer = [&start, &end](uint64_t min, uint64_t max) { - auto elpased = - std::chrono::duration_cast(end - start) - .count(); - ASSERT_GE(elpased, min); - ASSERT_LE(elpased, max); - }; + // std::chrono::system_clock::time_point start, end; + // auto startTimer = [&start]() { start = std::chrono::system_clock::now(); + // }; auto endTimer = [&end]() { end = std::chrono::system_clock::now(); }; + // auto checkTimer = [&start, &end](uint64_t min, uint64_t max) { + // auto elpased = + // std::chrono::duration_cast(end - + // start) + // .count(); + // ASSERT_GE(elpased, min); + // ASSERT_LE(elpased, max); + // }; // TEST CASE: GetOrAllocateSegment failed, block until response ok // curve::mds::GetOrAllocateSegmentResponse resp; @@ -1102,8 +1099,8 @@ TEST_F(MDSClientTest, GetServerList) { response_1.set_statuscode(0); uint32_t chunkserveridc = 1; - ::curve::common::ChunkServerLocation* cslocs; - ::curve::mds::topology::CopySetServerInfo* csinfo; + ::curve::common::ChunkServerLocation *cslocs; + ::curve::mds::topology::CopySetServerInfo *csinfo; for (int j = 0; j < 256; j++) { csinfo = response_1.add_csinfo(); csinfo->set_copysetid(j); @@ -1264,8 +1261,8 @@ TEST_F(MDSClientTest, GetLeaderTest) { response_1.set_statuscode(0); uint32_t chunkserveridc = 1; - ::curve::common::ChunkServerLocation* cslocs; - ::curve::mds::topology::CopySetServerInfo* csinfo; + ::curve::common::ChunkServerLocation *cslocs; + ::curve::mds::topology::CopySetServerInfo *csinfo; csinfo = response_1.add_csinfo(); csinfo->set_copysetid(1234); for (int i = 0; i < 4; i++) { @@ -1428,7 +1425,7 @@ TEST_F(MDSClientTest, CreateCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::FAILED, mdsclient_.CreateCloneFile("source", "destination", userinfo, 10 * 1024 * 1024, 0, 4 * 1024 * 1024, - 0, 0, &finfo)); + 0, 0, "default", &finfo)); // 认证失败 curve::mds::CreateCloneFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); @@ -1441,7 +1438,7 @@ TEST_F(MDSClientTest, CreateCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::AUTHFAIL, mdsclient_.CreateCloneFile("source", "destination", userinfo, 10 * 1024 * 1024, 0, 4 * 1024 * 1024, - 0, 0, &finfo)); + 0, 0, "default", &finfo)); // 请求成功 info->set_id(5); curve::mds::CreateCloneFileResponse response2; @@ -1460,7 +1457,7 @@ TEST_F(MDSClientTest, CreateCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, mdsclient_.CreateCloneFile("source", "destination", userinfo, 10 * 1024 * 1024, 0, 4 * 1024 * 1024, - 0, 0, &finfo)); + 0, 0, "default", &finfo)); ASSERT_EQ(5, finfo.id); ASSERT_EQ(cloneSource, finfo.sourceInfo.name); ASSERT_EQ(cloneLength, finfo.sourceInfo.length); @@ -1707,7 +1704,6 @@ TEST_F(MDSClientTest, ListDir) { curvefsservice.SetListDir(fakeret); - int arrsize; std::vector filestatVec; int ret = globalclient->Listdir(filename1, userinfo, &filestatVec); ASSERT_EQ(ret, -1 * LIBCURVE_ERROR::NOTEXIST); @@ -1736,7 +1732,6 @@ TEST_F(MDSClientTest, ListDir) { curvefsservice.SetListDir(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, globalclient->Listdir(filename1, userinfo, &filestatVec)); - int arraysize = 0; C_UserInfo_t cuserinfo; memcpy(cuserinfo.owner, "test", 5); FileStatInfo *filestat = new FileStatInfo[5]; @@ -1823,7 +1818,7 @@ TEST_F(MDSClientTest, ListDir) { TEST(LibcurveInterface, InvokeWithOutInit) { CurveAioContext aioctx; UserInfo_t userinfo; - C_UserInfo_t *ui; + C_UserInfo_t *ui = nullptr; FileClient fc; ASSERT_EQ(-LIBCURVE_ERROR::FAILED, fc.Create("", userinfo, 0)); 
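Earlier in this file's diff, the elapsed-time check around GetOrAllocateSegment is commented out rather than deleted. For reference, the pattern it implemented is a plain std::chrono measurement; the helper below is a sketch with illustrative names and millisecond units assumed, not code restored from the patch.

    #include <chrono>
    #include <cstdint>

    // Run a callable and return how long it took in milliseconds.
    template <typename Fn>
    int64_t ElapsedMs(Fn&& fn) {
        auto start = std::chrono::system_clock::now();
        fn();
        auto end = std::chrono::system_clock::now();
        return std::chrono::duration_cast<std::chrono::milliseconds>(
                   end - start).count();
    }

    // Usage in a test body: assert that ElapsedMs(doRpc) lies inside the
    // expected [min, max] window.
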
@@ -1930,8 +1925,7 @@ class ServiceHelperGetLeaderTest : public MDSClientTest { } } - GetLeaderResponse2 - MakeResponse(const curve::client::PeerAddr &addr) { + GetLeaderResponse2 MakeResponse(const curve::client::PeerAddr &addr) { GetLeaderResponse2 response; curve::common::Peer *peer = new curve::common::Peer(); peer->set_address(addr.ToString()); @@ -2003,8 +1997,7 @@ TEST_F(ServiceHelperGetLeaderTest, NormalTest) { // 测试第二次拉取新的leader,直接跳过第一个chunkserver,查找第2,3两个 int32_t currentLeaderIndex = 0; - curve::client::PeerAddr currentLeader = - internalAddrs[currentLeaderIndex]; + curve::client::PeerAddr currentLeader = internalAddrs[currentLeaderIndex]; response = MakeResponse(currentLeader); fakeret1 = FakeReturn(nullptr, static_cast(&response)); @@ -2296,7 +2289,8 @@ class MDSClientRefreshSessionTest : public ::testing::Test { void SetUp() override { ASSERT_EQ(0, server_.AddService(&curveFsService_, brpc::SERVER_DOESNT_OWN_SERVICE)); - ASSERT_EQ(0, server_.Start(kServerAddress, nullptr)); + ASSERT_EQ(0, server_.Start(0, nullptr)); + serverAddress_ = butil::endpoint2str(server_.listen_address()).c_str(); } void TearDown() override { @@ -2305,7 +2299,8 @@ class MDSClientRefreshSessionTest : public ::testing::Test { } protected: - const char *kServerAddress = "127.0.0.1:21000"; + std::string serverAddress_; + const std::string kLocalIp = "127.0.0.1"; const uint32_t kTestPort = 1234; brpc::Server server_; @@ -2315,11 +2310,11 @@ class MDSClientRefreshSessionTest : public ::testing::Test { TEST_F(MDSClientRefreshSessionTest, StartDummyServerTest) { curve::client::ClientDummyServerInfo::GetInstance().SetRegister(true); curve::client::ClientDummyServerInfo::GetInstance().SetPort(kTestPort); - curve::client::ClientDummyServerInfo::GetInstance().SetIP(kServerAddress); + curve::client::ClientDummyServerInfo::GetInstance().SetIP(kLocalIp); MDSClient mdsClient; MetaServerOption opt; - opt.rpcRetryOpt.addrs.push_back(kServerAddress); + opt.rpcRetryOpt.addrs.push_back(serverAddress_); ASSERT_EQ(0, mdsClient.Initialize(opt)); curve::mds::ReFreshSessionRequest request; @@ -2338,7 +2333,7 @@ TEST_F(MDSClientRefreshSessionTest, StartDummyServerTest) { ASSERT_TRUE(request.has_clientport()); ASSERT_TRUE(request.has_clientip()); ASSERT_EQ(request.clientport(), kTestPort); - ASSERT_EQ(request.clientip(), kServerAddress); + ASSERT_EQ(request.clientip(), kLocalIp); } TEST_F(MDSClientRefreshSessionTest, NoStartDummyServerTest) { @@ -2346,7 +2341,7 @@ TEST_F(MDSClientRefreshSessionTest, NoStartDummyServerTest) { MDSClient mdsClient; MetaServerOption opt; - opt.rpcRetryOpt.addrs.push_back(kServerAddress); + opt.rpcRetryOpt.addrs.push_back(serverAddress_); ASSERT_EQ(0, mdsClient.Initialize(opt)); curve::mds::ReFreshSessionRequest request; @@ -2369,8 +2364,8 @@ TEST_F(MDSClientRefreshSessionTest, NoStartDummyServerTest) { } // namespace client } // namespace curve -const std::vector clientConf { - std::string("mds.listen.addr=") + mdsMetaServerAddr, +const std::vector clientConf{ + std::string("mds.listen.addr=") + std::string(mdsMetaServerAddr), std::string("global.logPath=./runlog/"), std::string("chunkserver.rpcTimeoutMS=1000"), std::string("chunkserver.opMaxRetry=3"), @@ -2383,7 +2378,7 @@ const std::vector clientConf { std::string("throttle.enable=true"), }; -int main(int argc, char* argv[]) { +int main(int argc, char *argv[]) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); @@ -2393,8 +2388,8 @@ int main(int argc, char* argv[]) { std::unique_ptr cluster(new curve::CurveCluster()); 
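The MDSClientRefreshSessionTest fixture above stops hard-coding 127.0.0.1:21000 and instead starts the brpc server on port 0, letting the kernel choose a free port, then reads the bound address back with butil::endpoint2str. A minimal standalone sketch of that pattern (error handling reduced to a stub) is:

    #include <string>
    #include <butil/endpoint.h>
    #include <brpc/server.h>

    // Start the given server on an ephemeral port and return the address
    // it actually listens on; an empty string signals failure.
    std::string StartOnEphemeralPort(brpc::Server* server) {
        if (server->Start(0 /* kernel-assigned port */, nullptr) != 0) {
            return "";
        }
        return butil::endpoint2str(server->listen_address()).c_str();
    }
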
- cluster->PrepareConfig( - configpath, clientConf); + cluster->PrepareConfig(configpath, + clientConf); return RUN_ALL_TESTS(); } diff --git a/test/client/client_session_unittest.cpp b/test/client/client_session_unittest.cpp index 94e518bd51..e30175cbf6 100644 --- a/test/client/client_session_unittest.cpp +++ b/test/client/client_session_unittest.cpp @@ -159,8 +159,6 @@ TEST(ClientSession, LeaseTaskTest) { } } - auto iomanager = fileinstance.GetIOManager4File(); - curve::client::LeaseExecutor* lease = fileinstance.GetLeaseExecutor(); // 5. set refresh AuthFail diff --git a/test/client/fake/fakeMDS.cpp b/test/client/fake/fakeMDS.cpp index 8ec2ada4d9..65266ebb44 100644 --- a/test/client/fake/fakeMDS.cpp +++ b/test/client/fake/fakeMDS.cpp @@ -293,7 +293,7 @@ bool FakeMDS::StartService() { /** * set get snap allocate info */ - FakeReturn* snapfakeret = new FakeReturn(nullptr, static_cast(getallocateresponse)); // NOLINT + // FakeReturn* snapfakeret = new FakeReturn(nullptr, static_cast(getallocateresponse)); // NOLINT fakecurvefsservice_.SetGetSnapshotSegmentInfo(fakeret); /** @@ -328,7 +328,6 @@ bool FakeMDS::StartService() { /** * set list physical pool response */ - ListPhysicalPoolResponse* listphypoolresp = new ListPhysicalPoolResponse(); FakeReturn* fakeListPPRet = new FakeReturn(nullptr, response); faketopologyservice_.fakelistpoolret_ = fakeListPPRet; diff --git a/test/client/fake/fakeMDS.h b/test/client/fake/fakeMDS.h index a2c0d49ca2..e29f251c26 100644 --- a/test/client/fake/fakeMDS.h +++ b/test/client/fake/fakeMDS.h @@ -204,6 +204,7 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { LOG(INFO) << "request filename = " << request->filename(); ASSERT_EQ(request->filename()[0], '/'); }; + (void)checkFullpath; fiu_do_on("test/client/fake/fakeMDS.GetOrAllocateSegment", checkFullpath()); diff --git a/test/client/fake/mock_schedule.cpp b/test/client/fake/mock_schedule.cpp index 2495ec1bcf..b53a3b3444 100644 --- a/test/client/fake/mock_schedule.cpp +++ b/test/client/fake/mock_schedule.cpp @@ -43,9 +43,7 @@ int Schedule::ScheduleRequest( const std::vector& reqlist) { // LOG(INFO) << "ENTER MOCK ScheduleRequest"; char fakedate[10] = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k'}; - curve::client::OpType type = curve::client::OpType::UNKNOWN; int processed = 0; - int totallength = 0; std::vector datavec; if (enableScheduleFailed) { @@ -71,9 +69,10 @@ int Schedule::ScheduleRequest( auto req = iter->done_->GetReqCtx(); if (iter->optype_ == curve::client::OpType::READ_SNAP) { - char buf[iter->rawlength_]; // NOLINT + char *buf = new char[iter->rawlength_]; memset(buf, fakedate[processed % 10], iter->rawlength_); iter->readData_.append(buf, iter->rawlength_); + delete[] buf; } if (iter->optype_ == curve::client::OpType::GET_CHUNK_INFO) { @@ -82,9 +81,10 @@ int Schedule::ScheduleRequest( } if (iter->optype_ == curve::client::OpType::READ) { - char buffer[iter->rawlength_]; // NOLINT + char *buffer = new char[iter->rawlength_]; memset(buffer, fakedate[processed % 10], iter->rawlength_); iter->readData_.append(buffer, iter->rawlength_); + delete[] buffer; // LOG(ERROR) << "request split" // << ", off = " << iter->offset_ @@ -96,7 +96,6 @@ int Schedule::ScheduleRequest( } if (iter->optype_ == curve::client::OpType::WRITE) { - type = curve::client::OpType::WRITE; writeData.append(iter->writeData_); } processed++; diff --git a/test/client/iotracker_splitor_unittest.cpp b/test/client/iotracker_splitor_unittest.cpp index 35a09b7f81..fd4df4eaed 100644 --- 
a/test/client/iotracker_splitor_unittest.cpp +++ b/test/client/iotracker_splitor_unittest.cpp @@ -339,7 +339,6 @@ TEST_F(IOTrackerSplitorTest, AsyncStartRead) { mockschuler->DelegateToFake(); curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); iomana->SetRequestScheduler(mockschuler); CurveAioContext aioctx; @@ -374,7 +373,6 @@ TEST_F(IOTrackerSplitorTest, AsyncStartWrite) { mockschuler->DelegateToFake(); curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); iomana->SetRequestScheduler(mockschuler); CurveAioContext aioctx; @@ -420,7 +418,6 @@ TEST_F(IOTrackerSplitorTest, StartRead) { mockschuler->DelegateToFake(); curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); iomana->SetRequestScheduler(mockschuler); uint64_t offset = 4 * 1024 * 1024 - 4 * 1024; @@ -451,7 +448,6 @@ TEST_F(IOTrackerSplitorTest, StartWrite) { mockschuler->DelegateToFake(); curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); iomana->SetRequestScheduler(mockschuler); uint64_t offset = 4 * 1024 * 1024 - 4 * 1024; @@ -518,7 +514,6 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartWrite) { MockRequestScheduler* mockschuler = new MockRequestScheduler; mockschuler->DelegateToFake(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); auto ioctxmana = fileinstance_->GetIOManager4File(); ioctxmana->SetRequestScheduler(mockschuler); @@ -656,7 +651,6 @@ TEST_F(IOTrackerSplitorTest, ManagerStartRead) { MockRequestScheduler* mockschuler = new MockRequestScheduler; mockschuler->DelegateToFake(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); auto ioctxmana = fileinstance_->GetIOManager4File(); ioctxmana->SetRequestScheduler(mockschuler); @@ -687,7 +681,6 @@ TEST_F(IOTrackerSplitorTest, ManagerStartWrite) { MockRequestScheduler* mockschuler = new MockRequestScheduler; mockschuler->DelegateToFake(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); auto ioctxmana = fileinstance_->GetIOManager4File(); ioctxmana->SetRequestScheduler(mockschuler); @@ -781,7 +774,6 @@ TEST_F(IOTrackerSplitorTest, BoundaryTEST) { MockRequestScheduler* mockschuler = new MockRequestScheduler; mockschuler->DelegateToFake(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); auto ioctxmana = fileinstance_->GetIOManager4File(); ioctxmana->SetRequestScheduler(mockschuler); @@ -1179,7 +1171,6 @@ TEST_F(IOTrackerSplitorTest, StartReadNotAllocateSegment) { mockschuler->DelegateToFake(); curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); iomana->SetRequestScheduler(mockschuler); uint64_t offset = 1 * 1024 * 1024 * 1024 + 4 * 1024 * 1024 - 4 * 1024; @@ -1207,7 +1198,6 @@ TEST_F(IOTrackerSplitorTest, AsyncStartReadNotAllocateSegment) { mockschuler->DelegateToFake(); curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); iomana->SetRequestScheduler(mockschuler); CurveAioContext aioctx; diff --git a/test/client/libcbd_ext4_test.cpp b/test/client/libcbd_ext4_test.cpp index 704bbcb10a..a240a3fd77 100644 --- a/test/client/libcbd_ext4_test.cpp +++ 
b/test/client/libcbd_ext4_test.cpp @@ -42,7 +42,7 @@ TEST(TestLibcbdExt4, InitTest) { memset(&opt, 0, sizeof(opt)); - opt.datahome = "."; + opt.datahome = const_cast("."); ret = cbd_lib_init(&opt); ASSERT_EQ(ret, 0); @@ -67,7 +67,7 @@ TEST(TestLibcbdExt4, ReadWriteTest) { memset(&opt, 0, sizeof(opt)); memset(buf, 'a', BUFSIZE); - opt.datahome = "."; + opt.datahome = const_cast("."); ret = cbd_lib_init(&opt); ASSERT_EQ(ret, 0); @@ -122,7 +122,7 @@ TEST(TestLibcbdExt4, AioReadWriteTest) { memset(&opt, 0, sizeof(opt)); memset(buf, 'a', BUFSIZE); - opt.datahome = "."; + opt.datahome = const_cast("."); ret = cbd_lib_init(&opt); ASSERT_EQ(ret, 0); @@ -175,7 +175,7 @@ TEST(TestLibcbdExt4, IncreaseEpochTest) { memset(&opt, 0, sizeof(opt)); - opt.datahome = "."; + opt.datahome = const_cast("."); ret = cbd_lib_init(&opt); ASSERT_EQ(ret, 0); diff --git a/test/client/libcbd_libcurve_test.cpp b/test/client/libcbd_libcurve_test.cpp index 14e96e94e3..3f582b8a3c 100644 --- a/test/client/libcbd_libcurve_test.cpp +++ b/test/client/libcbd_libcurve_test.cpp @@ -133,7 +133,7 @@ TEST_F(TestLibcbdLibcurve, InitTest) { globalclientinited_ = false; memset(&opt, 0, sizeof(opt)); // testing with no conf specified - opt.conf = ""; + opt.conf = const_cast(""); ret = cbd_lib_init(&opt); ASSERT_NE(ret, 0); ret = cbd_lib_fini(); diff --git a/test/client/libcurve_interface_unittest.cpp b/test/client/libcurve_interface_unittest.cpp index 19edf9dfa4..78492e1f2e 100644 --- a/test/client/libcurve_interface_unittest.cpp +++ b/test/client/libcurve_interface_unittest.cpp @@ -113,7 +113,7 @@ TEST(TestLibcurveInterface, InterfaceTest) { ASSERT_EQ(GetClusterId(clusterId, 1), -LIBCURVE_ERROR::FAILED); // libcurve file operation - int temp = Create(filename.c_str(), &userinfo, FLAGS_test_disk_size); + (void)Create(filename.c_str(), &userinfo, FLAGS_test_disk_size); int fd = Open(filename.c_str(), &userinfo); @@ -895,7 +895,6 @@ TEST(TestLibcurveInterface, ResumeTimeoutBackoff) { ASSERT_NE(fd, -1); - CliServiceFake *cliservice = mds.GetCliService(); std::vector chunkservice = mds.GetFakeChunkService(); char *buffer = new char[8 * 1024]; @@ -987,13 +986,24 @@ TEST(TestLibcurveInterface, InterfaceStripeTest) { FakeReturn *fakeret = new FakeReturn(nullptr, static_cast(&response)); service->SetCreateFileFakeReturn(fakeret); - int ret = fc.Create2(filename, userinfo, size, 0, 0); + CreateFileContext context; + context.pagefile = true; + context.name = filename; + context.user = userinfo; + context.length = size; + int ret = fc.Create2(context); ASSERT_EQ(LIBCURVE_ERROR::OK, ret); response.set_statuscode(::curve::mds::StatusCode::kFileExists); fakeret = new FakeReturn(nullptr, static_cast(&response)); service->SetCreateFileFakeReturn(fakeret); - ret = fc.Create2(filename2, userinfo, size, 1024 * 1024, 4); + context.pagefile = true; + context.name = filename2; + context.user = userinfo; + context.length = size; + context.stripeUnit = 1024 * 1024; + context.stripeCount = 4; + ret = fc.Create2(context); ASSERT_EQ(LIBCURVE_ERROR::EXISTS, -ret); FileStatInfo_t fsinfo; diff --git a/test/client/mds_client_test.cpp b/test/client/mds_client_test.cpp index 420aab2afe..9f0fa8fbcc 100644 --- a/test/client/mds_client_test.cpp +++ b/test/client/mds_client_test.cpp @@ -30,6 +30,7 @@ #include #include "test/client/mock/mock_namespace_service.h" +#include "test/client/mock/mock_topology_service.h" namespace curve { namespace client { @@ -42,17 +43,23 @@ using ::testing::SetArgPointee; constexpr uint64_t kGiB = 1024ull * 1024 * 1024; -template -void 
FakeRpcService(google::protobuf::RpcController* cntl_base, - const RpcRequestType* request, RpcResponseType* response, - google::protobuf::Closure* done) { - if (RpcFailed) { - brpc::Controller* cntl = static_cast(cntl_base); - cntl->SetFailed(112, "Not connected to"); +namespace { +template +struct FakeRpcService { + template + void operator()(google::protobuf::RpcController* cntl_base, + const Request* /*request*/, + Response* /*response*/, + google::protobuf::Closure* done) const { + if (FAIL) { + brpc::Controller* cntl = static_cast(cntl_base); + cntl->SetFailed(112, "Not connected to"); + } + + done->Run(); } - done->Run(); -} +}; +} // namespace class MDSClientTest : public testing::Test { protected: @@ -62,6 +69,8 @@ class MDSClientTest : public testing::Test { ASSERT_EQ(0, server_.AddService(&mockNameService_, brpc::SERVER_DOESNT_OWN_SERVICE)); + ASSERT_EQ(0, server_.AddService(&mockTopoService_, + brpc::SERVER_DOESNT_OWN_SERVICE)); // only start mds on mdsAddr1 ASSERT_EQ(0, server_.Start(mdsAddr1.c_str(), nullptr)); @@ -86,6 +95,7 @@ class MDSClientTest : public testing::Test { protected: brpc::Server server_; curve::mds::MockNameService mockNameService_; + curve::client::MockTopologyService mockTopoService_; MDSClient mdsClient_; MetaServerOption option_; }; @@ -102,7 +112,7 @@ TEST_F(MDSClientTest, TestRenameFile) { EXPECT_CALL(mockNameService_, RenameFile(_, _, _, _)) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke(FakeRpcService))); + Invoke(FakeRpcService{}))); auto startMs = TimeUtility::GetTimeofDayMs(); ASSERT_EQ(LIBCURVE_ERROR::NOT_SUPPORT, @@ -119,7 +129,7 @@ TEST_F(MDSClientTest, TestRenameFile) { EXPECT_CALL(mockNameService_, RenameFile(_, _, _, _)) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke(FakeRpcService))); + Invoke(FakeRpcService{}))); ASSERT_EQ(LIBCURVE_ERROR::FILE_OCCUPIED, mdsClient_.RenameFile(userInfo, srcName, destName)); @@ -136,10 +146,10 @@ TEST_F(MDSClientTest, TestRenameFile) { EXPECT_CALL(mockNameService_, RenameFile(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(responseNotSupport), - Invoke(FakeRpcService))) + Invoke(FakeRpcService{}))) .WillOnce(DoAll( SetArgPointee<2>(responseOK), - Invoke(FakeRpcService))); + Invoke(FakeRpcService{}))); ASSERT_EQ(LIBCURVE_ERROR::OK, mdsClient_.RenameFile(userInfo, srcName, destName)); @@ -157,7 +167,7 @@ TEST_F(MDSClientTest, TestDeleteFile) { EXPECT_CALL(mockNameService_, DeleteFile(_, _, _, _)) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke(FakeRpcService))); + Invoke(FakeRpcService{}))); auto startMs = TimeUtility::GetTimeofDayMs(); ASSERT_EQ(LIBCURVE_ERROR::NOT_SUPPORT, @@ -174,7 +184,7 @@ TEST_F(MDSClientTest, TestDeleteFile) { EXPECT_CALL(mockNameService_, DeleteFile(_, _, _, _)) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke(FakeRpcService))); + Invoke(FakeRpcService{}))); ASSERT_EQ(LIBCURVE_ERROR::FILE_OCCUPIED, mdsClient_.DeleteFile(fileName, userInfo)); @@ -191,10 +201,10 @@ TEST_F(MDSClientTest, TestDeleteFile) { EXPECT_CALL(mockNameService_, DeleteFile(_, _, _, _)) .WillOnce(DoAll( SetArgPointee<2>(responseNotSupport), - Invoke(FakeRpcService))) + Invoke(FakeRpcService{}))) .WillOnce(DoAll( SetArgPointee<2>(responseOK), - Invoke(FakeRpcService))); + Invoke(FakeRpcService{}))); ASSERT_EQ(LIBCURVE_ERROR::OK, mdsClient_.DeleteFile(fileName, userInfo)); @@ -214,7 +224,7 @@ TEST_F(MDSClientTest, TestChangeOwner) { .WillRepeatedly(DoAll( SetArgPointee<2>(response), Invoke( - FakeRpcService))); + FakeRpcService{}))); auto startMs = 
TimeUtility::GetTimeofDayMs(); ASSERT_EQ(LIBCURVE_ERROR::NOT_SUPPORT, @@ -232,7 +242,7 @@ TEST_F(MDSClientTest, TestChangeOwner) { .WillRepeatedly(DoAll( SetArgPointee<2>(response), Invoke( - FakeRpcService))); + FakeRpcService{}))); ASSERT_EQ(LIBCURVE_ERROR::FILE_OCCUPIED, mdsClient_.ChangeOwner(fileName, newUser, userInfo)); @@ -250,11 +260,11 @@ TEST_F(MDSClientTest, TestChangeOwner) { .WillOnce(DoAll( SetArgPointee<2>(responseNotSupport), Invoke( - FakeRpcService))) + FakeRpcService{}))) .WillOnce(DoAll( SetArgPointee<2>(responseOK), Invoke( - FakeRpcService))); + FakeRpcService{}))); ASSERT_EQ(LIBCURVE_ERROR::OK, mdsClient_.ChangeOwner(fileName, newUser, userInfo)); @@ -274,7 +284,7 @@ TEST_F(MDSClientTest, TestOpenFile) { { EXPECT_CALL(mockNameService_, OpenFile(_, _, _, _)) .WillRepeatedly(Invoke( - FakeRpcService)); + FakeRpcService{})); auto startMs = TimeUtility::GetTimeofDayMs(); ASSERT_EQ(LIBCURVE_ERROR::FAILED, @@ -292,7 +302,7 @@ TEST_F(MDSClientTest, TestOpenFile) { EXPECT_CALL(mockNameService_, OpenFile(_, _, _, _)) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke(FakeRpcService))); + Invoke(FakeRpcService{}))); ASSERT_EQ(LIBCURVE_ERROR::FAILED, mdsClient_.OpenFile(fileName, userInfo, @@ -316,7 +326,7 @@ TEST_F(MDSClientTest, TestOpenFile) { EXPECT_CALL(mockNameService_, OpenFile(_, _, _, _)) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke(FakeRpcService))); + Invoke(FakeRpcService{}))); ASSERT_EQ(LIBCURVE_ERROR::OK, mdsClient_.OpenFile(fileName, userInfo, @@ -344,7 +354,7 @@ TEST_F(MDSClientTest, TestOpenFile) { EXPECT_CALL(mockNameService_, OpenFile(_, _, _, _)) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke(FakeRpcService))); + Invoke(FakeRpcService{}))); ASSERT_EQ(LIBCURVE_ERROR::OK, mdsClient_.OpenFile(fileName, userInfo, @@ -373,7 +383,7 @@ TEST_F(MDSClientTest, TestOpenFile) { EXPECT_CALL(mockNameService_, OpenFile(_, _, _, _)) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke(FakeRpcService))); + Invoke(FakeRpcService{}))); ASSERT_EQ(LIBCURVE_ERROR::OK, mdsClient_.OpenFile(fileName, userInfo, @@ -410,7 +420,7 @@ TEST_F(MDSClientTest, TestOpenFile) { EXPECT_CALL(mockNameService_, OpenFile(_, _, _, _)) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke(FakeRpcService))); + Invoke(FakeRpcService{}))); ASSERT_EQ(LIBCURVE_ERROR::OK, mdsClient_.OpenFile(fileName, userInfo, @@ -437,8 +447,7 @@ TEST_F(MDSClientTest, TestIncreaseEpoch) { { EXPECT_CALL(mockNameService_, IncreaseFileEpoch(_, _, _, _)) .WillRepeatedly(Invoke( - FakeRpcService)); + FakeRpcService{})); ASSERT_EQ(LIBCURVE_ERROR::FAILED, mdsClient_.IncreaseEpoch(fileName, userInfo, @@ -452,8 +461,7 @@ TEST_F(MDSClientTest, TestIncreaseEpoch) { EXPECT_CALL(mockNameService_, IncreaseFileEpoch(_, _, _, _)) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke(FakeRpcService))); + Invoke(FakeRpcService{}))); ASSERT_EQ(LIBCURVE_ERROR::NOTEXIST, @@ -468,8 +476,7 @@ TEST_F(MDSClientTest, TestIncreaseEpoch) { EXPECT_CALL(mockNameService_, IncreaseFileEpoch(_, _, _, _)) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke(FakeRpcService))); + Invoke(FakeRpcService{}))); ASSERT_EQ(LIBCURVE_ERROR::FAILED, @@ -499,8 +506,7 @@ TEST_F(MDSClientTest, TestIncreaseEpoch) { EXPECT_CALL(mockNameService_, IncreaseFileEpoch(_, _, _, _)) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke(FakeRpcService))); + Invoke(FakeRpcService{}))); ASSERT_EQ(LIBCURVE_ERROR::OK, mdsClient_.IncreaseEpoch(fileName, userInfo, @@ -542,8 +548,7 @@ TEST_F(MDSClientTest, 
TestIncreaseEpoch) { EXPECT_CALL(mockNameService_, IncreaseFileEpoch(_, _, _, _)) .WillRepeatedly(DoAll( SetArgPointee<2>(response), - Invoke(FakeRpcService))); + Invoke(FakeRpcService{}))); ASSERT_EQ(LIBCURVE_ERROR::OK, mdsClient_.IncreaseEpoch(fileName, userInfo, @@ -565,5 +570,51 @@ TEST_F(MDSClientTest, TestIncreaseEpoch) { } } +TEST_F(MDSClientTest, TestListPoolset) { + std::vector out; + mds::topology::ListPoolsetResponse response; + + // controller failed + { + EXPECT_CALL(mockTopoService_, ListPoolset(_, _, _, _)) + .WillRepeatedly(Invoke(FakeRpcService{})); + + ASSERT_EQ(LIBCURVE_ERROR::FAILED, mdsClient_.ListPoolset(&out)); + } + + // request failed + { + response.set_statuscode(-1); + EXPECT_CALL(mockTopoService_, ListPoolset(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(FakeRpcService{}))); + + ASSERT_EQ(LIBCURVE_ERROR::FAILED, mdsClient_.ListPoolset(&out)); + } + + // request success + { + response.set_statuscode(0); + auto* poolset = response.add_poolsetinfos(); + poolset->set_poolsetid(1); + poolset->set_poolsetname("default"); + poolset->set_type("default"); + poolset = response.add_poolsetinfos(); + poolset->set_poolsetid(2); + poolset->set_poolsetname("system"); + poolset->set_type("SSD"); + + EXPECT_CALL(mockTopoService_, ListPoolset(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<2>(response), + Invoke(FakeRpcService{}))); + + out.clear(); + ASSERT_EQ(LIBCURVE_ERROR::OK, mdsClient_.ListPoolset(&out)); + ASSERT_EQ(2, out.size()); + ASSERT_EQ("default", out[0]); + ASSERT_EQ("system", out[1]); + } +} + } // namespace client } // namespace curve diff --git a/test/client/mds_failover_test.cpp b/test/client/mds_failover_test.cpp index ebf8190e92..e95912f610 100644 --- a/test/client/mds_failover_test.cpp +++ b/test/client/mds_failover_test.cpp @@ -66,7 +66,6 @@ TEST(MDSChangeTest, MDSFailoverTest) { rpcexcutor.SetOption(metaopt.rpcRetryOpt); - int currentWorkMDSIndex = 1; int mds0RetryTimes = 0; int mds1RetryTimes = 0; int mds2RetryTimes = 0; diff --git a/test/client/metacache_test.cpp b/test/client/metacache_test.cpp index 725ac25f51..8f17b39d2b 100644 --- a/test/client/metacache_test.cpp +++ b/test/client/metacache_test.cpp @@ -91,7 +91,6 @@ TEST_F(MetaCacheTest, TestCleanChunksInSegment) { InsertMetaCache(fileLength, segmentSize, chunkSize); uint64_t totalChunks = fileLength / chunkSize; - uint64_t totalSegments = fileLength / segmentSize; uint64_t chunksInSegment = segmentSize / chunkSize; ASSERT_EQ(totalChunks, diff --git a/test/client/mock/mock_topology_service.h b/test/client/mock/mock_topology_service.h new file mode 100644 index 0000000000..1bf0dc2cbb --- /dev/null +++ b/test/client/mock/mock_topology_service.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef TEST_CLIENT_MOCK_MOCK_TOPOLOGY_SERVICE_H_ +#define TEST_CLIENT_MOCK_MOCK_TOPOLOGY_SERVICE_H_ + +#include +#include + +#include "proto/topology.pb.h" + +namespace curve { +namespace client { + +class MockTopologyService : public mds::topology::TopologyService { + public: + MOCK_METHOD4(ListPoolset, + void(::google::protobuf::RpcController* controller, + const curve::mds::topology::ListPoolsetRequest* request, + curve::mds::topology::ListPoolsetResponse* response, + ::google::protobuf::Closure* done)); +}; + +} // namespace client +} // namespace curve + +#endif // TEST_CLIENT_MOCK_MOCK_TOPOLOGY_SERVICE_H_ diff --git a/test/client/snapshot_service_unittest.cpp b/test/client/snapshot_service_unittest.cpp index b942b34604..56b1c9cd20 100644 --- a/test/client/snapshot_service_unittest.cpp +++ b/test/client/snapshot_service_unittest.cpp @@ -565,7 +565,6 @@ TEST(SnapInstance, DeleteChunkSnapshotTest) { SnapshotClient cl; ASSERT_TRUE(!cl.Init(opt)); - auto max_split_size_kb = 1024 * 64; MockRequestScheduler* mocksch = new MockRequestScheduler; mocksch->DelegateToFake(); diff --git a/test/common/count_down_event_test.cpp b/test/common/count_down_event_test.cpp index f31ebb6b9b..8bdc5c9681 100644 --- a/test/common/count_down_event_test.cpp +++ b/test/common/count_down_event_test.cpp @@ -85,7 +85,6 @@ TEST(CountDownEventTest, basic) { t1.join(); } { - int i = 0; CountDownEvent cond(0); cond.WaitFor(1000); } diff --git a/test/common/dlock_test.cpp b/test/common/dlock_test.cpp index f4354b4faa..cd62367a3a 100644 --- a/test/common/dlock_test.cpp +++ b/test/common/dlock_test.cpp @@ -46,7 +46,7 @@ class TestDLock : public ::testing::Test { system("rm -fr testDLock.etcd"); client_ = std::make_shared(); char endpoints[] = "127.0.0.1:2375"; - EtcdConf conf = { endpoints, strlen(endpoints), 1000 }; + EtcdConf conf = {endpoints, static_cast(strlen(endpoints)), 1000}; ASSERT_EQ(EtcdErrCode::EtcdDeadlineExceeded, client_->Init(conf, 200, 3)); diff --git a/test/common/rw_lock_test.cpp b/test/common/rw_lock_test.cpp index 9801fcea45..ced1f06e48 100644 --- a/test/common/rw_lock_test.cpp +++ b/test/common/rw_lock_test.cpp @@ -80,6 +80,7 @@ TEST(RWLockTest, basic_test) { for (uint64_t i = 0; i < 10000; ++i) { ReadLockGuard readLockGuard(rwlock); auto j = writeCnt + i; + (void)j; } }; { @@ -149,6 +150,7 @@ TEST(BthreadRWLockTest, basic_test) { for (uint64_t i = 0; i < 10000; ++i) { ReadLockGuard readLockGuard(rwlock); auto j = writeCnt + i; + (void)j; } }; { diff --git a/test/common/task_thread_pool_test.cpp b/test/common/task_thread_pool_test.cpp index 0ac05897b6..cb44a36b09 100644 --- a/test/common/task_thread_pool_test.cpp +++ b/test/common/task_thread_pool_test.cpp @@ -35,11 +35,13 @@ using curve::common::CountDownEvent; void TestAdd1(int a, double b, CountDownEvent *cond) { double c = a + b; + (void)c; cond->Signal(); } int TestAdd2(int a, double b, CountDownEvent *cond) { double c = a + b; + (void)c; cond->Signal(); return 0; } diff --git a/test/integration/chunkserver/chunkserver_basic_test.cpp b/test/integration/chunkserver/chunkserver_basic_test.cpp index 42952560b2..934cc0e0fc 100644 --- a/test/integration/chunkserver/chunkserver_basic_test.cpp +++ b/test/integration/chunkserver/chunkserver_basic_test.cpp @@ -49,7 +49,7 @@ const ChunkSizeType CHUNK_SIZE = 16 * kMB; const char* kFakeMdsAddr = "127.0.0.1:9079"; -static char *chunkServerParams[1][16] = { +static const char *chunkServerParams[1][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=" BASIC_TEST_CHUNK_SERVER_PORT, 
"-chunkServerStoreUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/", @@ -103,7 +103,7 @@ class ChunkServerIoTest : public testing::Test { ASSERT_TRUE(cg1_.Generate()); paramsIndexs_[PeerCluster::PeerToId(peer1_)] = 0; - params_.push_back(chunkServerParams[0]); + params_.push_back(const_cast(chunkServerParams[0])); // 初始化chunkfilepool,这里会预先分配一些chunk lfs_ = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); @@ -157,9 +157,7 @@ class ChunkServerIoTest : public testing::Test { void TestBasicIO(std::shared_ptr verify) { uint64_t chunkId = 1; - off_t offset = 0; int length = kOpRequestAlignSize; - int ret = 0; const SequenceNum sn1 = 1; std::string data(length * 4, 0); // Now we will zeroing chunk file, even though it fill '0' in start @@ -214,9 +212,7 @@ class ChunkServerIoTest : public testing::Test { const SequenceNum sn1 = 1; const SequenceNum sn2 = 2; const SequenceNum sn3 = 3; - off_t offset = 0; int length = kOpRequestAlignSize; - int ret = 0; std::string data(length * 4, 0); std::string chunkData1a(kChunkSize, 0); // chunk1版本1预期数据 std::string chunkData1b(kChunkSize, 0); // chunk1版本2预期数据 diff --git a/test/integration/chunkserver/chunkserver_clone_recover.cpp b/test/integration/chunkserver/chunkserver_clone_recover.cpp index e3a4808626..abe3557298 100644 --- a/test/integration/chunkserver/chunkserver_clone_recover.cpp +++ b/test/integration/chunkserver/chunkserver_clone_recover.cpp @@ -256,6 +256,7 @@ class CSCloneRecoverTest : public ::testing::Test { server["name"] = std::string("server") + std::to_string(i); server["physicalpool"] = PHYSICAL_POOL_NAME; server["zone"] = std::string("zone") + std::to_string(i); + server["poolset"] = std::string("default"); servers.append(server); } topo["servers"] = servers; @@ -270,6 +271,7 @@ class CSCloneRecoverTest : public ::testing::Test { logicalPool["zonenum"] = 3; logicalPools.append(logicalPool); topo["logicalpools"] = logicalPools; + std::ofstream topoConf(CSCLONE_BASE_DIR + "/topo.json"); topoConf << topo.toStyledString(); topoConf.close(); @@ -778,7 +780,6 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { // 1. chunk文件不存在 ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 331; - SequenceNum sn0 = 0; SequenceNum sn1 = 1; SequenceNum sn2 = 2; string sourceFile = CURVEFS_FILENAME; @@ -1006,7 +1007,6 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { // 1. 创建克隆文件 ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 339; - ChunkID cloneChunk2 = 340; SequenceNum sn2 = 2; SequenceNum sn3 = 3; SequenceNum sn4 = 4; @@ -1074,7 +1074,6 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { // 1. 
创建克隆文件 ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 341; - ChunkID cloneChunk2 = 342; SequenceNum sn2 = 2; SequenceNum sn3 = 3; SequenceNum sn4 = 4; diff --git a/test/integration/chunkserver/chunkserver_concurrent_test.cpp b/test/integration/chunkserver/chunkserver_concurrent_test.cpp index beb0dc9b87..a5c8806e73 100644 --- a/test/integration/chunkserver/chunkserver_concurrent_test.cpp +++ b/test/integration/chunkserver/chunkserver_concurrent_test.cpp @@ -45,7 +45,7 @@ using curve::common::Thread; const char* kFakeMdsAddr = "127.0.0.1:9329"; -static char *chunkConcurrencyParams1[1][16] = { +static const char *chunkConcurrencyParams1[1][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", @@ -66,7 +66,7 @@ static char *chunkConcurrencyParams1[1][16] = { }, }; -static char *chunkConcurrencyParams2[1][16] = { +static const char *chunkConcurrencyParams2[1][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", @@ -121,7 +121,7 @@ class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { paramsIndexs[PeerCluster::PeerToId(peer1)] = 0; - params.push_back(chunkConcurrencyParams1[0]); + params.push_back(const_cast<char**>(chunkConcurrencyParams1[0])); } virtual void TearDown() { std::string rmdir1("rm -fr "); @@ -192,7 +192,7 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { paramsIndexs[PeerCluster::PeerToId(peer1)] = 0; - params.push_back(chunkConcurrencyParams2[0]); + params.push_back(const_cast<char**>(chunkConcurrencyParams2[0])); // 初始化FilePool,这里会预先分配一些chunk lfs = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); @@ -1401,7 +1401,6 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { TEST_F(ChunkServerConcurrentFromFilePoolTest, CreateCloneMultiChunk) { const int kThreadNum = 10; ChunkID chunkIdRange = kChunkNum; - const int sn = 1; // 1.
启动一个成员的复制组 PeerCluster cluster("InitShutdown-cluster", diff --git a/test/integration/chunkserver/datastore/datastore_restart_test.cpp b/test/integration/chunkserver/datastore/datastore_restart_test.cpp index 75f5b2f6fa..f7a9d9ae5a 100644 --- a/test/integration/chunkserver/datastore/datastore_restart_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_restart_test.cpp @@ -156,8 +156,8 @@ class ExecWrite : public ExecStep { } void Dump() override { - printf("WriteChunk, id = %llu, sn = %llu, offset = %llu, " - "size = %llu, data = %c.\n", + printf("WriteChunk, id = %lu, sn = %lu, offset = %lu, " + "size = %lu, data = %c.\n", id_, sn_, data_.offset, data_.length, data_.data); } @@ -182,8 +182,8 @@ class ExecPaste : public ExecStep { } void Dump() override { - printf("PasteChunk, id = %llu, offset = %llu, " - "size = %llu, data = %c.\n", + printf("PasteChunk, id = %lu, offset = %lu, " + "size = %lu, data = %c.\n", id_, data_.offset, data_.length, data_.data); } @@ -204,7 +204,7 @@ class ExecDelete : public ExecStep { } void Dump() override { - printf("DeleteChunk, id = %llu, sn = %llu.\n", id_, sn_); + printf("DeleteChunk, id = %lu, sn = %lu.\n", id_, sn_); } private: @@ -226,7 +226,7 @@ class ExecDeleteSnapshot : public ExecStep { void Dump() override { printf("DeleteSnapshotChunkOrCorrectSn, " - "id = %llu, correctedSn = %llu.\n", id_, correctedSn_); + "id = %lu, correctedSn = %lu.\n", id_, correctedSn_); } private: @@ -251,8 +251,8 @@ class ExecCreateClone : public ExecStep { } void Dump() override { - printf("CreateCloneChunk, id = %llu, sn = %llu, correctedSn = %llu, " - "chunk size = %llu, location = %s.\n", + printf("CreateCloneChunk, id = %lu, sn = %lu, correctedSn = %lu, " + "chunk size = %u, location = %s.\n", id_, sn_, correctedSn_, size_, location_.c_str()); } diff --git a/test/integration/chunkserver/datastore/datastore_stress_test.cpp b/test/integration/chunkserver/datastore/datastore_stress_test.cpp index 5f2af8086b..2364d61dd2 100644 --- a/test/integration/chunkserver/datastore/datastore_stress_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_stress_test.cpp @@ -64,8 +64,7 @@ TEST_F(StressTestSuit, StressTest) { auto RunStress = [&](int threadNum, int rwPercent, int ioNum) { uint64_t beginTime = TimeUtility::GetTimeofDayUs(); - const int kThreadNum = threadNum; - Thread threads[kThreadNum]; + Thread *threads = new Thread[threadNum]; int readThreadNum = threadNum * rwPercent / 100; int ioNumAvg = ioNum / threadNum; int idRange = 100; @@ -77,17 +76,18 @@ TEST_F(StressTestSuit, StressTest) { threads[i] = std::thread(RunWrite, idRange, ioNumAvg); } - for (auto& t : threads) { - t.join(); + for (int i = 0; i < threadNum; ++i) { + threads[i].join(); } uint64_t endTime = TimeUtility::GetTimeofDayUs(); uint64_t iops = ioNum * 1000000L / (endTime - beginTime); - printf("Total time used: %llu us\n", endTime - beginTime); + printf("Total time used: %lu us\n", endTime - beginTime); printf("Thread number: %d\n", threadNum); printf("read write percent: %d\n", rwPercent); printf("io num: %d\n", ioNum); - printf("iops: %llu\n", iops); + printf("iops: %lu\n", iops); + delete[] threads; }; printf("===============TEST WRITE==================\n"); diff --git a/test/integration/client/common/file_operation.cpp b/test/integration/client/common/file_operation.cpp index 1be458f930..44dfc186a5 100644 --- a/test/integration/client/common/file_operation.cpp +++ b/test/integration/client/common/file_operation.cpp @@ -20,26 +20,25 @@ * Author: tongguangxun */ -#include +#include 
"test/integration/client/common/file_operation.h" -#include -#include -#include -#include // NOLINT -#include -#include // NOLINT -#include -#include +#include -#include "src/common/timeutility.h" #include "include/client/libcurve.h" -#include "src/client/inflight_controller.h" -#include "test/integration/client/common/file_operation.h" +#include "src/client/client_common.h" +#include "src/client/libcurve_file.h" + +extern curve::client::FileClient* globalclient; namespace curve { namespace test { + +using curve::client::CreateFileContext; + int FileCommonOperation::Open(const std::string& filename, const std::string& owner) { + assert(globalclient != nullptr); + C_UserInfo_t userinfo; memset(userinfo.owner, 0, 256); memcpy(userinfo.owner, owner.c_str(), owner.size()); @@ -63,19 +62,30 @@ int FileCommonOperation::Open(const std::string& filename, } void FileCommonOperation::Close(int fd) { + assert(globalclient != nullptr); + ::Close(fd); } int FileCommonOperation::Open(const std::string& filename, const std::string& owner, uint64_t stripeUnit, uint64_t stripeCount) { + assert(globalclient != nullptr); + C_UserInfo_t userinfo; memset(userinfo.owner, 0, 256); memcpy(userinfo.owner, owner.c_str(), owner.size()); + CreateFileContext context; + context.pagefile = true; + context.name = filename; + context.user.owner = owner; + context.length = 100 * 1024 * 1024 * 1024ul; + context.stripeUnit = stripeUnit; + context.stripeCount = stripeCount; + // 先创建文件 - int ret = ::Create2(filename.c_str(), &userinfo, - 100*1024*1024*1024ul, stripeUnit, stripeCount); + int ret = globalclient->Create2(context); if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::EXISTS) { LOG(ERROR) << "file create failed! " << ret << ", filename = " << filename; diff --git a/test/integration/client/config/client.conf b/test/integration/client/config/client.conf deleted file mode 100644 index 969c098900..0000000000 --- a/test/integration/client/config/client.conf +++ /dev/null @@ -1,48 +0,0 @@ -# -# Copyright (c) 2020 NetEase Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -chunkserver.enableAppliedIndexRead=1 -chunkserver.maxRPCTimeoutMS=8000 -chunkserver.maxRetrySleepIntervalUS=8000000 -chunkserver.maxRetryTimesBeforeConsiderSuspend=20 -chunkserver.maxStableTimeoutTimes=64 -chunkserver.minRetryTimesForceTimeoutBackoff=5 -chunkserver.opMaxRetry=10 -chunkserver.opRetryIntervalUS=100000 -chunkserver.rpcTimeoutMS=1000 -global.fileIOSplitMaxSizeKB=64 -global.fileMaxInFlightRPCNum=64 -global.logLevel=0 -global.logPath=./runlog/MDSExceptionTest -global.metricDummyServerStartPort=9000 -global.sessionMapPath=./session_map.json -isolation.taskQueueCapacity=1000000 -isolation.taskThreadPoolSize=1 -mds.listen.addr=127.0.0.1:22222,127.0.0.1:22223,127.0.0.1:22224 -mds.maxFailedTimesBeforeChangeMDS=2 -mds.maxRPCTimeoutMS=2000 -mds.maxRetryMS=8000 -mds.refreshTimesPerLease=4 -mds.registerToMDS=true -mds.rpcRetryIntervalUS=100000 -mds.rpcTimeoutMS=500 -metacache.getLeaderBackupRequestLbName=rr -metacache.getLeaderBackupRequestMS=100 -metacache.getLeaderRetry=5 -metacache.getLeaderTimeOutMS=500 -metacache.rpcRetryIntervalUS=100000 -schedule.queueCapacity=1000000 -schedule.threadpoolSize=1 \ No newline at end of file diff --git a/test/integration/client/config/client.conf.1 b/test/integration/client/config/client.conf.1 deleted file mode 100644 index 3bf02b96e0..0000000000 --- a/test/integration/client/config/client.conf.1 +++ /dev/null @@ -1,32 +0,0 @@ -chunkserver.checkHealthTimeoutMs=100 -chunkserver.enableAppliedIndexRead=1 -chunkserver.maxRPCTimeoutMS=8000 -chunkserver.maxRetrySleepIntervalUS=8000000 -chunkserver.maxRetryTimesBeforeConsiderSuspend=20 -chunkserver.maxStableTimeoutTimes=10 -chunkserver.minRetryTimesForceTimeoutBackoff=5 -chunkserver.opMaxRetry=10 -chunkserver.opRetryIntervalUS=100000 -chunkserver.rpcTimeoutMS=1000 -chunkserver.serverStableThreshold=3 -global.fileIOSplitMaxSizeKB=64 -global.fileMaxInFlightRPCNum=64 -global.logLevel=0 -global.logPath=./runlog/ChunkserverException -global.metricDummyServerStartPort=9000 -global.turnOffHealthCheck=true -isolation.taskQueueCapacity=1000000 -isolation.taskThreadPoolSize=1 -mds.listen.addr=127.0.0.1:22122,127.0.0.1:22123,127.0.0.1:22124 -mds.maxFailedTimesBeforeChangeMDS=2 -mds.maxRPCTimeoutMS=2000 -mds.maxRetryMS=8000 -mds.refreshTimesPerLease=4 -mds.registerToMDS=true -mds.rpcRetryIntervalUS=100000 -mds.rpcTimeoutMS=500 -metacache.getLeaderRetry=5 -metacache.getLeaderTimeOutMS=500 -metacache.rpcRetryIntervalUS=100000 -schedule.queueCapacity=1000000 -schedule.threadpoolSize=1 diff --git a/test/integration/client/config/topo_example.json b/test/integration/client/config/topo_example.json index a46a983212..1f75ccaeb0 100644 --- a/test/integration/client/config/topo_example.json +++ b/test/integration/client/config/topo_example.json @@ -18,7 +18,8 @@ "internalport": 22225, "name": "server4", "physicalpool": "pool1", - "zone": "zone4" + "zone": "zone4", + "poolset": "default" }, { "externalip": "127.0.0.1", @@ -27,7 +28,8 @@ "internalport": 22226, "name": "server5", "physicalpool": "pool1", - "zone": "zone5" + "zone": "zone5", + "poolset": "default" }, { "externalip": "127.0.0.1", @@ -36,8 +38,8 @@ "internalport": 22227, "name": "server6", "physicalpool": "pool1", - "zone": "zone6" + "zone": "zone6", + "poolset": "default" } ] } - diff --git a/test/integration/client/config/topo_example_1.json b/test/integration/client/config/topo_example_1.json index bb513af4ad..5074f995d7 100644 --- a/test/integration/client/config/topo_example_1.json +++ b/test/integration/client/config/topo_example_1.json @@ -18,7 +18,8 @@ 
"internalport": 22125, "name": "server4", "physicalpool": "pool1", - "zone": "zone4" + "zone": "zone4", + "poolset": "default" }, { "externalip": "127.0.0.1", @@ -27,7 +28,8 @@ "internalport": 22126, "name": "server5", "physicalpool": "pool1", - "zone": "zone5" + "zone": "zone5", + "poolset": "default" }, { "externalip": "127.0.0.1", @@ -36,7 +38,8 @@ "internalport": 22127, "name": "server6", "physicalpool": "pool1", - "zone": "zone6" + "zone": "zone6", + "poolset": "default" } ] } diff --git a/test/integration/client/config/unstable/topo_unstable.json b/test/integration/client/config/unstable/topo_unstable.json index d996f83b9e..cf62c19be5 100644 --- a/test/integration/client/config/unstable/topo_unstable.json +++ b/test/integration/client/config/unstable/topo_unstable.json @@ -18,7 +18,8 @@ "internalport": 31000, "name": "server4", "physicalpool": "pool1", - "zone": "zone4" + "zone": "zone4", + "poolset": "default" }, { "externalip": "127.0.0.1", @@ -27,7 +28,8 @@ "internalport": 31001, "name": "server5", "physicalpool": "pool1", - "zone": "zone4" + "zone": "zone4", + "poolset": "default" }, { "externalip": "127.0.0.1", @@ -36,7 +38,8 @@ "internalport": 31010, "name": "server7", "physicalpool": "pool1", - "zone": "zone5" + "zone": "zone5", + "poolset": "default" }, { "externalip": "127.0.0.1", @@ -45,7 +48,8 @@ "internalport": 31011, "name": "server8", "physicalpool": "pool1", - "zone": "zone5" + "zone": "zone5", + "poolset": "default" }, { "externalip": "127.0.0.1", @@ -54,7 +58,8 @@ "internalport": 31020, "name": "server10", "physicalpool": "pool1", - "zone": "zone6" + "zone": "zone6", + "poolset": "default" }, { "externalip": "127.0.0.1", @@ -63,8 +68,8 @@ "internalport": 31021, "name": "server11", "physicalpool": "pool1", - "zone": "zone6" + "zone": "zone6", + "poolset": "default" } ] } - diff --git a/test/integration/client/config/unstable/topo_unstable.txt b/test/integration/client/config/unstable/topo_unstable.txt deleted file mode 100644 index ec75273027..0000000000 --- a/test/integration/client/config/unstable/topo_unstable.txt +++ /dev/null @@ -1,6 +0,0 @@ -server4 127.0.0.1:31000 127.0.0.1:31000 zone4 pool1 -server5 127.0.0.1:31001 127.0.0.1:31001 zone4 pool1 -server7 127.0.0.1:31010 127.0.0.1:31010 zone5 pool1 -server8 127.0.0.1:31011 127.0.0.1:31011 zone5 pool1 -server10 127.0.0.1:31020 127.0.0.1:31020 zone6 pool1 -server11 127.0.0.1:31021 127.0.0.1:31021 zone6 pool1 diff --git a/test/integration/client/config/unstable/topo_unstable_small.json b/test/integration/client/config/unstable/topo_unstable_small.json deleted file mode 100644 index 5ff7e99a35..0000000000 --- a/test/integration/client/config/unstable/topo_unstable_small.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "logicalpools": [ - { - "copysetnum": 300, - "name": "logicalPool1", - "physicalpool": "pool1", - "replicasnum": 3, - "scatterwidth": 0, - "type": 0, - "zonenum": 3 - } - ], - "servers": [ - { - "externalip": "127.0.0.1", - "externalport": 31000, - "internalip": "127.0.0.1", - "internalport": 31000, - "name": "server4", - "physicalpool": "pool1", - "zone": "zone4" - }, - { - "externalip": "127.0.0.1", - "externalport": 31010, - "internalip": "127.0.0.1", - "internalport": 31010, - "name": "server7", - "physicalpool": "pool1", - "zone": "zone5" - }, - { - "externalip": "127.0.0.1", - "externalport": 31020, - "internalip": "127.0.0.1", - "internalport": 31020, - "name": "server10", - "physicalpool": "pool1", - "zone": "zone6" - } - ] -} - diff --git a/test/integration/client/config/unstable/topo_unstable_small.txt 
b/test/integration/client/config/unstable/topo_unstable_small.txt deleted file mode 100644 index 4ca506b66a..0000000000 --- a/test/integration/client/config/unstable/topo_unstable_small.txt +++ /dev/null @@ -1,3 +0,0 @@ -server4 127.0.0.1:31000 127.0.0.1:31000 zone4 pool1 -server7 127.0.0.1:31010 127.0.0.1:31010 zone5 pool1 -server10 127.0.0.1:31020 127.0.0.1:31020 zone6 pool1 diff --git a/test/integration/client/unstable_chunkserver_exception_test.cpp b/test/integration/client/unstable_chunkserver_exception_test.cpp index f542449fe9..2835196a95 100644 --- a/test/integration/client/unstable_chunkserver_exception_test.cpp +++ b/test/integration/client/unstable_chunkserver_exception_test.cpp @@ -50,8 +50,8 @@ const char* kMdsConfPath = "./test/integration/unstable_test_mds.conf"; const char* kCSConfPath = "./test/integration/unstable_test_cs.conf"; const char* kClientConfPath = "./test/integration/unstable_test_client.conf"; -const char* kEtcdClientIpPort = "127.0.0.1:30000"; -const char* kEtcdPeerIpPort = "127.0.0.1:29999"; +const char* kEtcdClientIpPort = "127.0.0.1:21000"; +const char* kEtcdPeerIpPort = "127.0.0.1:20999"; const char* kMdsIpPort = "127.0.0.1:30010"; const char* kClientInflightNum = "6"; const char* kLogPath = "./runlog/"; @@ -230,6 +230,10 @@ class UnstableCSModuleException : public ::testing::Test { system("rm -rf module_exception_curve_unstable_cs.etcd"); system("rm -rf module_exception_curve_unstable_cs"); system("rm -rf ttt"); + + ::unlink(kMdsConfPath); + ::unlink(kCSConfPath); + ::unlink(kClientConfPath); } static void StartAllChunkserver() { @@ -311,9 +315,9 @@ TEST_F(UnstableCSModuleException, TestCommonReadAndWrite) { ::Create(filename.c_str(), &info, 10ull * 1024 * 1024 * 1024); int fd = ::Open(filename.c_str(), &info); - int ret = ::Read(fd, readBuff.get(), offset, length); + (void)::Read(fd, readBuff.get(), offset, length); LOG(INFO) << "Read finish, here"; - ret = ::Write(fd, readBuff.get(), offset, length); + (void)::Write(fd, readBuff.get(), offset, length); LOG(INFO) << "Write finish, here"; ::Close(fd); diff --git a/test/integration/cluster_common/cluster.cpp b/test/integration/cluster_common/cluster.cpp index 1013711343..7c45c14fe8 100644 --- a/test/integration/cluster_common/cluster.cpp +++ b/test/integration/cluster_common/cluster.cpp @@ -43,6 +43,9 @@ using ::curve::kvstorage::EtcdClientImp; using ::curve::snapshotcloneserver::SnapshotCloneCodec; namespace curve { + +using ::curve::client::CreateFileContext; + int CurveCluster::InitMdsClient(const curve::client::MetaServerOption &op) { mdsClient_ = std::make_shared(); return mdsClient_->Initialize(op); @@ -737,12 +740,19 @@ bool CurveCluster::CurrentServiceMDS(int *curId) { int CurveCluster::CreateFile(const std::string &user, const std::string &pwd, const std::string &fileName, uint64_t fileSize, - bool normalFile) { + bool normalFile, const std::string& poolset) { LOG(INFO) << "create file: " << fileName << ", size: " << fileSize << " begin..."; UserInfo_t info(user, pwd); + CreateFileContext context; + context.pagefile = true; + context.name = fileName; + context.user = info; + context.length = fileSize; + context.poolset = poolset; + RETURN_IF_NOT_ZERO( - mdsClient_->CreateFile(fileName, info, fileSize, normalFile)); + mdsClient_->CreateFile(context)); LOG(INFO) << "success create file"; return 0; } @@ -772,7 +782,6 @@ int CurveCluster::ProbePort(const std::string &ipPort, int64_t timeoutMs, addr.sin_port = htons(port); addr.sin_addr.s_addr = inet_addr(res[0].c_str()); - bool satisfy = false; 
uint64_t start = ::curve::common::TimeUtility::GetTimeofDayMs(); while (::curve::common::TimeUtility::GetTimeofDayMs() - start < timeoutMs) { int connectRes = diff --git a/test/integration/cluster_common/cluster.h b/test/integration/cluster_common/cluster.h index 129bbccddb..bf061e8dc0 100644 --- a/test/integration/cluster_common/cluster.h +++ b/test/integration/cluster_common/cluster.h @@ -367,7 +367,7 @@ class CurveCluster { */ int CreateFile(const std::string &user, const std::string &pwd, const std::string &fileName, uint64_t fileSize = 0, - bool normalFile = true); + bool normalFile = true, const std::string& poolset = ""); private: /** diff --git a/test/integration/cluster_common/cluster_basic_test.cpp b/test/integration/cluster_common/cluster_basic_test.cpp index 7462c17682..a1a1775da9 100644 --- a/test/integration/cluster_common/cluster_basic_test.cpp +++ b/test/integration/cluster_common/cluster_basic_test.cpp @@ -229,7 +229,8 @@ TEST_F(ClusterBasicTest, test_start_stop_module2) { // 创建文件 ASSERT_EQ(0, curveCluster_->CreateFile("test", "test", "/basic_test", - 10 * 1024 * 1024 * 1024UL)); + 10 * 1024 * 1024 * 1024UL, + /*normalFile=*/true, "SSD_2")); // 获取当前正在服务的mds int curMds; @@ -240,12 +241,14 @@ TEST_F(ClusterBasicTest, test_start_stop_module2) { ASSERT_EQ(0, curveCluster_->HangMDS(1)); // 创建文件失败 ASSERT_NE(0, curveCluster_->CreateFile("test1", "test1", "/basic_test1", - 10 * 1024 * 1024 * 1024UL)); + 10 * 1024 * 1024 * 1024UL, + /*normalFile=*/true, "SSD_2")); // 恢复mds进程 ASSERT_EQ(0, curveCluster_->RecoverHangMDS(1)); // 创建文件成功 ASSERT_EQ(0, curveCluster_->CreateFile("test2", "test2", "/basic_test2", - 10 * 1024 * 1024 * 1024UL)); + 10 * 1024 * 1024 * 1024UL, + /*normalFile=*/true, "SSD_2")); // 停掉chunkserver ASSERT_EQ(0, curveCluster_->StopChunkServer(1)); diff --git a/test/integration/cluster_common/cluster_common_topo_1.json b/test/integration/cluster_common/cluster_common_topo_1.json index bd5632180b..e80a1c63da 100644 --- a/test/integration/cluster_common/cluster_common_topo_1.json +++ b/test/integration/cluster_common/cluster_common_topo_1.json @@ -1,4 +1,10 @@ { + "poolsets":[ + { + "name": "SSD_1", + "type": "SSD" + } + ], "logicalpools": [ { "copysetnum": 10, @@ -18,7 +24,8 @@ "internalport": 0, "name": "server1", "physicalpool": "pool1", - "zone": "zone1" + "zone": "zone1", + "poolset": "SSD_1" }, { "externalip": "192.168.200.42", @@ -27,7 +34,8 @@ "internalport": 0, "name": "server2", "physicalpool": "pool1", - "zone": "zone2" + "zone": "zone2", + "poolset": "SSD_1" }, { "externalip": "192.168.200.43", @@ -36,7 +44,8 @@ "internalport": 0, "name": "server3", "physicalpool": "pool1", - "zone": "zone3" + "zone": "zone3", + "poolset": "SSD_1" }, { "externalip": "192.168.200.44", @@ -45,8 +54,8 @@ "internalport": 0, "name": "server4", "physicalpool": "pool1", - "zone": "zone4" + "zone": "zone4", + "poolset": "SSD_1" } ] } - diff --git a/test/integration/cluster_common/cluster_common_topo_2.json b/test/integration/cluster_common/cluster_common_topo_2.json index 97a4291b3d..4616ee0c02 100644 --- a/test/integration/cluster_common/cluster_common_topo_2.json +++ b/test/integration/cluster_common/cluster_common_topo_2.json @@ -1,4 +1,10 @@ { + "poolsets":[ + { + "name": "SSD_2", + "type": "SSD" + } + ], "logicalpools": [ { "copysetnum": 20, @@ -18,7 +24,8 @@ "internalport": 2001, "name": "server1", "physicalpool": "pool1", - "zone": "zone1" + "zone": "zone1", + "poolset": "SSD_2" }, { "externalip": "127.0.0.1", @@ -27,7 +34,8 @@ "internalport": 2002, "name": "server2", 
"physicalpool": "pool1", - "zone": "zone2" + "zone": "zone2", + "poolset": "SSD_2" }, { "externalip": "127.0.0.1", @@ -36,7 +44,8 @@ "internalport": 2003, "name": "server3", "physicalpool": "pool1", - "zone": "zone3" + "zone": "zone3", + "poolset": "SSD_2" }, { "externalip": "127.0.0.1", @@ -45,7 +54,8 @@ "internalport": 2004, "name": "server4", "physicalpool": "pool1", - "zone": "zone4" + "zone": "zone4", + "poolset": "SSD_2" } ] } diff --git a/test/integration/common/peer_cluster.cpp b/test/integration/common/peer_cluster.cpp index c448897b0b..f09db13283 100644 --- a/test/integration/common/peer_cluster.cpp +++ b/test/integration/common/peer_cluster.cpp @@ -52,8 +52,8 @@ PeerCluster::PeerCluster(const std::string &clusterName, clusterName_(clusterName), snapshotIntervalS_(1), electionTimeoutMs_(1000), - params_(params), paramsIndexs_(paramsIndexs), + params_(params), isFakeMdsStart_(false) { logicPoolID_ = logicPoolID; copysetID_ = copysetID; diff --git a/test/integration/heartbeat/common.cpp b/test/integration/heartbeat/common.cpp index cd497f3d60..5d09293287 100644 --- a/test/integration/heartbeat/common.cpp +++ b/test/integration/heartbeat/common.cpp @@ -26,6 +26,12 @@ namespace curve { namespace mds { +void HeartbeatIntegrationCommon::PrepareAddPoolset( + const Poolset &poolset) { + int ret = topology_->AddPoolset(poolset); + EXPECT_EQ(topology::kTopoErrCodeSuccess, ret); +} + void HeartbeatIntegrationCommon::PrepareAddLogicalPool( const LogicalPool &lpool) { int ret = topology_->AddLogicalPool(lpool); @@ -187,10 +193,14 @@ void HeartbeatIntegrationCommon::RemoveOperatorFromOpController( void HeartbeatIntegrationCommon::PrepareBasicCluseter() { assert(topology_ != nullptr); + // add poolset + PoolsetIdType poolsetId = 2; + Poolset poolset(poolsetId, "testPoolset", "SSD", "descPoolset"); + PrepareAddPoolset(poolset); // add physical pool PoolIdType physicalPoolId = 1; - PhysicalPool ppool(1, "testPhysicalPool", "descPhysicalPool"); + PhysicalPool ppool(1, "testPhysicalPool", poolsetId, "descPhysicalPool"); PrepareAddPhysicalPool(ppool); // add logical pool diff --git a/test/integration/heartbeat/common.h b/test/integration/heartbeat/common.h index d422dad96e..b281d5a9ab 100644 --- a/test/integration/heartbeat/common.h +++ b/test/integration/heartbeat/common.h @@ -70,7 +70,9 @@ using ::curve::mds::topology::DefaultTokenGenerator; using ::curve::mds::topology::kTopoErrCodeSuccess; using ::curve::mds::topology::LogicalPool; using ::curve::mds::topology::LogicalPoolType; +using ::curve::mds::topology::Poolset; using ::curve::mds::topology::PhysicalPool; +using ::curve::mds::topology::PoolsetIdType; using ::curve::mds::topology::PoolIdType; using ::curve::mds::topology::ServerIdType; using ::curve::mds::topology::TopologyImpl; @@ -118,6 +120,11 @@ class FakeTopologyStorage : public TopologyStorage { public: FakeTopologyStorage() {} + bool + LoadPoolset(std::unordered_map *PoolsetMap, + PoolsetIdType *maxPoolsetId) { + return true; + } bool LoadLogicalPool(std::unordered_map *logicalPoolMap, PoolIdType *maxLogicalPoolId) { @@ -146,6 +153,9 @@ class FakeTopologyStorage : public TopologyStorage { return true; } + bool StoragePoolset(const Poolset &data) { + return true; + } bool StorageLogicalPool(const LogicalPool &data) { return true; } @@ -165,6 +175,9 @@ class FakeTopologyStorage : public TopologyStorage { return true; } + bool DeletePoolset(PoolsetIdType id) { + return true; + } bool DeleteLogicalPool(PoolIdType id) { return true; } @@ -222,6 +235,12 @@ class HeartbeatIntegrationCommon { 
conf_ = conf; } + /* PrepareAddPoolset 在集群中添加物理池集合 + * + * @param[in] poolset 物理池集合(池组) + */ + void PrepareAddPoolset(const Poolset &poolset); + /* PrepareAddLogicalPool 在集群中添加逻辑池 * * @param[in] lpool 逻辑池 diff --git a/test/integration/raft/raft_config_change_test.cpp b/test/integration/raft/raft_config_change_test.cpp index a1704dba01..c585022469 100644 --- a/test/integration/raft/raft_config_change_test.cpp +++ b/test/integration/raft/raft_config_change_test.cpp @@ -44,7 +44,7 @@ const char kRaftConfigChangeTestLogDir[] = "./runlog/RaftConfigChange"; const char* kFakeMdsAddr = "127.0.0.1:9080"; const uint32_t kOpRequestAlignSize = 4096; -static char* raftConfigParam[5][16] = { +static const char* raftConfigParam[5][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", @@ -224,11 +224,11 @@ class RaftConfigChangeTest : public testing::Test { paramsIndexs[PeerCluster::PeerToId(peer4)] = 3; paramsIndexs[PeerCluster::PeerToId(peer5)] = 4; - params.push_back(raftConfigParam[0]); - params.push_back(raftConfigParam[1]); - params.push_back(raftConfigParam[2]); - params.push_back(raftConfigParam[3]); - params.push_back(raftConfigParam[4]); + params.push_back(const_cast<char**>(raftConfigParam[0])); + params.push_back(const_cast<char**>(raftConfigParam[1])); + params.push_back(const_cast<char**>(raftConfigParam[2])); + params.push_back(const_cast<char**>(raftConfigParam[3])); + params.push_back(const_cast<char**>(raftConfigParam[4])); } virtual void TearDown() { // wait for process exit @@ -1415,7 +1415,6 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; - const int kMaxLoop = 10; butil::Status st = AddPeer(logicPoolId, copysetId, conf, peer4, options); ASSERT_TRUE(st.ok()); diff --git a/test/integration/raft/raft_log_replication_test.cpp b/test/integration/raft/raft_log_replication_test.cpp index a560b538bb..0a0b25cf54 100644 --- a/test/integration/raft/raft_log_replication_test.cpp +++ b/test/integration/raft/raft_log_replication_test.cpp @@ -46,7 +46,7 @@ const uint32_t kOpRequestAlignSize = 4096; const char kRaftLogRepTestLogDir[] = "./runlog/RaftLogRep"; const char* kFakeMdsAddr = "127.0.0.1:9070"; -static char* raftLogParam[5][16] = { +static const char* raftLogParam[5][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", @@ -224,11 +224,11 @@ class RaftLogReplicationTest : public testing::Test { paramsIndexs[PeerCluster::PeerToId(peer4)] = 3; paramsIndexs[PeerCluster::PeerToId(peer5)] = 4; - params.push_back(raftLogParam[0]); - params.push_back(raftLogParam[1]); - params.push_back(raftLogParam[2]); - params.push_back(raftLogParam[3]); - params.push_back(raftLogParam[4]); + params.push_back(const_cast<char**>(raftLogParam[0])); + params.push_back(const_cast<char**>(raftLogParam[1])); + params.push_back(const_cast<char**>(raftLogParam[2])); + params.push_back(const_cast<char**>(raftLogParam[3])); + params.push_back(const_cast<char**>(raftLogParam[4])); } virtual void TearDown() { std::string rmdir1("rm -fr "); diff --git a/test/integration/raft/raft_snapshot_test.cpp b/test/integration/raft/raft_snapshot_test.cpp index b77d53f954..661dad2862 100644 --- a/test/integration/raft/raft_snapshot_test.cpp +++ b/test/integration/raft/raft_snapshot_test.cpp @@ -45,7 +45,7 @@ const char* kFakeMdsAddr = "127.0.0.1:9320"; const uint32_t kOpRequestAlignSize = 4096; -static char *raftVoteParam[4][16] = { +static const char *raftVoteParam[4][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", @@ -190,10 +190,10 @@ class RaftSnapshotTest : public
testing::Test { paramsIndexs_[PeerCluster::PeerToId(peer3_)] = 2; paramsIndexs_[PeerCluster::PeerToId(peer4_)] = 3; - params_.push_back(raftVoteParam[0]); - params_.push_back(raftVoteParam[1]); - params_.push_back(raftVoteParam[2]); - params_.push_back(raftVoteParam[3]); + params_.push_back(const_cast<char**>(raftVoteParam[0])); + params_.push_back(const_cast<char**>(raftVoteParam[1])); + params_.push_back(const_cast<char**>(raftVoteParam[2])); + params_.push_back(const_cast<char**>(raftVoteParam[3])); // 配置默认raft client option defaultCliOpt_.max_retry = 3; diff --git a/test/integration/raft/raft_vote_test.cpp b/test/integration/raft/raft_vote_test.cpp index f83c7a2d1d..032e5e397c 100644 --- a/test/integration/raft/raft_vote_test.cpp +++ b/test/integration/raft/raft_vote_test.cpp @@ -45,7 +45,7 @@ const char* kFakeMdsAddr = "127.0.0.1:9089"; const uint32_t kOpRequestAlignSize = 4096; -static char* raftVoteParam[3][16] = { +static const char* raftVoteParam[3][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", @@ -159,9 +159,9 @@ class RaftVoteTest : public testing::Test { paramsIndexs[PeerCluster::PeerToId(peer2)] = 1; paramsIndexs[PeerCluster::PeerToId(peer3)] = 2; - params.push_back(raftVoteParam[0]); - params.push_back(raftVoteParam[1]); - params.push_back(raftVoteParam[2]); + params.push_back(const_cast<char**>(raftVoteParam[0])); + params.push_back(const_cast<char**>(raftVoteParam[1])); + params.push_back(const_cast<char**>(raftVoteParam[2])); } virtual void TearDown() { std::string rmdir1("rm -fr "); diff --git a/test/integration/snapshotcloneserver/config/topo.json b/test/integration/snapshotcloneserver/config/topo.json index d1ad4f7c50..315fca4a1e 100644 --- a/test/integration/snapshotcloneserver/config/topo.json +++ b/test/integration/snapshotcloneserver/config/topo.json @@ -18,7 +18,8 @@ "internalport": 10004, "name": "server1", "physicalpool": "pool1", - "zone": "zone1" + "zone": "zone1", + "poolset": "default" }, { "externalip": "127.0.0.1", @@ -27,7 +28,8 @@ "internalport": 10005, "name": "server2", "physicalpool": "pool1", - "zone": "zone2" + "zone": "zone2", + "poolset": "default" }, { "externalip": "127.0.0.1", @@ -36,9 +38,8 @@ "internalport": 10006, "name": "server3", "physicalpool": "pool1", - "zone": "zone3" + "zone": "zone3", + "poolset": "default" } ] } - - diff --git a/test/integration/snapshotcloneserver/config/topo2.json b/test/integration/snapshotcloneserver/config/topo2.json index 95f113a424..657a9adbb6 100644 --- a/test/integration/snapshotcloneserver/config/topo2.json +++ b/test/integration/snapshotcloneserver/config/topo2.json @@ -18,7 +18,8 @@ "internalport": 10014, "name": "server1", "physicalpool": "pool1", - "zone": "zone1" + "zone": "zone1", + "poolset": "default" }, { "externalip": "127.0.0.1", @@ -27,7 +28,8 @@ "internalport": 10015, "name": "server2", "physicalpool": "pool1", - "zone": "zone2" + "zone": "zone2", + "poolset": "default" }, { "externalip": "127.0.0.1", @@ -36,9 +38,8 @@ "internalport": 10016, "name": "server3", "physicalpool": "pool1", - "zone": "zone3" + "zone": "zone3", + "poolset": "default" } ] } - - diff --git a/test/integration/snapshotcloneserver/config/topo3.json b/test/integration/snapshotcloneserver/config/topo3.json index ce628e3207..70c9602d58 100644 --- a/test/integration/snapshotcloneserver/config/topo3.json +++ b/test/integration/snapshotcloneserver/config/topo3.json @@ -10,6 +10,12 @@ "zonenum": 3 } ], + "poolsets": [ + { + "name": "default", + "type": "ssd" + } + ], "servers": [ { "externalip": "127.0.0.1", @@ -18,7 +24,8 @@ "internalport": 10024, "name": "server1",
"physicalpool": "pool1", - "zone": "zone1" + "zone": "zone1", + "poolset": "default" }, { "externalip": "127.0.0.1", @@ -27,7 +34,8 @@ "internalport": 10025, "name": "server2", "physicalpool": "pool1", - "zone": "zone2" + "zone": "zone2", + "poolset": "default" }, { "externalip": "127.0.0.1", @@ -36,9 +44,8 @@ "internalport": 10026, "name": "server3", "physicalpool": "pool1", - "zone": "zone3" + "zone": "zone3", + "poolset": "default" } ] } - - diff --git a/test/integration/snapshotcloneserver/fake_curvefs_client.cpp b/test/integration/snapshotcloneserver/fake_curvefs_client.cpp index d4df03dc3a..49191fdd40 100644 --- a/test/integration/snapshotcloneserver/fake_curvefs_client.cpp +++ b/test/integration/snapshotcloneserver/fake_curvefs_client.cpp @@ -52,6 +52,7 @@ int FakeCurveFsClient::Init(const CurveClientOptions &options) { fileInfo.filename = shortTestFile1Name; fileInfo.fullPathName = testFile1; fileInfo.filestatus = FileStatus::Created; + fileInfo.poolset = "ssdPoolset1"; fileMap_.emplace(testFile1, fileInfo); @@ -173,6 +174,7 @@ int FakeCurveFsClient::CreateCloneFile( uint32_t chunkSize, uint64_t stripeUnit, uint64_t stripeCount, + const std::string& poolset, FInfo* fileInfo) { fiu_return_on( "test/integration/snapshotcloneserver/FakeCurveFsClient.CreateCloneFile", -LIBCURVE_ERROR::FAILED); // NOLINT @@ -192,6 +194,7 @@ int FakeCurveFsClient::CreateCloneFile( fileInfo->filestatus = FileStatus::Cloning; fileInfo->stripeUnit = stripeUnit; fileInfo->stripeCount = stripeCount; + fileInfo->poolset = poolset; LOG(INFO) << "CreateCloneFile " << filename; fileMap_.emplace(filename, *fileInfo); diff --git a/test/integration/snapshotcloneserver/fake_curvefs_client.h b/test/integration/snapshotcloneserver/fake_curvefs_client.h index b2ec7e45ea..0f3a0a6107 100644 --- a/test/integration/snapshotcloneserver/fake_curvefs_client.h +++ b/test/integration/snapshotcloneserver/fake_curvefs_client.h @@ -94,6 +94,7 @@ class FakeCurveFsClient : public CurveFsClient { uint32_t chunkSize, uint64_t stripeUnit, uint64_t stripeCount, + const std::string& poolset, FInfo* fileInfo) override; int CreateCloneChunk( diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp index 04605f41b4..1941b714bb 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp @@ -57,6 +57,8 @@ const int kMdsDummyPort = 10008; const char* kSnapshotCloneServerDummyServerPort = "12000"; const char* kLeaderCampaginPrefix = "snapshotcloneserverleaderlock3"; +static const char* kDefaultPoolset = "default"; + const std::string kLogPath = "./runlog/" + kTestPrefix + "Log"; // NOLINT const std::string kMdsDbName = kTestPrefix + "DB"; // NOLINT const std::string kMdsConfigPath = // NOLINT @@ -914,7 +916,7 @@ TEST_F(SnapshotCloneServerTest, TestImageNotLazyClone) { TEST_F(SnapshotCloneServerTest, TestSnapAndCloneWhenSnapHasError) { std::string snapId = "errorSnapUuid"; SnapshotInfo snapInfo(snapId, testUser1_, testFile4_, "snapxxx", 0, 0, 0, 0, - 0, 0, 0, Status::error); + 0, 0, kDefaultPoolset, 0, Status::error); cluster_->metaStore_->AddSnapshot(snapInfo); diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp index 4001b23995..a5d64cdc30 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp +++ 
b/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp @@ -69,6 +69,8 @@ const char *kSnapshotCloneServerIpPort = "127.0.0.1:10027"; const char *kSnapshotCloneServerDummyServerPort = "12002"; const char *kLeaderCampaginPrefix = "snapshotcloneserverleaderlock1"; +static const char *kDefaultPoolset = "default"; + const int kMdsDummyPort = 10028; const std::string kLogPath = "./runlog/" + kTestPrefix + "Log"; // NOLINT @@ -512,9 +514,10 @@ class SnapshotCloneServerTest : public ::testing::Test { } else { seqNum = 1; // 克隆新文件使用初始版本号1 } - int ret = snapClient_->CreateCloneFile(testFile1_, - fileName, UserInfo_t(mdsRootUser_, mdsRootPassword_), - testFile1Length, seqNum, chunkSize, 0, 0, fInfoOut); + int ret = snapClient_->CreateCloneFile( + testFile1_, fileName, + UserInfo_t(mdsRootUser_, mdsRootPassword_), testFile1Length, + seqNum, chunkSize, 0, 0, kDefaultPoolset, fInfoOut); return ret; } @@ -716,7 +719,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenNotCreateSnapOnCurvefs) { SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, "snapxxx", 0, chunkSize, segmentSize, testFile1Length, - 0, 0, 0, + 0, 0, kDefaultPoolset, 0, Status::pending); cluster_->metaStore_->AddSnapshot(snapInfo); @@ -745,7 +748,7 @@ TEST_F(SnapshotCloneServerTest, SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, "snapxxx", 0, chunkSize, segmentSize, testFile1Length, - 0, 0, 0, + 0, 0, kDefaultPoolset, 0, Status::pending); cluster_->metaStore_->AddSnapshot(snapInfo); @@ -773,7 +776,7 @@ TEST_F(SnapshotCloneServerTest, std::string uuid1 = UUIDGenerator().GenerateUUID(); SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, "snapxxx", seq, chunkSize, segmentSize, testFile1Length, - 0, 0, 0, + 0, 0, kDefaultPoolset, 0, Status::pending); cluster_->metaStore_->AddSnapshot(snapInfo); @@ -836,7 +839,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneFile) { std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneFile"; CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, - dstFile, 0, 0, 0, + dstFile, kDefaultPoolset, 0, 0, 0, CloneFileType::kFile, false, CloneStep::kCreateCloneFile, CloneStatus::cloning); @@ -866,7 +869,7 @@ TEST_F(SnapshotCloneServerTest, "/RcvItUser1/TestRecoverCloneHasCreateCloneFileSuccessNotReturn"; CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, - dstFile, 0, 0, 0, + dstFile, kDefaultPoolset, 0, 0, 0, CloneFileType::kFile, false, CloneStep::kCreateCloneFile, CloneStatus::cloning); @@ -894,7 +897,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneMeta) { std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneMeta"; CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, - dstFile, fInfoOut.id, fInfoOut.id, 0, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, CloneFileType::kFile, false, CloneStep::kCreateCloneMeta, CloneStatus::cloning); @@ -928,7 +931,7 @@ TEST_F(SnapshotCloneServerTest, "/RcvItUser1/TestRecoverCloneCreateCloneMetaSuccessNotReturn"; CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, - dstFile, fInfoOut.id, fInfoOut.id, 0, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, CloneFileType::kFile, false, CloneStep::kCreateCloneMeta, CloneStatus::cloning); @@ -960,7 +963,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneChunk) { std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneChunk"; CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, - 
dstFile, fInfoOut.id, fInfoOut.id, 0, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, CloneFileType::kFile, false, CloneStep::kCreateCloneChunk, CloneStatus::cloning); @@ -996,7 +999,7 @@ TEST_F(SnapshotCloneServerTest, "/RcvItUser1/TestRecoverCloneCreateCloneChunkSuccessNotReturn"; CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, - dstFile, fInfoOut.id, fInfoOut.id, 0, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, CloneFileType::kFile, false, CloneStep::kCreateCloneChunk, CloneStatus::cloning); @@ -1030,7 +1033,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneMeta) { std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCompleteCloneMeta"; CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, - dstFile, fInfoOut.id, fInfoOut.id, 0, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, CloneFileType::kFile, false, CloneStep::kCompleteCloneMeta, CloneStatus::cloning); @@ -1068,7 +1071,7 @@ TEST_F(SnapshotCloneServerTest, "/RcvItUser1/TestRecoverCloneCompleteCloneMetaSuccessNotReturn"; CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, - dstFile, fInfoOut.id, fInfoOut.id, 0, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, CloneFileType::kFile, false, CloneStep::kCompleteCloneMeta, CloneStatus::cloning); @@ -1104,7 +1107,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRecoverChunk) { std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotRecoverChunk"; CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, - dstFile, fInfoOut.id, fInfoOut.id, 0, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, CloneFileType::kFile, false, CloneStep::kRecoverChunk, CloneStatus::cloning); @@ -1143,7 +1146,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneRecoverChunkSuccssNotReturn) { "/RcvItUser1/TestRecoverCloneRecoverChunkSuccssNotReturn"; CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, - dstFile, fInfoOut.id, fInfoOut.id, 0, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, CloneFileType::kFile, false, CloneStep::kRecoverChunk, CloneStatus::cloning); @@ -1181,7 +1184,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneFile) { std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCompleteCloneFile"; CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, - dstFile, fInfoOut.id, fInfoOut.id, 0, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, CloneFileType::kFile, false, CloneStep::kCompleteCloneFile, CloneStatus::cloning); @@ -1223,7 +1226,7 @@ TEST_F(SnapshotCloneServerTest, "/RcvItUser1/TestRecoverCloneCompleteCloneFileSuccessNotReturn"; CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, - dstFile, fInfoOut.id, fInfoOut.id, 0, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, CloneFileType::kFile, false, CloneStep::kCompleteCloneFile, CloneStatus::cloning); @@ -1263,7 +1266,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotChangeOwner) { std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotChangeOwner"; CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, - dstFile, fInfoOut.id, fInfoOut.id, 0, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, CloneFileType::kFile, false, CloneStep::kChangeOwner, CloneStatus::cloning); @@ -1306,7 +1309,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneChangeOwnerSuccessNotReturn) { "/RcvItUser1/TestRecoverCloneChangeOwnerSuccessNotReturn"; CloneInfo 
cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, - dstFile, fInfoOut.id, fInfoOut.id, 0, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, CloneFileType::kFile, false, CloneStep::kChangeOwner, CloneStatus::cloning); @@ -1348,7 +1351,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRenameCloneFile) { std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotRenameCloneFile"; CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, - dstFile, fInfoOut.id, fInfoOut.id, 0, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, CloneFileType::kFile, false, CloneStep::kRenameCloneFile, CloneStatus::cloning); @@ -1396,7 +1399,7 @@ TEST_F(SnapshotCloneServerTest, CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, - dstFile, fInfoOut.id, fInfoOut.id, 0, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, CloneFileType::kFile, false, CloneStep::kRenameCloneFile, CloneStatus::cloning); @@ -1422,7 +1425,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, - testFile1_, 0, 0, 0, + testFile1_, kDefaultPoolset, 0, 0, 0, CloneFileType::kSnapshot, true, CloneStep::kCreateCloneFile, CloneStatus::recovering); @@ -1458,7 +1461,7 @@ TEST_F(SnapshotCloneServerTest, CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, - testFile1_, 0, 0, 0, + testFile1_, kDefaultPoolset, 0, 0, 0, CloneFileType::kSnapshot, true, CloneStep::kCreateCloneFile, CloneStatus::recovering); @@ -1493,7 +1496,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneMeta) { CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, - testFile1_, fInfoOut.id, testFd1_, 0, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, CloneFileType::kSnapshot, true, CloneStep::kCreateCloneMeta, CloneStatus::recovering); @@ -1533,7 +1536,7 @@ TEST_F(SnapshotCloneServerTest, CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, - testFile1_, fInfoOut.id, testFd1_, 0, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, CloneFileType::kSnapshot, true, CloneStep::kCreateCloneMeta, CloneStatus::recovering); @@ -1572,7 +1575,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneChunk) { CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, - testFile1_, fInfoOut.id, testFd1_, 0, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, CloneFileType::kSnapshot, true, CloneStep::kCreateCloneChunk, CloneStatus::recovering); @@ -1614,7 +1617,7 @@ TEST_F(SnapshotCloneServerTest, CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, - testFile1_, fInfoOut.id, testFd1_, 0, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, CloneFileType::kSnapshot, true, CloneStep::kCreateCloneChunk, CloneStatus::recovering); @@ -1655,7 +1658,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneMeta) { CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, - testFile1_, fInfoOut.id, testFd1_, 0, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, CloneFileType::kSnapshot, true, CloneStep::kCompleteCloneMeta, CloneStatus::recovering); @@ -1699,7 +1702,7 @@ TEST_F(SnapshotCloneServerTest, CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, - testFile1_, fInfoOut.id, testFd1_, 0, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, 
CloneFileType::kSnapshot, true, CloneStep::kCompleteCloneMeta, CloneStatus::recovering); @@ -1742,7 +1745,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotChangeOwner) { CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, - testFile1_, fInfoOut.id, testFd1_, 0, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, CloneFileType::kSnapshot, true, CloneStep::kChangeOwner, CloneStatus::recovering); @@ -1788,7 +1791,7 @@ TEST_F(SnapshotCloneServerTest, CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, - testFile1_, fInfoOut.id, testFd1_, 0, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, CloneFileType::kSnapshot, true, CloneStep::kChangeOwner, CloneStatus::recovering); @@ -1833,7 +1836,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRenameCloneFile) { CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, - testFile1_, fInfoOut.id, testFd1_, 0, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, CloneFileType::kSnapshot, true, CloneStep::kRenameCloneFile, CloneStatus::recovering); @@ -1882,7 +1885,7 @@ TEST_F(SnapshotCloneServerTest, CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, - testFile1_, fInfoOut.id, testFd1_, 0, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, CloneFileType::kSnapshot, true, CloneStep::kRenameCloneFile, CloneStatus::recovering); @@ -1930,7 +1933,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRecoverChunk) { CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, - testFile1_, fInfoOut.id, testFd1_, 0, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, CloneFileType::kSnapshot, true, CloneStep::kRecoverChunk, CloneStatus::recovering); @@ -1976,7 +1979,7 @@ TEST_F(SnapshotCloneServerTest, CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, - testFile1_, fInfoOut.id, testFd1_, 0, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, CloneFileType::kSnapshot, true, CloneStep::kRecoverChunk, CloneStatus::recovering); @@ -2021,7 +2024,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneFile) { CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, - testFile1_, fInfoOut.id, testFd1_, 0, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, CloneFileType::kSnapshot, true, CloneStep::kCompleteCloneFile, CloneStatus::recovering); @@ -2069,7 +2072,7 @@ TEST_F(SnapshotCloneServerTest, CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, - testFile1_, fInfoOut.id, testFd1_, 0, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, CloneFileType::kSnapshot, true, CloneStep::kCompleteCloneFile, CloneStatus::recovering); diff --git a/test/kvstorageclient/etcdclient_test.cpp b/test/kvstorageclient/etcdclient_test.cpp index 12df8de8a6..80f3336f3e 100644 --- a/test/kvstorageclient/etcdclient_test.cpp +++ b/test/kvstorageclient/etcdclient_test.cpp @@ -54,7 +54,7 @@ class TestEtcdClinetImp : public ::testing::Test { client_ = std::make_shared(); char endpoints[] = "127.0.0.1:2377"; - EtcdConf conf = { endpoints, strlen(endpoints), 1000 }; + EtcdConf conf = {endpoints, static_cast(strlen(endpoints)), 1000}; ASSERT_EQ(EtcdErrCode::EtcdDeadlineExceeded, client_->Init(conf, 200, 3)); @@ -204,13 +204,15 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { } // 5. 
rename file: rename file9 ~ file10, file10本来不存在 - Operation op1{ OpType::OpDelete, const_cast<char*>(keyMap[9].c_str()), - const_cast<char*>(fileInfo9.c_str()), keyMap[9].size(), - fileInfo9.size() }; - Operation op2{ OpType::OpPut, const_cast<char*>(fileKey10.c_str()), - const_cast<char*>(fileInfo10.c_str()), fileKey10.size(), - fileInfo10.size() }; - std::vector<Operation> ops{ op1, op2 }; + Operation op1{OpType::OpDelete, const_cast<char*>(keyMap[9].c_str()), + const_cast<char*>(fileInfo9.c_str()), + static_cast<int>(keyMap[9].size()), + static_cast<int>(fileInfo9.size())}; + Operation op2{OpType::OpPut, const_cast<char*>(fileKey10.c_str()), + const_cast<char*>(fileInfo10.c_str()), + static_cast<int>(fileKey10.size()), + static_cast<int>(fileInfo10.size())}; + std::vector<Operation> ops{op1, op2}; ASSERT_EQ(EtcdErrCode::EtcdOK, client_->TxnN(ops)); // cannot get file9 std::string out; @@ -222,12 +224,14 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(fileName10, fileinfo.filename()); // 6. snapshot of keyMap[6] - Operation op3{ OpType::OpPut, const_cast<char*>(keyMap[6].c_str()), - const_cast<char*>(fileInfo6.c_str()), keyMap[6].size(), - fileInfo6.size() }; - Operation op4{ OpType::OpPut, const_cast<char*>(snapshotKey6.c_str()), - const_cast<char*>(snapshotInfo6.c_str()), - snapshotKey6.size(), snapshotInfo6.size() }; + Operation op3{OpType::OpPut, const_cast<char*>(keyMap[6].c_str()), + const_cast<char*>(fileInfo6.c_str()), + static_cast<int>(keyMap[6].size()), + static_cast<int>(fileInfo6.size())}; + Operation op4{OpType::OpPut, const_cast<char*>(snapshotKey6.c_str()), + const_cast<char*>(snapshotInfo6.c_str()), + static_cast<int>(snapshotKey6.size()), + static_cast<int>(snapshotInfo6.size())}; ops.clear(); ops.emplace_back(op3); ops.emplace_back(op4); @@ -256,8 +260,9 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ("200", out); // 8. rename file: rename file7 ~ file8 - Operation op8{ OpType::OpDelete, const_cast<char*>(keyMap[7].c_str()), "", - keyMap[7].size(), 0 }; + Operation op8{OpType::OpDelete, const_cast<char*>(keyMap[7].c_str()), + const_cast<char*>(""), static_cast<int>(keyMap[7].size()), + 0}; FileInfo newFileInfo7; newFileInfo7.CopyFrom(fileInfo7); newFileInfo7.set_parentid(fileInfo8.parentid()); @@ -267,10 +272,11 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { newFileInfo7.filename()); std::string encodeNewFileInfo7; ASSERT_TRUE(newFileInfo7.SerializeToString(&encodeNewFileInfo7)); - Operation op9{ OpType::OpPut, - const_cast<char*>(encodeNewFileInfo7Key.c_str()), - const_cast<char*>(encodeNewFileInfo7.c_str()), - encodeNewFileInfo7Key.size(), encodeNewFileInfo7.size() }; + Operation op9{OpType::OpPut, + const_cast<char*>(encodeNewFileInfo7Key.c_str()), + const_cast<char*>(encodeNewFileInfo7.c_str()), + static_cast<int>(encodeNewFileInfo7Key.size()), + static_cast<int>(encodeNewFileInfo7.size())}; ops.clear(); ops.emplace_back(op8); ops.emplace_back(op9); @@ -300,9 +306,10 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(EtcdErrCode::EtcdDeadlineExceeded, client_->TxnN(ops)); client_->SetTimeout(5000); - Operation op5{ OpType(5), const_cast<char*>(snapshotKey6.c_str()), - const_cast<char*>(snapshotInfo6.c_str()), - snapshotKey6.size(), snapshotInfo6.size() }; + Operation op5{OpType(5), const_cast<char*>(snapshotKey6.c_str()), + const_cast<char*>(snapshotInfo6.c_str()), + static_cast<int>(snapshotKey6.size()), + static_cast<int>(snapshotInfo6.size())}; ops.clear(); ops.emplace_back(op3); ops.emplace_back(op5); @@ -384,7 +391,7 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { int dialtTimeout = 10000; int retryTimes = 3; char endpoints[] = "127.0.0.1:2377"; - EtcdConf conf = { endpoints, strlen(endpoints), 20000 }; + EtcdConf conf = {endpoints, static_cast<int>(strlen(endpoints)),
20000}; std::string leaderName1("leader1"); std::string leaderName2("leader2"); uint64_t leaderOid; diff --git a/test/mds/mock/mock_topology.h b/test/mds/mock/mock_topology.h index 93bb752080..aaa18ccc84 100644 --- a/test/mds/mock/mock_topology.h +++ b/test/mds/mock/mock_topology.h @@ -61,13 +61,11 @@ namespace mds { namespace topology { class MockTopology : public Topology { public: - MockTopology() {} - ~MockTopology() {} - MOCK_METHOD1(GetClusterInfo, bool(ClusterInformation *info)); // allocate id & token + MOCK_METHOD0(AllocatePoolsetId, PoolsetIdType()); MOCK_METHOD0(AllocateLogicalPoolId, PoolIdType()); MOCK_METHOD0(AllocatePhysicalPoolId, PoolIdType()); MOCK_METHOD0(AllocateZoneId, ZoneIdType()); @@ -78,8 +76,10 @@ class MockTopology : public Topology { MOCK_METHOD0(AllocateToken, std::string()); // add + MOCK_METHOD1(AddPoolset, int(const Poolset &data)); MOCK_METHOD1(AddLogicalPool, int(const LogicalPool &data)); MOCK_METHOD1(AddPhysicalPool, int(const PhysicalPool &data)); + MOCK_METHOD1(AddPhysicalPoolJustForTest, int(const PhysicalPool &data)); MOCK_METHOD1(AddZone, int(const Zone &data)); MOCK_METHOD1(AddServer, int(const Server &data)); MOCK_METHOD1(AddChunkServer, int(const ChunkServer &data)); @@ -90,6 +90,7 @@ class MockTopology : public Topology { ©sets)); // remove + MOCK_METHOD1(RemovePoolset, int(PoolsetIdType id)); MOCK_METHOD1(RemoveLogicalPool, int(PoolIdType id)); MOCK_METHOD1(RemovePhysicalPool, int(PoolIdType id)); MOCK_METHOD1(RemoveZone, int(ZoneIdType id)); @@ -108,6 +109,8 @@ class MockTopology : public Topology { bool scanEnable)); MOCK_METHOD1(UpdatePhysicalPool, int(const PhysicalPool &data)); + MOCK_METHOD2(UpgradePhysicalPool, int(PoolIdType poolId, + PoolsetIdType pstId)); MOCK_METHOD1(UpdateZone, int(const Zone &data)); MOCK_METHOD1(UpdateServer, int(const Server &data)); @@ -138,11 +141,19 @@ class MockTopology : public Topology { int(CopySetKey key, uint32_t allocChunkNum, uint64_t allocSize)); // find + MOCK_CONST_METHOD1(FindPoolset, + PoolsetIdType(const std::string &poolsetName)); MOCK_CONST_METHOD2(FindLogicalPool, PoolIdType(const std::string &logicalPoolName, const std::string &physicalPoolName)); MOCK_CONST_METHOD1(FindPhysicalPool, PoolIdType(const std::string &physicalPoolName)); + MOCK_CONST_METHOD2(FindPhysicalPool, + PoolIdType(const std::string &physicalPoolName, + const std::string &poolsetName)); + MOCK_CONST_METHOD2(FindPhysicalPool, + PoolIdType(const std::string &physicalPoolName, + PoolsetIdType poolsetid)); MOCK_CONST_METHOD2(FindZone, ZoneIdType(const std::string &zoneName, const std::string &physicalPoolName)); @@ -157,10 +168,20 @@ class MockTopology : public Topology { ChunkServerIdType(const std::string &hostIp, uint32_t port)); // get + MOCK_CONST_METHOD2(GetPoolset, + bool(PoolsetIdType poolsetId, Poolset *out)); MOCK_CONST_METHOD2(GetLogicalPool, bool(PoolIdType poolId, LogicalPool *out)); + MOCK_CONST_METHOD2(GetPhysicalPool, bool(PoolIdType poolId, PhysicalPool *out)); + MOCK_CONST_METHOD3(GetPhysicalPool, bool(const std::string &poolName, + const std::string &poolsetName, PhysicalPool *out)); + MOCK_CONST_METHOD3(GetPhysicalPool, bool(const std::string &poolName, + PoolsetIdType poolsetId, PhysicalPool *out)); + MOCK_CONST_METHOD2(GetPhysicalPoolInPoolset, + std::list(PoolsetIdType id, PhysicalPoolFilter filter)); + MOCK_CONST_METHOD2(GetZone, bool(ZoneIdType zoneId, Zone *out)); MOCK_CONST_METHOD2(GetServer, bool(ServerIdType serverId, Server *out)); MOCK_CONST_METHOD2(GetChunkServer, @@ -176,6 +197,9 @@ class 
MockTopology : public Topology { const std::string &physicalPoolName, LogicalPool *out)); + MOCK_CONST_METHOD2(GetPoolset, + bool(const std::string &poolsetName, + Poolset *out)); MOCK_CONST_METHOD2(GetPhysicalPool, bool(const std::string &physicalPoolName, PhysicalPool *out)); @@ -212,6 +236,10 @@ class MockTopology : public Topology { std::vector(PhysicalPoolFilter filter)); MOCK_CONST_METHOD1(GetLogicalPoolInCluster, std::vector(LogicalPoolFilter filter)); + MOCK_CONST_METHOD1(GetPoolsetInCluster, + std::vector(PoolsetFilter filter)); + MOCK_CONST_METHOD1(GetPoolsetNameInCluster, + std::vector(PoolsetFilter filter)); MOCK_CONST_METHOD1(GetCopySetsInCluster, std::vector(CopySetFilter filter)); @@ -264,6 +292,9 @@ class MockTopology : public Topology { MOCK_METHOD1(GetHostNameAndPortById, std::string(ChunkServerIdType csId)); + + MOCK_METHOD2(UpdateChunkServerVersion, + int(const std::string &, ChunkServerIdType)); }; class MockTopologyStat : public TopologyStat { diff --git a/test/mds/nameserver2/chunk_allocator_test.cpp b/test/mds/nameserver2/chunk_allocator_test.cpp index 780cb090d9..11abfd1e74 100644 --- a/test/mds/nameserver2/chunk_allocator_test.cpp +++ b/test/mds/nameserver2/chunk_allocator_test.cpp @@ -28,13 +28,13 @@ #include "src/mds/nameserver2/chunk_allocator.h" #include "src/mds/common/mds_define.h" -using ::testing::Return; +using ::curve::mds::topology::CopysetIdInfo; +using ::curve::mds::topology::PoolIdType; using ::testing::_; +using ::testing::AtLeast; using ::testing::DoAll; +using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::AtLeast; -using ::curve::mds::topology::CopysetIdInfo; -using ::curve::mds::topology::PoolIdType; namespace curve { namespace mds { @@ -42,7 +42,7 @@ namespace mds { const uint64_t DefaultChunkSize = 16 * kMB; const uint64_t DefaultSegmentSize = kGB * 1; -class ChunkAllocatorTest: public ::testing::Test { +class ChunkAllocatorTest : public ::testing::Test { protected: void SetUp() override { mockChunkIDGenerator_ = std::make_shared(); @@ -59,20 +59,25 @@ class ChunkAllocatorTest: public ::testing::Test { TEST_F(ChunkAllocatorTest, testcase1) { auto impl = std::make_shared( - mockTopologyChunkAllocator_, - mockChunkIDGenerator_); + mockTopologyChunkAllocator_, mockChunkIDGenerator_); // test segment pointer == nullptr ASSERT_EQ(impl->AllocateChunkSegment(FileType::INODE_PAGEFILE, - DefaultSegmentSize, DefaultChunkSize, 0, nullptr), false); + DefaultSegmentSize, DefaultChunkSize, + "ssdPoolset1", 0, nullptr), + false); // test offset not align with segmentsize PageFileSegment segment; ASSERT_EQ(impl->AllocateChunkSegment(FileType::INODE_PAGEFILE, - DefaultSegmentSize, DefaultChunkSize, 1, &segment), false); + DefaultSegmentSize, DefaultChunkSize, + "", 1, &segment), + false); // test chunkSize not align with segmentsize - ASSERT_EQ(impl->AllocateChunkSegment(FileType::INODE_PAGEFILE, - DefaultSegmentSize, DefaultChunkSize - 1, 0, &segment), false); + ASSERT_EQ(impl->AllocateChunkSegment( + FileType::INODE_PAGEFILE, DefaultSegmentSize, + DefaultChunkSize - 1, "ssdPoolset1", 0, &segment), + false); // test topologyAdmin_AllocateChunkRoundRobinInSingleLogicalPool // return false @@ -80,12 +85,14 @@ TEST_F(ChunkAllocatorTest, testcase1) { PageFileSegment segment; EXPECT_CALL(*mockTopologyChunkAllocator_, - AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _)) + AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _, _)) .Times(1) .WillOnce(Return(false)); - ASSERT_EQ(impl->AllocateChunkSegment(FileType::INODE_PAGEFILE, - 
DefaultSegmentSize, DefaultChunkSize, 0, &segment), false); + ASSERT_EQ(impl->AllocateChunkSegment( + FileType::INODE_PAGEFILE, DefaultSegmentSize, + DefaultChunkSize, "ssdPoolset1", 0, &segment), + false); } // test topologyAdmin_ Allocate return size error @@ -94,13 +101,15 @@ TEST_F(ChunkAllocatorTest, testcase1) { std::vector copysetInfos; EXPECT_CALL(*mockTopologyChunkAllocator_, - AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _)) + AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<3>(copysetInfos), + .WillOnce(DoAll(SetArgPointee<4>(copysetInfos), Return(true))); - ASSERT_EQ(impl->AllocateChunkSegment(FileType::INODE_PAGEFILE, - DefaultSegmentSize, DefaultChunkSize, 0, &segment), false); + ASSERT_EQ(impl->AllocateChunkSegment( + FileType::INODE_PAGEFILE, DefaultSegmentSize, + DefaultChunkSize, "ssdPoolset1", 0, &segment), + false); } // test GenChunkID error @@ -108,23 +117,26 @@ TEST_F(ChunkAllocatorTest, testcase1) { PoolIdType logicalPoolID = 1; PageFileSegment segment; std::vector copysetInfos; - for (int i = 0; i != DefaultSegmentSize/DefaultChunkSize; i++) { - CopysetIdInfo info = {logicalPoolID, i}; + for (int i = 0; i != DefaultSegmentSize / DefaultChunkSize; i++) { + CopysetIdInfo info = {logicalPoolID, + static_cast(i)}; copysetInfos.push_back(info); } EXPECT_CALL(*mockTopologyChunkAllocator_, - AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _)) + AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<3>(copysetInfos), + .WillOnce(DoAll(SetArgPointee<4>(copysetInfos), Return(true))); EXPECT_CALL(*mockChunkIDGenerator_, GenChunkID(_)) - .Times(1) - .WillOnce(Return(false)); + .Times(1) + .WillOnce(Return(false)); - ASSERT_EQ(impl->AllocateChunkSegment(FileType::INODE_PAGEFILE, - DefaultSegmentSize, DefaultChunkSize, 0, &segment), false); + ASSERT_EQ(impl->AllocateChunkSegment( + FileType::INODE_PAGEFILE, DefaultSegmentSize, + DefaultChunkSize, "ssdPoolset1", 0, &segment), + false); } // test ok @@ -132,46 +144,51 @@ TEST_F(ChunkAllocatorTest, testcase1) { PoolIdType logicalPoolID = 1; PageFileSegment segment; std::vector copysetInfos; - for (int i = 0; i != DefaultSegmentSize/DefaultChunkSize; i++) { - CopysetIdInfo info = {logicalPoolID, i}; + for (int i = 0; i != DefaultSegmentSize / DefaultChunkSize; i++) { + CopysetIdInfo info = {logicalPoolID, + static_cast(i)}; copysetInfos.push_back(info); } EXPECT_CALL(*mockTopologyChunkAllocator_, - AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _)) + AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<3>(copysetInfos), + .WillOnce(DoAll(SetArgPointee<4>(copysetInfos), Return(true))); EXPECT_CALL(*mockChunkIDGenerator_, GenChunkID(_)) - .Times(1) - .WillOnce(Return(false)); + .Times(1) + .WillOnce(Return(false)); - ASSERT_EQ(impl->AllocateChunkSegment(FileType::INODE_PAGEFILE, - DefaultSegmentSize, DefaultChunkSize, 0, &segment), false); + ASSERT_EQ(impl->AllocateChunkSegment( + FileType::INODE_PAGEFILE, DefaultSegmentSize, + DefaultChunkSize, "ssdPoolset1", 0, &segment), + false); } // test logicalid not same { PageFileSegment segment; - PoolIdType logicalPoolID = 1; std::vector copysetInfos; - uint64_t segmentSize = DefaultChunkSize*2; + uint64_t segmentSize = DefaultChunkSize * 2; - for (int i = 0; i != segmentSize/DefaultChunkSize; i++) { - CopysetIdInfo info = {i, i}; + for (int i = 0; i != segmentSize / DefaultChunkSize; i++) { + CopysetIdInfo info = {static_cast(i), 
+ static_cast(i)}; copysetInfos.push_back(info); } EXPECT_CALL(*mockTopologyChunkAllocator_, - AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _)) + AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<3>(copysetInfos), + .WillOnce(DoAll(SetArgPointee<4>(copysetInfos), Return(true))); ASSERT_EQ(impl->AllocateChunkSegment(FileType::INODE_PAGEFILE, - segmentSize, DefaultChunkSize, 0, &segment), false); + segmentSize, DefaultChunkSize, + "ssdPoolset1", 0, &segment), + false); } @@ -181,39 +198,42 @@ TEST_F(ChunkAllocatorTest, testcase1) { PoolIdType logicalPoolID = 1; std::vector copysetInfos; - uint64_t segmentSize = DefaultChunkSize*2; + uint64_t segmentSize = DefaultChunkSize * 2; - for (int i = 0; i != segmentSize/DefaultChunkSize; i++) { - CopysetIdInfo info = {logicalPoolID, i}; + for (int i = 0; i != segmentSize / DefaultChunkSize; i++) { + CopysetIdInfo info = {logicalPoolID, + static_cast(i)}; copysetInfos.push_back(info); } EXPECT_CALL(*mockTopologyChunkAllocator_, - AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _)) + AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<3>(copysetInfos), + .WillOnce(DoAll(SetArgPointee<4>(copysetInfos), Return(true))); EXPECT_CALL(*mockChunkIDGenerator_, GenChunkID(_)) - .Times(AtLeast(segmentSize/DefaultChunkSize)) - .WillRepeatedly(DoAll(SetArgPointee<0>(1), Return(true))); + .Times(AtLeast(segmentSize / DefaultChunkSize)) + .WillRepeatedly(DoAll(SetArgPointee<0>(1), Return(true))); ASSERT_EQ(impl->AllocateChunkSegment(FileType::INODE_PAGEFILE, - segmentSize, DefaultChunkSize, 0, &segment), true); + segmentSize, DefaultChunkSize, + "ssdPoolset1", 0, &segment), + true); PageFileSegment expectSegment; expectSegment.set_chunksize(DefaultChunkSize); expectSegment.set_segmentsize(segmentSize); expectSegment.set_startoffset(0); expectSegment.set_logicalpoolid(logicalPoolID); - for (uint32_t i = 0; i < segmentSize/DefaultChunkSize ; i++) { - PageFileChunkInfo* chunkinfo = expectSegment.add_chunks(); + for (uint32_t i = 0; i < segmentSize / DefaultChunkSize; i++) { + PageFileChunkInfo *chunkinfo = expectSegment.add_chunks(); chunkinfo->set_chunkid(1); chunkinfo->set_copysetid(i); LOG(INFO) << "chunkid = " << 1 << ", copysetid = " << i; } ASSERT_EQ(segment.SerializeAsString(), - expectSegment.SerializeAsString()); + expectSegment.SerializeAsString()); } } } // namespace mds diff --git a/test/mds/nameserver2/curvefs_test.cpp b/test/mds/nameserver2/curvefs_test.cpp index e08ddada5e..899b942ee8 100644 --- a/test/mds/nameserver2/curvefs_test.cpp +++ b/test/mds/nameserver2/curvefs_test.cpp @@ -27,6 +27,7 @@ #include "src/common/timeutility.h" #include "src/mds/common/mds_define.h" #include "src/mds/topology/topology_item.h" +#include "src/common/namespace_define.h" #include "test/mds/nameserver2/mock/mock_namespace_storage.h" #include "test/mds/nameserver2/mock/mock_inode_id_generator.h" @@ -45,12 +46,14 @@ using ::testing::ReturnArg; using ::testing::DoAll; using ::testing::SetArgPointee; using ::testing::SaveArg; +using ::testing::Invoke; using curve::common::Authenticator; using curve::common::TimeUtility; using curve::mds::topology::MockTopology; using curve::mds::snapshotcloneclient::MockSnapshotCloneClient; using curve::mds::snapshotcloneclient::DestFileInfo; +using curve::common::kDefaultPoolsetName; namespace curve { namespace mds { @@ -89,6 +92,7 @@ class CurveFSTest: public ::testing::Test { fileInfo.set_filename(RECYCLEBINDIRNAME); 
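// Editor's note: the static_cast additions in this patch (the Operation and
// EtcdConf initializers in the etcd client test, and the CopysetIdInfo
// initializers in chunk_allocator_test.cpp just above) all fix the same issue:
// C++11 brace initialization rejects implicit narrowing conversions, so a
// size_t result of strlen()/size(), or an int loop index stored in an unsigned
// id field, must be cast to the field type explicitly. A minimal standalone
// sketch; ConfSketch is a made-up stand-in, not the real EtcdConf struct.
#include <cstring>

struct ConfSketch {
    char *endpoints;
    int len;        // narrower than std::size_t
    int timeoutMs;
};

int main() {
    char endpoints[] = "127.0.0.1:2377";
    // ConfSketch bad{endpoints, strlen(endpoints), 20000};
    //   -> rejected: narrowing conversion from 'size_t' to 'int' in {}-init
    ConfSketch ok{endpoints, static_cast<int>(strlen(endpoints)), 20000};
    return ok.len == 14 ? 0 : 1;
}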
fileInfo.set_filetype(FileType::INODE_DIRECTORY); fileInfo.set_owner(authOptions_.rootOwner); + fileInfo.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(AtLeast(1)) .WillRepeatedly(DoAll(SetArgPointee<2>(fileInfo), @@ -105,6 +109,10 @@ class CurveFSTest: public ::testing::Test { kMiniFileLength = curvefs_->GetMinFileLength(); kMaxFileLength = curvefs_->GetMaxFileLength(); curvefs_->Run(); + + ON_CALL(*topology_, GetPoolsetNameInCluster(_)) + .WillByDefault( + Return(std::vector{kDefaultPoolsetName})); } void TearDown() override { @@ -135,27 +143,31 @@ TEST_F(CurveFSTest, testCreateFile1) { spacePools.insert(std::pair(1, kMaxFileLength - kMiniFileLength)); EXPECT_CALL(*mockChunkAllocator_, - GetRemainingSpaceInLogicalPool(_, _)) + GetRemainingSpaceInLogicalPool(_, _, _)) .Times(AtLeast(1)) .WillRepeatedly(DoAll(SetArgPointee<1>(spacePools), Return())); - ASSERT_EQ(curvefs_->CreateFile("/file1", "owner1", FileType::INODE_PAGEFILE, + ASSERT_EQ(curvefs_->CreateFile("/file1", "", + "owner1", FileType::INODE_PAGEFILE, kMiniFileLength - 1, 0, 0), StatusCode::kFileLengthNotSupported); - ASSERT_EQ(curvefs_->CreateFile("/file1", "owner1", FileType::INODE_PAGEFILE, + ASSERT_EQ(curvefs_->CreateFile("/file1", "", + "owner1", FileType::INODE_PAGEFILE, kMaxFileLength + 1, 0, 0), StatusCode::kFileLengthNotSupported); - ASSERT_EQ(curvefs_->CreateFile("/flie1", "owner1", FileType::INODE_PAGEFILE, + ASSERT_EQ(curvefs_->CreateFile("/flie1", "", + "owner1", FileType::INODE_PAGEFILE, kMiniFileLength + 1, 0, 0), StatusCode::kFileLengthNotSupported); - ASSERT_EQ(curvefs_->CreateFile("/flie1", "owner1", FileType::INODE_PAGEFILE, - kMaxFileLength - kMiniFileLength + DefaultSegmentSize, 0, 0), - StatusCode::kFileLengthNotSupported); + ASSERT_EQ(curvefs_->CreateFile("/flie1", "", "owner1", + FileType::INODE_PAGEFILE, + kMaxFileLength - kMiniFileLength + DefaultSegmentSize, + 0, 0), StatusCode::kFileLengthNotSupported); - ASSERT_EQ(curvefs_->CreateFile("/", "", FileType::INODE_DIRECTORY, 0, 0, 0), - StatusCode::kFileExists); + ASSERT_EQ(curvefs_->CreateFile("/", "", "", FileType::INODE_DIRECTORY, + 0, 0, 0), StatusCode::kFileExists); { // test file exist @@ -163,7 +175,7 @@ TEST_F(CurveFSTest, testCreateFile1) { .Times(AtLeast(1)) .WillOnce(Return(StoreStatus::OK)); - auto statusCode = curvefs_->CreateFile("/file1", "owner1", + auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kFileExists); } @@ -174,7 +186,7 @@ TEST_F(CurveFSTest, testCreateFile1) { .Times(AtLeast(1)) .WillOnce(Return(StoreStatus::InternalError)); - auto statusCode = curvefs_->CreateFile("/file1", "owner1", + auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kStorageError); } @@ -193,7 +205,7 @@ TEST_F(CurveFSTest, testCreateFile1) { .Times(1) .WillOnce(Return(true)); - auto statusCode = curvefs_->CreateFile("/file1", "owner1", + auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kStorageError); } @@ -213,7 +225,7 @@ TEST_F(CurveFSTest, testCreateFile1) { .WillOnce(Return(true)); - auto statusCode = curvefs_->CreateFile("/file1", "owner1", + auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kOK); } @@ -228,7 +240,7 @@ TEST_F(CurveFSTest, 
testCreateFile1) { .Times(1) .WillOnce(Return(false)); - auto statusCode = curvefs_->CreateFile("/file1", "owner1", + auto statusCode = curvefs_->CreateFile("/file1", "", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kStorageError); } @@ -250,7 +262,8 @@ TEST_F(CurveFSTest, testCreateFile1) { .WillOnce(Return(true)); auto statusCode = curvefs_->CreateFile( - "/dir1", "owner1", FileType::INODE_DIRECTORY, 0, 0, 0); + "/dir1", kDefaultPoolsetName, "owner1", + FileType::INODE_DIRECTORY, 0, 0, 0); ASSERT_EQ(statusCode, StatusCode::kOK); ASSERT_FALSE(fileInfo.has_throttleparams()); } @@ -272,8 +285,9 @@ TEST_F(CurveFSTest, testCreateFile1) { .WillOnce(Return(true)); auto statusCode = - curvefs_->CreateFile("/file1", "owner1", FileType::INODE_PAGEFILE, - kMiniFileLength, 0, 0); + curvefs_->CreateFile("/file1", kDefaultPoolsetName, + "owner1", FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0); ASSERT_EQ(statusCode, StatusCode::kOK); ASSERT_TRUE(fileInfo.has_throttleparams()); } @@ -286,7 +300,7 @@ TEST_F(CurveFSTest, testCreateStripeFile) { spacePools.insert(std::pair(1, kMaxFileLength)); spacePools.insert(std::pair(2, kMaxFileLength)); EXPECT_CALL(*mockChunkAllocator_, - GetRemainingSpaceInLogicalPool(_, _)) + GetRemainingSpaceInLogicalPool(_, _, _)) .Times(AtLeast(1)) .WillRepeatedly(DoAll(SetArgPointee<1>(spacePools), Return())); EXPECT_CALL(*storage_, GetFile(_, _, _)) @@ -301,36 +315,145 @@ TEST_F(CurveFSTest, testCreateStripeFile) { .Times(1) .WillOnce(Return(true)); - ASSERT_EQ(curvefs_->CreateFile("/file1", "owner1", + ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, 1 * 1024 * 1024, 4), StatusCode::kOK); } { // test stripeStripe and stripeCount is not all zero - ASSERT_EQ(curvefs_->CreateFile("/file1", "owner1", + ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, 0, 1), StatusCode::kParaError); - ASSERT_EQ(curvefs_->CreateFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 1024*1024ul, 0), - StatusCode::kParaError); + ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1", + FileType::INODE_PAGEFILE, kMiniFileLength, 1024*1024ul, + 0), StatusCode::kParaError); } { // test stripeUnit more then chunksize - ASSERT_EQ(curvefs_->CreateFile("/file1", "owner1", - FileType::INODE_PAGEFILE, kMiniFileLength, 16*1024*1024ul + 1, 0), - StatusCode::kParaError); + ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1", + FileType::INODE_PAGEFILE, kMiniFileLength, 16*1024*1024ul + 1, + 0), StatusCode::kParaError); } { // test stripeUnit is not divisible by chunksize - ASSERT_EQ(curvefs_->CreateFile("/file1", "owner1", + ASSERT_EQ(curvefs_->CreateFile("/file1", "", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, 4*1024*1024ul + 1, 0), StatusCode::kParaError); } } +TEST_F(CurveFSTest, testCreateFileWithPoolset) { + const std::map spacePools{ + {1, kMaxFileLength}, + {2, kMaxFileLength}, + }; + + EXPECT_CALL(*mockChunkAllocator_, GetRemainingSpaceInLogicalPool(_, _, _)) + .Times(AtLeast(1)) + .WillRepeatedly(DoAll(SetArgPointee<1>(spacePools), Return())); + + EXPECT_CALL(*storage_, GetFile(_, _, _)) + .Times(AtLeast(1)) + .WillRepeatedly(Return(StoreStatus::KeyNotExist)); + + EXPECT_CALL(*inodeIdGenerator_, GenInodeID(_)) + .WillRepeatedly(Invoke([](uint64_t* id) { + static std::atomic counter{0}; + *id = counter++; + return true; + })); + + // create file without poolset, assign to default poolset + { + FileInfo info; + 
EXPECT_CALL(*storage_, PutFile(_)) + .WillOnce(DoAll(SaveArg<0>(&info), Return(StoreStatus::OK))); + + ASSERT_EQ(StatusCode::kOK, + curvefs_->CreateFile("/file1", "", "owner", + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0)); + ASSERT_EQ(kDefaultPoolsetName, info.poolset()); + } + + // create file with poolset but not same with anyone + { + EXPECT_CALL(*topology_, GetPoolsetNameInCluster(_)) + .WillOnce(Return( + std::vector{kDefaultPoolsetName, "SSD"})); + + ASSERT_EQ(StatusCode::kPoolsetNotExist, + curvefs_->CreateFile("/file1", "HDD", "owner", + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0)); + } + + // create file with poolset and poolset exists + { + EXPECT_CALL(*storage_, PutFile(_)) + .WillOnce(Return(StoreStatus::OK)); + EXPECT_CALL(*topology_, GetPoolsetNameInCluster(_)); + + ASSERT_EQ(StatusCode::kOK, + curvefs_->CreateFile("/file1", kDefaultPoolsetName, "owner", + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0)); + } + + // cluster doesn't have poolset + { + EXPECT_CALL(*topology_, GetPoolsetNameInCluster(_)) + .WillOnce(Return( + std::vector{})); + ASSERT_EQ(StatusCode::kPoolsetNotExist, + curvefs_->CreateFile("/file1", "SSD", "owner", + FileType::INODE_PAGEFILE, + kMiniFileLength, 0, 0)); + } +} + +TEST(TestSelectPoolsetByRules, Test) { + ASSERT_EQ(kDefaultPoolsetName, SelectPoolsetByRules("/filename", {})); + + { + std::map rules{ + {"/system/", "system"} + }; + ASSERT_EQ("system", SelectPoolsetByRules("/system/file", rules)); + } + + { + std::map rules{ + {"/system/", "system"} + }; + ASSERT_EQ(kDefaultPoolsetName, SelectPoolsetByRules("/systems", rules)); + } + + { + std::map rules{ + {"/system/", "system"}, + {"/systems/", "system1"}, + }; + ASSERT_EQ("system1", SelectPoolsetByRules("/systems/file", rules)); + } + + // subdir rules + { + std::map rules{ + {"/system/", "system"}, + {"/system/sub/", "system-sub"} + }; + ASSERT_EQ("system-sub", + SelectPoolsetByRules("/system/sub/file", rules)); + + ASSERT_EQ("system-sub", + SelectPoolsetByRules("/system/sub/sub/file", rules)); + } +} + TEST_F(CurveFSTest, testGetFileInfo) { // test parm error FileInfo fileInfo; @@ -1512,7 +1635,6 @@ TEST_F(CurveFSTest, testRenameFile) { // new file exist, rename success { - uint64_t fileId = 10; FileInfo fileInfo1; FileInfo fileInfo2; FileInfo fileInfo3; @@ -2190,6 +2312,7 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_length(kMiniFileLength); fileInfo2.set_segmentsize(DefaultSegmentSize); + fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(2) @@ -2217,6 +2340,7 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_length(kMiniFileLength); fileInfo2.set_segmentsize(DefaultSegmentSize); + fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(2) @@ -2230,7 +2354,8 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { .WillOnce(Return(StoreStatus::KeyNotExist)); - EXPECT_CALL(*mockChunkAllocator_, AllocateChunkSegment(_, _, _, _, _)) + EXPECT_CALL(*mockChunkAllocator_, + AllocateChunkSegment(_, _, _, _, _, _)) .Times(1) .WillOnce(Return(true)); @@ -2275,6 +2400,7 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_length(kMiniFileLength); fileInfo2.set_segmentsize(DefaultSegmentSize); + fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(2) @@ -2298,6 +2424,7 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) 
{ fileInfo2.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_length(kMiniFileLength); fileInfo2.set_segmentsize(DefaultSegmentSize); + fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(2) @@ -2321,6 +2448,7 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_length(kMiniFileLength); fileInfo2.set_segmentsize(DefaultSegmentSize); + fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(2) @@ -2334,7 +2462,8 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { .WillOnce(Return(StoreStatus::KeyNotExist)); - EXPECT_CALL(*mockChunkAllocator_, AllocateChunkSegment(_, _, _, _, _)) + EXPECT_CALL(*mockChunkAllocator_, + AllocateChunkSegment(_, _, _, _, _, _)) .Times(1) .WillOnce(Return(false)); @@ -2353,6 +2482,7 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { fileInfo2.set_filetype(FileType::INODE_PAGEFILE); fileInfo2.set_length(kMiniFileLength); fileInfo2.set_segmentsize(DefaultSegmentSize); + fileInfo2.set_poolset("default"); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(2) @@ -2366,7 +2496,8 @@ TEST_F(CurveFSTest, testGetOrAllocateSegment) { .WillOnce(Return(StoreStatus::KeyNotExist)); - EXPECT_CALL(*mockChunkAllocator_, AllocateChunkSegment(_, _, _, _, _)) + EXPECT_CALL(*mockChunkAllocator_, + AllocateChunkSegment(_, _, _, _, _, _)) .Times(1) .WillOnce(Return(true)); @@ -3923,12 +4054,12 @@ TEST_F(CurveFSTest, testCreateCloneFile) { // test parm error ASSERT_EQ(curvefs_->CreateCloneFile("/file1", "owner1", FileType::INODE_DIRECTORY, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, nullptr), + curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr), StatusCode::kParaError); ASSERT_EQ(curvefs_->CreateCloneFile("/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength - 1, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, nullptr), + curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr), StatusCode::kParaError); { @@ -3939,7 +4070,7 @@ TEST_F(CurveFSTest, testCreateCloneFile) { auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, nullptr); + curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr); ASSERT_EQ(statusCode, StatusCode::kFileExists); } @@ -3951,7 +4082,7 @@ TEST_F(CurveFSTest, testCreateCloneFile) { auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, nullptr); + curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr); ASSERT_EQ(statusCode, StatusCode::kStorageError); } @@ -3967,7 +4098,7 @@ TEST_F(CurveFSTest, testCreateCloneFile) { auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, nullptr); + curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr); ASSERT_EQ(statusCode, StatusCode::kStorageError); } @@ -3987,7 +4118,7 @@ TEST_F(CurveFSTest, testCreateCloneFile) { auto statusCode = curvefs_->CreateCloneFile("/file1", "owner1", FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, nullptr); + curvefs_->GetDefaultChunkSize(), 0, 0, "default", nullptr); ASSERT_EQ(statusCode, StatusCode::kStorageError); } { @@ -4007,7 +4138,8 @@ TEST_F(CurveFSTest, testCreateCloneFile) { FileInfo fileInfo; auto statusCode = curvefs_->CreateCloneFile("/file1", 
"owner1", FileType::INODE_PAGEFILE, kMiniFileLength, kStartSeqNum, - curvefs_->GetDefaultChunkSize(), 0, 0, &fileInfo); + curvefs_->GetDefaultChunkSize(), 0, 0, + "default", &fileInfo); ASSERT_EQ(statusCode, StatusCode::kOK); ASSERT_EQ(fileInfo.filename(), "file1"); ASSERT_EQ(fileInfo.owner(), "owner1"); diff --git a/test/mds/nameserver2/fakes.h b/test/mds/nameserver2/fakes.h index 140ab65ee4..bae4ae261d 100644 --- a/test/mds/nameserver2/fakes.h +++ b/test/mds/nameserver2/fakes.h @@ -79,7 +79,9 @@ class FackTopologyChunkAllocator: public TopologyChunkAllocator { FackTopologyChunkAllocator() {} bool AllocateChunkRandomInSingleLogicalPool( - FileType fileType, uint32_t chunkNumer, + FileType fileType, + const std::string& pstName, + uint32_t chunkNumer, ChunkSizeType chunkSize, std::vector *infos) override { for (uint32_t i = 0; i != chunkNumer; i++) { @@ -89,8 +91,8 @@ class FackTopologyChunkAllocator: public TopologyChunkAllocator { return true; } bool AllocateChunkRoundRobinInSingleLogicalPool( - FileType fileType, uint32_t chunkNumer, - ChunkSizeType chunkSize, + FileType fileType, const std::string& pstName, + uint32_t chunkNumer, ChunkSizeType chunkSize, std::vector *infos) override { for (uint32_t i = 0; i != chunkNumer; i++) { CopysetIdInfo copysetIdInfo{0, i}; @@ -98,9 +100,11 @@ class FackTopologyChunkAllocator: public TopologyChunkAllocator { } return true; } + void GetRemainingSpaceInLogicalPool( const std::vector& logicalPools, - std::map* enoughSpacePools) override { + std::map* enoughSpacePools, + const std::string& pstName) override { for (auto i = logicalPools.begin(); i != logicalPools.end(); i++) { enoughSpacePools->insert(std::pair(*i, 10*FACK_FILE_INTTIALIZE)); diff --git a/test/mds/nameserver2/mock/mock_chunk_allocate.h b/test/mds/nameserver2/mock/mock_chunk_allocate.h index 1aea9d3bd9..0e364b24d1 100644 --- a/test/mds/nameserver2/mock/mock_chunk_allocate.h +++ b/test/mds/nameserver2/mock/mock_chunk_allocate.h @@ -26,6 +26,7 @@ #include #include #include +#include #include "src/mds/nameserver2/chunk_allocator.h" namespace curve { @@ -36,11 +37,18 @@ class MockChunkAllocator: public ChunkSegmentAllocator { MOCK_METHOD4(AllocateChunkSegment, bool(SegmentSizeType, ChunkSizeType, offset_t, PageFileSegment*)); - MOCK_METHOD5(AllocateChunkSegment, bool(FileType, SegmentSizeType, - ChunkSizeType, offset_t, PageFileSegment*)); - MOCK_METHOD2(GetRemainingSpaceInLogicalPool, - void(const std::vector &, - std::map*)); + MOCK_METHOD3(GetRemainingSpaceInLogicalPool, + void(const std::vector&, + std::map*, + const std::string& pstName)); + + MOCK_METHOD6(AllocateChunkSegment, + bool(FileType, + SegmentSizeType, + ChunkSizeType, + const std::string&, + offset_t, + PageFileSegment*)); }; } // namespace mds } // namespace curve diff --git a/test/mds/nameserver2/mock/mock_topology_chunk_allocator.h b/test/mds/nameserver2/mock/mock_topology_chunk_allocator.h index 024056e008..e5adde5938 100644 --- a/test/mds/nameserver2/mock/mock_topology_chunk_allocator.h +++ b/test/mds/nameserver2/mock/mock_topology_chunk_allocator.h @@ -27,6 +27,7 @@ #include #include #include +#include #include "src/mds/topology/topology_chunk_allocator.h" using ::curve::mds::topology::TopologyChunkAllocator; @@ -39,19 +40,20 @@ class MockTopologyChunkAllocator: public TopologyChunkAllocator { using CopysetIdInfo = ::curve::mds::topology::CopysetIdInfo; ~MockTopologyChunkAllocator() {} - MOCK_METHOD4(AllocateChunkRandomInSingleLogicalPool, - bool(FileType, uint32_t, - ChunkSizeType chunkSize, std::vector *)); - - 
MOCK_METHOD4(AllocateChunkRoundRobinInSingleLogicalPool, - bool(FileType, uint32_t, - ChunkSizeType chunkSize, std::vector *)); + MOCK_METHOD5(AllocateChunkRandomInSingleLogicalPool, + bool(FileType, const std::string&, uint32_t, + ChunkSizeType chunkSize, std::vector*)); MOCK_METHOD3(UpdateChunkFilePoolAllocConfig, void(bool, bool, uint32_t)); - MOCK_METHOD2(GetRemainingSpaceInLogicalPool, + MOCK_METHOD3(GetRemainingSpaceInLogicalPool, void(const std::vector &, - std::map*)); + std::map*, + const std::string&)); + + MOCK_METHOD5(AllocateChunkRoundRobinInSingleLogicalPool, + bool(FileType, const std::string&, uint32_t, + ChunkSizeType chunkSize, std::vector*)); }; } // namespace mds diff --git a/test/mds/nameserver2/namespace_service_test.cpp b/test/mds/nameserver2/namespace_service_test.cpp index 61549d6cac..c5247030f2 100644 --- a/test/mds/nameserver2/namespace_service_test.cpp +++ b/test/mds/nameserver2/namespace_service_test.cpp @@ -57,6 +57,8 @@ using ::testing::Matcher; namespace curve { namespace mds { +static const char* kDefaultPoolset = "default"; + class NameSpaceServiceTest : public ::testing::Test { public: struct RequestOption { @@ -133,6 +135,9 @@ class NameSpaceServiceTest : public ::testing::Test { kMaxFileLength = kCurveFS.GetMaxFileLength(); kCurveFS.Run(); + ON_CALL(*topology_, GetPoolsetNameInCluster(_)) + .WillByDefault(Return(std::vector{kDefaultPoolset})); + std::this_thread::sleep_for(std::chrono::microseconds( 11 * fileRecordOptions.fileRecordExpiredTimeUs)); } @@ -312,6 +317,7 @@ TEST_F(NameSpaceServiceTest, test1) { cntl.Reset(); request.set_filename("/dir"); + request.set_poolset(""); request.set_owner("owner3"); request.set_date(TimeUtility::GetTimeofDayUs()); request.set_filetype(INODE_DIRECTORY); @@ -327,6 +333,7 @@ TEST_F(NameSpaceServiceTest, test1) { cntl.Reset(); request.set_filename("/dir/file3"); + request.set_poolset(""); request.set_owner("owner3"); request.set_date(TimeUtility::GetTimeofDayUs()); request.set_filetype(INODE_PAGEFILE); @@ -375,6 +382,7 @@ TEST_F(NameSpaceServiceTest, test1) { // 如果创建一个已经存在的文件,会创建失败kFileExists cntl.Reset(); request.set_filename("/file2"); + request.set_poolset(""); request.set_owner("owner2"); request.set_date(TimeUtility::GetTimeofDayUs()); request.set_filetype(INODE_PAGEFILE); @@ -425,6 +433,7 @@ TEST_F(NameSpaceServiceTest, test1) { request.set_date(TimeUtility::GetTimeofDayUs()); request.set_filetype(INODE_APPENDECFILE); request.set_filelength(fileLength); + cntl.set_log_id(3); // set by user stub.CreateFile(&cntl, &request, &response, NULL); if (!cntl.Failed()) { @@ -439,6 +448,7 @@ TEST_F(NameSpaceServiceTest, test1) { request.set_date(TimeUtility::GetTimeofDayUs()); request.set_filetype(INODE_SNAPSHOT_PAGEFILE); request.set_filelength(fileLength); + cntl.set_log_id(3); // set by user stub.CreateFile(&cntl, &request, &response, NULL); if (!cntl.Failed()) { @@ -1171,6 +1181,7 @@ TEST_F(NameSpaceServiceTest, snapshottests) { uint64_t fileLength = kMiniFileLength; request.set_filename("/file1"); + request.set_poolset(""); request.set_owner("owner1"); request.set_date(TimeUtility::GetTimeofDayUs()); request.set_filetype(INODE_PAGEFILE); @@ -1446,7 +1457,6 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { request.set_date(TimeUtility::GetTimeofDayUs()); request.set_filetype(INODE_DIRECTORY); request.set_filelength(0); - cntl.set_log_id(3); // set by user stub.CreateFile(&cntl, &request, &response, NULL); if (!cntl.Failed()) { @@ -1874,6 +1884,7 @@ TEST_F(NameSpaceServiceTest, clonetest) { 
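// Editor's note: the namespace_service_test.cpp changes above thread the new
// poolset field through every CreateFileRequest; an empty poolset tells the
// MDS to fall back to the cluster default (see testCreateFileWithPoolset in
// curvefs_test.cpp). A small helper of the following shape could remove the
// repetition. It is only a sketch built from setters already shown in this
// patch; the helper name is illustrative and it assumes the same includes and
// namespaces as namespace_service_test.cpp.
CreateFileRequest MakeCreateFileRequest(const std::string &filename,
                                        const std::string &owner,
                                        uint64_t filelength,
                                        const std::string &poolset = "") {
    CreateFileRequest request;
    request.set_filename(filename);
    request.set_poolset(poolset);    // "" => MDS picks the default poolset
    request.set_owner(owner);
    request.set_date(TimeUtility::GetTimeofDayUs());
    request.set_filetype(INODE_PAGEFILE);
    request.set_filelength(filelength);
    return request;
}
// usage: request = MakeCreateFileRequest("/file1", "owner1", fileLength);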
request.set_date(TimeUtility::GetTimeofDayUs()); request.set_owner("tom"); request.set_clonesource("/sourcefile1"); + request.set_poolset(kDefaultPoolset); cntl.set_log_id(1); stub.CreateCloneFile(&cntl, &request, &response, NULL); @@ -2145,6 +2156,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { createRequest.set_date(TimeUtility::GetTimeofDayUs()); createRequest.set_filetype(INODE_DIRECTORY); createRequest.set_filelength(0); + stub.CreateFile(&cntl, &createRequest, &createResponse, NULL); if (!cntl.Failed()) { ASSERT_EQ(createResponse.statuscode(), StatusCode::kOK); @@ -2158,6 +2170,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { createRequest.set_date(TimeUtility::GetTimeofDayUs()); createRequest.set_filetype(INODE_PAGEFILE); createRequest.set_filelength(fileLength); + stub.CreateFile(&cntl, &createRequest, &createResponse, NULL); if (!cntl.Failed()) { ASSERT_EQ(createResponse.statuscode(), StatusCode::kOK); @@ -2339,6 +2352,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { createRequest.set_date(TimeUtility::GetTimeofDayUs()); createRequest.set_filetype(INODE_PAGEFILE); createRequest.set_filelength(fileLength); + stub.CreateFile(&cntl, &createRequest, &createResponse, NULL); if (!cntl.Failed()) { ASSERT_EQ(createResponse.statuscode(), StatusCode::kOK); @@ -2571,6 +2585,7 @@ TEST_F(NameSpaceServiceTest, testRecoverFile) { createCloneRequest.set_chunksize(curveFSOptions.defaultChunkSize); createCloneRequest.set_date(TimeUtility::GetTimeofDayUs()); createCloneRequest.set_owner("owner"); + createCloneRequest.set_poolset(kDefaultPoolset); createCloneRequest.set_clonesource("/sourcefile1"); cntl.Reset(); stub.CreateCloneFile(&cntl, &createCloneRequest, diff --git a/test/mds/schedule/leaderScheduler_test.cpp b/test/mds/schedule/leaderScheduler_test.cpp index e109647809..3be00637b0 100644 --- a/test/mds/schedule/leaderScheduler_test.cpp +++ b/test/mds/schedule/leaderScheduler_test.cpp @@ -132,7 +132,6 @@ TEST_F(TestLeaderSchedule, test_copySet_has_candidate) { PeerInfo peer2(2, 2, 2, "192.168.10.2", 9000); PeerInfo peer3(3, 3, 3, "192.168.10.3", 9000); auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; - auto offlineState = ::curve::mds::topology::OnlineState::OFFLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); ChunkServerInfo csInfo1( @@ -173,7 +172,6 @@ TEST_F(TestLeaderSchedule, test_cannot_get_chunkServerInfo) { PeerInfo peer2(2, 2, 2, "192.168.10.2", 9000); PeerInfo peer3(3, 3, 3, "192.168.10.3", 9000); auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; - auto offlineState = ::curve::mds::topology::OnlineState::OFFLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); ChunkServerInfo csInfo1( @@ -218,7 +216,6 @@ TEST_F(TestLeaderSchedule, test_no_need_tranferLeaderOut) { PeerInfo peer2(2, 2, 2, "192.168.10.2", 9000); PeerInfo peer3(3, 3, 3, "192.168.10.3", 9000); auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; - auto offlineState = ::curve::mds::topology::OnlineState::OFFLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); ChunkServerInfo csInfo1( @@ -265,7 +262,6 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_normal) { PeerInfo peer5(5, 5, 5, "192.168.10.5", 9000); PeerInfo peer6(6, 6, 6, "192.168.10.6", 9000); auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; 
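// Editor's note: the TestSelectPoolsetByRules cases in curvefs_test.cpp above
// (and TestParsePoolsetRules in mds_test.cpp below) pin down the rule
// semantics: rule keys are directory prefixes ending in '/', the longest key
// that prefixes the file path wins, and no match falls back to the default
// poolset. The following is a behaviour sketch under those assumptions, not
// the project's implementation; the function and constant names are made up.
#include <cstddef>
#include <map>
#include <string>

static const char kDefaultPoolsetSketch[] = "default";   // assumed default name

std::string SelectPoolsetByRulesSketch(
        const std::string &path,
        const std::map<std::string, std::string> &rules) {
    std::string chosen = kDefaultPoolsetSketch;
    std::size_t longest = 0;
    for (const auto &kv : rules) {
        const std::string &prefix = kv.first;             // e.g. "/system/"
        if (path.compare(0, prefix.size(), prefix) == 0 &&
            prefix.size() > longest) {
            longest = prefix.size();
            chosen = kv.second;                           // e.g. "system"
        }
    }
    return chosen;
}
// SelectPoolsetByRulesSketch("/system/sub/file",
//     {{"/system/", "system"}, {"/system/sub/", "system-sub"}}) == "system-sub"
// SelectPoolsetByRulesSketch("/systems", {{"/system/", "system"}}) == "default"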
- auto offlineState = ::curve::mds::topology::OnlineState::OFFLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); ChunkServerInfo csInfo1( @@ -361,7 +357,6 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_pendding) { PeerInfo peer5(5, 5, 5, "192.168.10.5", 9000); PeerInfo peer6(6, 6, 6, "192.168.10.6", 9000); auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; - auto offlineState = ::curve::mds::topology::OnlineState::OFFLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); ChunkServerInfo csInfo1( @@ -452,7 +447,6 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { PeerInfo peer3(3, 3, 3, "192.168.10.3", 9000); PeerInfo peer4(3, 4, 4, "192.168.10.4", 9000); auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; - auto offlineState = ::curve::mds::topology::OnlineState::OFFLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); ChunkServerInfo csInfo1( @@ -535,7 +529,6 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) { PeerInfo peer3(3, 3, 3, "192.168.10.3", 9000); PeerInfo peer4(3, 4, 4, "192.168.10.4", 9000); auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; - auto offlineState = ::curve::mds::topology::OnlineState::OFFLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); ChunkServerInfo csInfo1( diff --git a/test/mds/schedule/recoverScheduler_test.cpp b/test/mds/schedule/recoverScheduler_test.cpp index 036b21526f..c7c11b299e 100644 --- a/test/mds/schedule/recoverScheduler_test.cpp +++ b/test/mds/schedule/recoverScheduler_test.cpp @@ -214,7 +214,6 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { ChunkServerIdType id1 = 1; ChunkServerIdType id2 = 2; ChunkServerIdType id3 = 3; - ChunkServerIdType id4 = 4; Operator op; EXPECT_CALL(*topoAdapter_, GetAvgScatterWidthInLogicalPool(_)) .WillRepeatedly(Return(90)); diff --git a/test/mds/schedule/schedulerPOC/mock_topology.h b/test/mds/schedule/schedulerPOC/mock_topology.h index 3ebc810525..47a6747b0c 100644 --- a/test/mds/schedule/schedulerPOC/mock_topology.h +++ b/test/mds/schedule/schedulerPOC/mock_topology.h @@ -41,11 +41,13 @@ using ::testing::Return; using ::testing::_; +using ::curve::mds::topology::PoolsetIdType; using ::curve::mds::topology::PoolIdType; using ::curve::mds::topology::ZoneIdType; using ::curve::mds::topology::ServerIdType; using ::curve::mds::topology::ChunkServerIdType; using ::curve::mds::topology::CopySetIdType; +using ::curve::mds::topology::Poolset; using ::curve::mds::topology::LogicalPool; using ::curve::mds::topology::PhysicalPool; using ::curve::mds::topology::Zone; @@ -63,11 +65,14 @@ using ::curve::mds::topology::ClusterInformation; namespace curve { namespace mds { namespace schedule { + class MockIdGenerator : public TopologyIdGenerator { public: MockIdGenerator() {} ~MockIdGenerator() {} + MOCK_METHOD1(initPoolsetIdGenerator, void(PoolsetIdType + idMax)); MOCK_METHOD1(initLogicalPoolIdGenerator, void(PoolIdType idMax)); MOCK_METHOD1(initPhysicalPoolIdGenerator, void(PoolIdType @@ -83,6 +88,7 @@ class MockIdGenerator : public TopologyIdGenerator { MOCK_METHOD0(GenLogicalPoolId, PoolIdType()); MOCK_METHOD0(GenPhysicalPoolId, PoolIdType()); + MOCK_METHOD0(GenPoolsetId, PoolsetIdType()); 
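// Editor's note: several mocks in this patch grow by one argument because the
// real interfaces gained a poolset parameter (MOCK_METHOD4 -> MOCK_METHOD5 for
// AllocateChunkRoundRobinInSingleLogicalPool, MOCK_METHOD5 -> MOCK_METHOD6 for
// AllocateChunkSegment), which also shifts the output-argument index used by
// the tests (SetArgPointee<3> -> SetArgPointee<4>). A minimal standalone
// sketch of that pattern; Allocator/MockAllocator are made-up names and the
// test is assumed to link against gtest_main.
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <string>
#include <vector>

class Allocator {
 public:
    virtual ~Allocator() = default;
    // old signature: Allocate(uint32_t num, std::vector<int> *out);
    // new signature inserts the poolset name before the output parameter
    virtual bool Allocate(const std::string &poolset, uint32_t num,
                          std::vector<int> *out) = 0;
};

class MockAllocator : public Allocator {
 public:
    // arity bumped from MOCK_METHOD2 to MOCK_METHOD3 to match the new signature
    MOCK_METHOD3(Allocate, bool(const std::string &, uint32_t,
                                std::vector<int> *));
};

TEST(MockAritySketch, OutputParamIndexShifts) {
    MockAllocator mock;
    std::vector<int> copysets{1, 2, 3};
    // the output parameter moved from index 1 to index 2
    EXPECT_CALL(mock, Allocate(::testing::_, ::testing::_, ::testing::_))
        .WillOnce(::testing::DoAll(::testing::SetArgPointee<2>(copysets),
                                   ::testing::Return(true)));
    std::vector<int> out;
    ASSERT_TRUE(mock.Allocate("ssdPoolset1", 3, &out));
    ASSERT_EQ(3u, out.size());
}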
MOCK_METHOD0(GenZoneId, ZoneIdType()); MOCK_METHOD0(GenServerId, ServerIdType()); MOCK_METHOD0(GenChunkServerId, ChunkServerIdType()); @@ -109,6 +115,9 @@ class MockStorage : public TopologyStorage { const std::string &url, const std::string &password)); + MOCK_METHOD2(LoadPoolset, + bool(std::unordered_map + *poolsetMap, PoolsetIdType * maxPoolsetId)); MOCK_METHOD2(LoadLogicalPool, bool(std::unordered_map *logicalPoolMap, PoolIdType * maxLogicalPoolId)); @@ -126,6 +135,8 @@ class MockStorage : public TopologyStorage { std::map *copySetMap, std::map * copySetIdMaxMap)); + MOCK_METHOD1(StoragePoolset, bool( + const Poolset &data)); MOCK_METHOD1(StorageLogicalPool, bool( const LogicalPool &data)); MOCK_METHOD1(StoragePhysicalPool, bool( @@ -139,6 +150,8 @@ class MockStorage : public TopologyStorage { MOCK_METHOD1(StorageCopySet, bool( const ::curve::mds::topology::CopySetInfo &data)); + MOCK_METHOD1(DeletePoolset, bool(PoolsetIdType + id)); MOCK_METHOD1(DeleteLogicalPool, bool(PoolIdType id)); MOCK_METHOD1(DeletePhysicalPool, bool(PoolIdType diff --git a/test/mds/schedule/schedulerPOC/scheduler_poc.cpp b/test/mds/schedule/schedulerPOC/scheduler_poc.cpp index f6fdfa107c..b8b3ddb148 100644 --- a/test/mds/schedule/schedulerPOC/scheduler_poc.cpp +++ b/test/mds/schedule/schedulerPOC/scheduler_poc.cpp @@ -44,16 +44,16 @@ using ::curve::mds::topology::MockTopology; -using ::curve::mds::topology::Server; using ::curve::mds::topology::ChunkServer; -using ::curve::mds::topology::CopySetInfo; using ::curve::mds::topology::ChunkServerState; +using ::curve::mds::topology::CopySetInfo; +using ::curve::mds::topology::Server; using ::curve::mds::topology::TopologyOption; -using ::curve::mds::topology::ZoneIdType; -using ::curve::mds::topology::ServerIdType; using ::curve::mds::topology::ChunkServerIdType; using ::curve::mds::topology::LogicalPoolType; +using ::curve::mds::topology::ServerIdType; +using ::curve::mds::topology::ZoneIdType; using ::curve::mds::topology::ChunkServerStatus; using ::curve::mds::topology::OnlineState; @@ -62,14 +62,14 @@ using ::curve::mds::topology::ChunkServerFilter; using ::curve::mds::topology::CopySetFilter; using ::curve::mds::copyset::ChunkServerInfo; -using ::curve::mds::copyset::CopysetPolicy; +using ::curve::mds::copyset::ClusterInfo; +using ::curve::mds::copyset::Copyset; +using ::curve::mds::copyset::CopysetConstrait; +using ::curve::mds::copyset::CopysetManager; using ::curve::mds::copyset::CopysetPermutationPolicy; using ::curve::mds::copyset::CopysetPermutationPolicyNXX; +using ::curve::mds::copyset::CopysetPolicy; using ::curve::mds::copyset::CopysetZoneShufflePolicy; -using ::curve::mds::copyset::Copyset; -using ::curve::mds::copyset::ClusterInfo; -using ::curve::mds::copyset::CopysetManager; -using ::curve::mds::copyset::CopysetConstrait; namespace curve { namespace mds { namespace schedule { @@ -78,10 +78,10 @@ class FakeTopologyStat; class FakeTopo : public ::curve::mds::topology::TopologyImpl { public: - FakeTopo() : TopologyImpl( - std::make_shared(), - std::make_shared(), - std::make_shared()) {} + FakeTopo() + : TopologyImpl(std::make_shared(), + std::make_shared(), + std::make_shared()) {} void BuildMassiveTopo() { constexpr int serverNum = 9; @@ -91,18 +91,20 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { // gen server for (int i = 1; i <= serverNum; i++) { - std::string internalHostIP = "10.192.0." 
+ std::to_string(i+1); - serverMap_[i]= Server(static_cast(i), "", - internalHostIP, 0, "", 0, i % zoneNum + 1, 1, ""); + std::string internalHostIP = "10.192.0." + std::to_string(i + 1); + serverMap_[i] = + Server(static_cast(i), "", internalHostIP, 0, "", + 0, i % zoneNum + 1, 1, ""); } // gen chunkserver for (int i = 1; i <= serverNum; i++) { for (int j = 1; j <= diskNumPerServer; j++) { - ChunkServerIdType id = j + diskNumPerServer * (i-1); - ChunkServer chunkserver(static_cast(id), - "", "sata", i, serverMap_[i].GetInternalHostIp(), 9000+j, - "", ChunkServerStatus::READWRITE); + ChunkServerIdType id = j + diskNumPerServer * (i - 1); + ChunkServer chunkserver( + static_cast(id), "", "sata", i, + serverMap_[i].GetInternalHostIp(), 9000 + j, "", + ChunkServerStatus::READWRITE); chunkserver.SetOnlineState(OnlineState::ONLINE); chunkServerMap_[id] = chunkserver; } @@ -133,15 +135,16 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { ::curve::mds::topology::CopySetInfo info(0, id++); info.SetCopySetMembers(it.replicas); info.SetLeader(*it.replicas.begin()); - copySetMap_[info.GetCopySetKey()] = info; + copySetMap_[info.GetCopySetKey()] = info; } logicalPoolSet_.insert(0); } - std::vector GetLogicalPoolInCluster( - LogicalPoolFilter filter = [](const LogicalPool&) { - return true;}) const override { + std::vector + GetLogicalPoolInCluster(LogicalPoolFilter filter = [](const LogicalPool &) { + return true; + }) const override { std::vector ret; for (auto lid : logicalPoolSet_) { ret.emplace_back(lid); @@ -149,35 +152,34 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { return ret; } - std::vector GetChunkServerInCluster( - ChunkServerFilter filter = [](const ChunkServer&) { - return true;}) const override { + std::vector + GetChunkServerInCluster(ChunkServerFilter filter = [](const ChunkServer &) { + return true; + }) const override { std::vector ret; - for (auto it = chunkServerMap_.begin(); - it != chunkServerMap_.end(); - it++) { + for (auto it = chunkServerMap_.begin(); it != chunkServerMap_.end(); + it++) { ret.emplace_back(it->first); } return ret; } std::list GetChunkServerInLogicalPool( - PoolIdType id, - ChunkServerFilter filter = [](const ChunkServer&) { - return true;}) const override { + PoolIdType id, ChunkServerFilter filter = [](const ChunkServer &) { + return true; + }) const override { std::list ret; - for (auto it = chunkServerMap_.begin(); - it != chunkServerMap_.end(); - it++) { + for (auto it = chunkServerMap_.begin(); it != chunkServerMap_.end(); + it++) { ret.emplace_back(it->first); } return ret; } std::list GetChunkServerInServer( - ServerIdType id, - ChunkServerFilter filter = [](const ChunkServer&) { - return true;}) const override { + ServerIdType id, ChunkServerFilter filter = [](const ChunkServer &) { + return true; + }) const override { std::list res; for (auto it : chunkServerMap_) { if (it.second.GetServerId() == id) { @@ -188,8 +190,9 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } std::vector GetCopySetsInCluster( - CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo&) { - return true;}) const override { + CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo &) { + return true; + }) const override { std::vector ret; for (auto it : copySetMap_) { ret.emplace_back(it.first); @@ -199,8 +202,9 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { std::vector GetCopySetsInChunkServer( ChunkServerIdType csId, - CopySetFilter filter = [](const 
::curve::mds::topology::CopySetInfo&) { - return true;}) const override { + CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo &) { + return true; + }) const override { std::vector ret; for (auto it : copySetMap_) { if (it.second.GetCopySetMembers().count(csId) > 0) { @@ -211,9 +215,11 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } std::vector<::curve::mds::topology::CopySetInfo> - GetCopySetInfosInLogicalPool(PoolIdType logicalPoolId, - CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo&) { - return true;}) const override { + GetCopySetInfosInLogicalPool( + PoolIdType logicalPoolId, + CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo &) { + return true; + }) const override { std::vector<::curve::mds::topology::CopySetInfo> ret; for (auto it : copySetMap_) { if (it.first.first == logicalPoolId) { @@ -234,7 +240,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } bool GetCopySet(::curve::mds::topology::CopySetKey key, - ::curve::mds::topology::CopySetInfo *out) const override { + ::curve::mds::topology::CopySetInfo *out) const override { auto it = copySetMap_.find(key); if (it != copySetMap_.end()) { *out = it->second; @@ -244,8 +250,8 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } } - bool GetChunkServer( - ChunkServerIdType chunkserverId, ChunkServer *out) const override { + bool GetChunkServer(ChunkServerIdType chunkserverId, + ChunkServer *out) const override { auto it = chunkServerMap_.find(chunkserverId); if (it != chunkServerMap_.end()) { *out = it->second; @@ -260,15 +266,15 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { rap.pageFileRAP.replicaNum = 3; rap.pageFileRAP.zoneNum = 3; - LogicalPool pool(0, "logicalpool-0", 1, LogicalPoolType::PAGEFILE, - rap, LogicalPool::UserPolicy{}, 0, true, true); + LogicalPool pool(0, "logicalpool-0", 1, LogicalPoolType::PAGEFILE, rap, + LogicalPool::UserPolicy{}, 0, true, true); pool.SetScatterWidth(100); *out = pool; return true; } - int UpdateChunkServerOnlineState( - const OnlineState &onlineState, ChunkServerIdType id) override { + int UpdateChunkServerOnlineState(const OnlineState &onlineState, + ChunkServerIdType id) override { auto it = chunkServerMap_.find(id); if (it == chunkServerMap_.end()) { return -1; @@ -279,7 +285,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } int UpdateChunkServerRwState(const ChunkServerStatus &rwStatus, - ChunkServerIdType id) { + ChunkServerIdType id) { auto it = chunkServerMap_.find(id); if (it == chunkServerMap_.end()) { return -1; @@ -312,13 +318,15 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { class FakeTopologyServiceManager : public TopologyServiceManager { public: - FakeTopologyServiceManager() : - TopologyServiceManager(std::make_shared(), - std::static_pointer_cast( - std::make_shared( - std::make_shared())), nullptr, - std::make_shared( - ::curve::mds::copyset::CopysetOption{}), nullptr) {} + FakeTopologyServiceManager() + : TopologyServiceManager(std::make_shared(), + std::static_pointer_cast( + std::make_shared( + std::make_shared())), + nullptr, + std::make_shared( + ::curve::mds::copyset::CopysetOption{}), + nullptr) {} bool CreateCopysetNodeOnChunkServer( ChunkServerIdType csId, @@ -332,7 +340,7 @@ class FakeTopologyStat : public TopologyStat { explicit FakeTopologyStat(const std::shared_ptr &topo) : topo_(topo) {} void UpdateChunkServerStat(ChunkServerIdType csId, - const ChunkServerStat &stat) {} + const ChunkServerStat 
&stat) {} bool GetChunkServerStat(ChunkServerIdType csId, ChunkServerStat *stat) { if (!leaderCountOn) { @@ -352,8 +360,7 @@ class FakeTopologyStat : public TopologyStat { stat->leaderCount = leaderCount; return true; } - bool GetChunkPoolSize(PoolIdType pId, - uint64_t *chunkPoolSize) { + bool GetChunkPoolSize(PoolIdType pId, uint64_t *chunkPoolSize) { return true; } @@ -439,10 +446,9 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= factorMap.size(); LOG(INFO) << "###print scatter-with in online chunkserver###\n" - << "均值:" << avg - << ", 方差:" << variance - << ", 标准差: " << std::sqrt(variance) - << ", 最大值:(" << max << "," << maxId << ")" + << "均值:" << avg << ", 方差:" << variance << ", 标准差: " + << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId + << ")" << ", 最小值:(" << min << "," << minId << ")"; } @@ -480,10 +486,9 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= factorMap.size(); LOG(INFO) << "###print scatter-with in cluster###\n" - << "均值:" << avg - << ", 方差:" << variance - << ", 标准差: " << std::sqrt(variance) - << ", 最大值:(" << max << "," << maxId << ")" + << "均值:" << avg << ", 方差:" << variance << ", 标准差: " + << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId + << ")" << ", 最小值:(" << min << "," << minId << ")"; } @@ -523,17 +528,16 @@ class CopysetSchedulerPOC : public testing::Test { // 打印方差 float avg = static_cast(sumNumber) / - static_cast(numberMap.size()); + static_cast(numberMap.size()); float variance = 0; for (auto it : numberMap) { variance += std::pow(it.second - avg, 2); } variance /= numberMap.size(); LOG(INFO) << "###print copyset-num in online chunkserver###\n" - << "均值:" << avg - << ", 方差:" << variance - << ", 标准差: " << std::sqrt(variance) - << ", 最大值:(" << max << "," << maxId << ")" + << "均值:" << avg << ", 方差:" << variance << ", 标准差: " + << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId + << ")" << "), 最小值:(" << min << "," << minId << ")"; } @@ -559,18 +563,16 @@ class CopysetSchedulerPOC : public testing::Test { // 打印方差 float avg = static_cast(sumNumber) / - static_cast(numberMap.size()); + static_cast(numberMap.size()); float variance = 0; for (auto it : numberMap) { variance += std::pow(it.second - avg, 2); } variance /= numberMap.size(); LOG(INFO) << "###print copyset-num in cluster###\n" - << "均值:" << avg - << ", 方差:" << variance - << ", 标准差: " << std::sqrt(variance) - << ", 最大值: " << max - << ", 最小值:" << min; + << "均值:" << avg << ", 方差:" << variance << ", 标准差: " + << std::sqrt(variance) << ", 最大值: " << max << ", 最小值:" + << min; } void PrintLeaderCountInChunkServer(PoolIdType lid = 0) { @@ -598,22 +600,21 @@ class CopysetSchedulerPOC : public testing::Test { sumNumber += out.leaderCount; LOG(INFO) << "PRINT chunkserverid:" << it - << ", leader num:" << out.leaderCount; + << ", leader num:" << out.leaderCount; } } float avg = static_cast(sumNumber) / - static_cast(leaderDistribute.size()); + static_cast(leaderDistribute.size()); float variance = 0; for (auto it : leaderDistribute) { variance += std::pow(it.second - avg, 2); } variance /= leaderDistribute.size(); LOG(INFO) << "###print leader-num in cluster###\n" - << "均值:" << avg - << ", 方差:" << variance - << ", 标准差: " << std::sqrt(variance) - << ", 最大值:(" << max << "," << maxId << ")" + << "均值:" << avg << ", 方差:" << variance << ", 标准差: " + << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId + << ")" << "), 最小值:(" << min << "," << minId << ")"; } @@ -703,36 +704,33 @@ class CopysetSchedulerPOC : public testing::Test { } void BuildLeaderScheduler(int 
opConcurrent) { - topoAdapter_ = std::make_shared( + topoAdapter_ = std::make_shared( topo_, std::make_shared(), topoStat_); - opController_ = - std::make_shared( - opConcurrent, std::make_shared(topo_)); + opController_ = std::make_shared( + opConcurrent, std::make_shared(topo_)); - leaderScheduler_ = std::make_shared( - opt, topoAdapter_, opController_); + leaderScheduler_ = + std::make_shared(opt, topoAdapter_, opController_); } void BuildRapidLeaderScheduler(int opConcurrent) { - topoAdapter_ = std::make_shared( + topoAdapter_ = std::make_shared( topo_, std::make_shared(), topoStat_); - opController_ = - std::make_shared( - opConcurrent, std::make_shared(topo_)); + opController_ = std::make_shared( + opConcurrent, std::make_shared(topo_)); rapidLeaderScheduler_ = std::make_shared( opt, topoAdapter_, opController_, 0); } void BuilRecoverScheduler(int opConcurrent) { - topoAdapter_ = std::make_shared( + topoAdapter_ = std::make_shared( topo_, std::make_shared(), topoStat_); - opController_ = - std::make_shared( - opConcurrent, std::make_shared(topo_)); + opController_ = std::make_shared( + opConcurrent, std::make_shared(topo_)); recoverScheduler_ = std::make_shared( opt, topoAdapter_, opController_); @@ -743,8 +741,7 @@ class CopysetSchedulerPOC : public testing::Test { opt, topoAdapter_, opController_); } - void ApplyOperatorsInOpController( - const std::set &list) { + void ApplyOperatorsInOpController(const std::set &list) { std::vector keys; for (auto op : opController_->GetOperators()) { auto type = dynamic_cast(op.step.get()); @@ -860,9 +857,11 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_1) { // 均值:97.9556, 方差:11.5314, 标准差: 3.39579, 最大值:106, 最小值:88 // ###print copyset-num in cluster### // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 - // ==========================恢复之后的状态================================= //NOLINT + // ==========================恢复之后的状态================================= + // //NOLINT // ###print scatter-with in online chunkserver### - // 均值:均值:98.8156, 方差:10.3403, 标准差: 3.21564, 最大值:106, 最小值:95 //NOLINT + // 均值:均值:98.8156, 方差:10.3403, 标准差: 3.21564, 最大值:106, + // 最小值:95 //NOLINT // ###print scatter-with in cluster### // 均值:98.2667, 方差:64.2289, 标准差: 8.0143, 最大值:106, 最小值:0 // ###print copyset-num in online chunkserver### @@ -923,7 +922,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_2) { TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) { // 测试一个chunkserver offline恢复过程中,接连有5个chunkserver offline // 1. 创建recoverScheduler - BuilRecoverScheduler(1); + BuilRecoverScheduler(1); // 2. 任意选择两个chunkserver处于offline状态 std::set idlist; @@ -948,7 +947,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) { } } - ApplyOperatorsInOpController(idlist); + ApplyOperatorsInOpController(idlist); } while (!SatisfyStopCondition(idlist)); // 4. 打印最终的scatter-with @@ -977,7 +976,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) { TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_4) { // 测试20个chunkserver 接连 offline // 1. 创建recoverScheduler - BuilRecoverScheduler(1); + BuilRecoverScheduler(1); // 2. 任意选择两个chunkserver处于offline状态 std::set idlist; @@ -1002,7 +1001,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_4) { } } - ApplyOperatorsInOpController(idlist); + ApplyOperatorsInOpController(idlist); } while (!SatisfyStopCondition(idlist)); // 4. 
打印最终的scatter-with @@ -1037,7 +1036,8 @@ TEST_F(CopysetSchedulerPOC, test_chunkserver_offline_over_concurrency) { ASSERT_EQ(targetOpNum, opNum); } -TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_1) { //NOLINT +TEST_F(CopysetSchedulerPOC, + test_scatterwith_after_copysetRebalance_1) { // NOLINT // 测试一个chunkserver offline, 集群回迁的情况 // 1. 一个chunkserver offline后恢复 @@ -1084,7 +1084,7 @@ TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_1) { //NOLIN PrintCopySetNumInLogicalPool(); LOG(INFO) << "offline one:" << choose; ASSERT_TRUE(GetChunkServerScatterwith(choose) <= - minScatterwidth_ * (1 + scatterwidthPercent_)); + minScatterwidth_ * (1 + scatterwidthPercent_)); ASSERT_TRUE(GetChunkServerScatterwith(choose) >= minScatterwidth_); // ============================结果==================================== @@ -1095,7 +1095,8 @@ TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_1) { //NOLIN // 均值:100, 方差:0.5, 标准差: 0.707107, 最大值: 101, 最小值:91 } -TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_copysetRebalance_2) { //NOLINT +TEST_F(CopysetSchedulerPOC, + DISABLED_test_scatterwith_after_copysetRebalance_2) { // NOLINT // 测试一个chunkserver offline恢复过程中,另一个chunkserver offline // 集群回迁的情况 @@ -1146,7 +1147,8 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_copysetRebalance_2) int removeOne = 0; do { removeOne = copySetScheduler_->Schedule(); - ApplyOperatorsInOpController(std::set{removeOne}); + ApplyOperatorsInOpController(std::set{ + static_cast(removeOne)}); } while (removeOne > 0); PrintScatterWithInLogicalPool(); PrintCopySetNumInLogicalPool(); @@ -1158,7 +1160,8 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_copysetRebalance_2) // 均值:100, 方差:1, 标准差: 1, 最大值: 101, 最小值:91 } -TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_3) { //NOLINT +TEST_F(CopysetSchedulerPOC, + test_scatterwith_after_copysetRebalance_3) { // NOLINT // 测试一个chunkserver offline恢复过程中,接连有5个chunkserver offline // 回迁的情况 @@ -1187,7 +1190,7 @@ TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_3) { //NOLIN } } - ApplyOperatorsInOpController(idlist); + ApplyOperatorsInOpController(idlist); } while (!SatisfyStopCondition(idlist)); PrintScatterWithInOnlineChunkServer(); PrintScatterWithInLogicalPool(); @@ -1228,7 +1231,7 @@ TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_3) { //NOLIN for (auto choose : idlist) { ASSERT_TRUE(GetChunkServerScatterwith(choose) <= - minScatterwidth_ * (1 + scatterwidthPercent_)); + minScatterwidth_ * (1 + scatterwidthPercent_)); ASSERT_TRUE(GetChunkServerScatterwith(choose) >= minScatterwidth_); } @@ -1240,8 +1243,10 @@ TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_3) { //NOLIN // 均值:100, 方差:1, 标准差: 1, 最大值: 101, 最小值:91 } -TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_4) { //NOLINT - // set one chunkserver status from online to pendding, and the copyset on it will schedule out //NOLINT +TEST_F(CopysetSchedulerPOC, + test_scatterwith_after_copysetRebalance_4) { // NOLINT + // set one chunkserver status from online to pendding, and the copyset on it + // will schedule out //NOLINT // set one chunkserver status to pendding auto chunkserverlist = topo_->GetChunkServerInServer(1); @@ -1257,8 +1262,7 @@ TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_4) { //NOLIN removeOne = copySetScheduler_->Schedule(); opNum += removeOne; if (removeOne > 0) { - ApplyOperatorsInOpController( - std::set{target}); + 
ApplyOperatorsInOpController(std::set{target}); } } while (removeOne > 0); ASSERT_EQ(opNum, targetOpNum); @@ -1267,8 +1271,10 @@ TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_4) { //NOLIN PrintCopySetNumInOnlineChunkServer(); } -TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_5) { //NOLINT - // set two chunkserver status from online to pendding, and the copyset on it will schedule out //NOLINT +TEST_F(CopysetSchedulerPOC, + test_scatterwith_after_copysetRebalance_5) { // NOLINT + // set two chunkserver status from online to pendding, and the copyset on it + // will schedule out //NOLINT // set two chunkserver status to pendding auto chunkserverlist = topo_->GetChunkServerInServer(1); diff --git a/test/mds/server/mds_test.cpp b/test/mds/server/mds_test.cpp index 93c2207cfa..7708ac1280 100644 --- a/test/mds/server/mds_test.cpp +++ b/test/mds/server/mds_test.cpp @@ -64,7 +64,7 @@ class MDSTest : public ::testing::Test { } // 一定时间内尝试init直到etcd完全起来 auto client = std::make_shared(); - EtcdConf conf = { kEtcdAddr, strlen(kEtcdAddr), 1000 }; + EtcdConf conf = {kEtcdAddr, static_cast(strlen(kEtcdAddr)), 1000}; uint64_t now = ::curve::common::TimeUtility::GetTimeofDaySec(); bool initSuccess = false; while (::curve::common::TimeUtility::GetTimeofDaySec() - now <= 5) { @@ -176,7 +176,7 @@ TEST_F(MDSTest, common) { request1.set_token("123"); request1.set_ip("127.0.0.1"); request1.set_port(8888); - heartbeat::DiskState* diskState = new heartbeat::DiskState(); + heartbeat::DiskState *diskState = new heartbeat::DiskState(); diskState->set_errtype(0); diskState->set_errmsg(""); request1.set_allocated_diskstate(diskState); @@ -217,5 +217,42 @@ TEST_F(MDSTest, common) { ASSERT_LE(stopTime - startTime, 100); } +TEST(TestParsePoolsetRules, Test) { + std::map rules; + + { + ASSERT_TRUE(ParsePoolsetRules("", &rules)); + ASSERT_TRUE(rules.empty()); + } + + { + ASSERT_TRUE(ParsePoolsetRules("/:hello", &rules)); + ASSERT_EQ(1, rules.size()); + ASSERT_EQ("hello", rules["/"]); + } + + { + ASSERT_TRUE(ParsePoolsetRules("/system/:system;/data/:data", &rules)); + ASSERT_EQ(2, rules.size()); + ASSERT_EQ("system", rules["/system/"]); + ASSERT_EQ("data", rules["/data/"]); + } + + { + // key must starts and ends with '/' + ASSERT_FALSE(ParsePoolsetRules("/system:system;/data/:data", &rules)); + } + + { + // subdir rules + ASSERT_TRUE(ParsePoolsetRules( + "/system/:system;/data/:data;/system/sub/:system-sub", &rules)); + ASSERT_EQ(3, rules.size()); + ASSERT_EQ("system", rules["/system/"]); + ASSERT_EQ("data", rules["/data/"]); + ASSERT_EQ("system-sub", rules["/system/sub/"]); + } +} + } // namespace mds } // namespace curve diff --git a/test/mds/snapshotcloneclient/test_snapshotclone_client.cpp b/test/mds/snapshotcloneclient/test_snapshotclone_client.cpp index d2f62b6d9e..40d785c371 100644 --- a/test/mds/snapshotcloneclient/test_snapshotclone_client.cpp +++ b/test/mds/snapshotcloneclient/test_snapshotclone_client.cpp @@ -90,7 +90,6 @@ TEST_F(TestSnapshotCloneClient, TestInitSuccess) { } TEST_F(TestSnapshotCloneClient, TestInitFalse) { - uint32_t port = listenAddr_.port; option.snapshotCloneAddr = ""; client_->Init(option); ASSERT_FALSE(client_->GetInitStatus()); @@ -107,7 +106,6 @@ TEST_F(TestSnapshotCloneClient, TestGetCloneRefStatusFalseNotInit) { } TEST_F(TestSnapshotCloneClient, TestGetCloneRefStatusFalseConnectFail) { - uint32_t port = listenAddr_.port; option.snapshotCloneAddr = "aa"; client_->Init(option); ASSERT_TRUE(client_->GetInitStatus()); @@ -122,7 +120,6 @@ 
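// The TestParsePoolsetRules cases added above pin down the rule syntax the MDS
// is expected to accept: rules are ';'-separated "directory:poolset" pairs,
// every directory key must both start and end with '/', an empty string yields
// an empty rule map, and sub-directory keys such as "/system/sub/" may coexist
// with their parents. The sketch below only illustrates that tested contract;
// ParsePoolsetRulesSketch is a hypothetical name, not the ParsePoolsetRules
// implementation under src/mds.
#include <map>
#include <sstream>
#include <string>

inline bool ParsePoolsetRulesSketch(const std::string& value,
                                    std::map<std::string, std::string>* rules) {
    rules->clear();
    if (value.empty()) {
        return true;
    }
    std::istringstream iss(value);
    std::string rule;
    while (std::getline(iss, rule, ';')) {
        auto pos = rule.find(':');
        if (pos == std::string::npos) {
            return false;
        }
        std::string dir = rule.substr(0, pos);
        std::string poolset = rule.substr(pos + 1);
        // "/system:system" is rejected: the key must start and end with '/'.
        if (dir.empty() || poolset.empty() || dir.front() != '/' ||
            dir.back() != '/') {
            return false;
        }
        (*rules)[dir] = poolset;
    }
    return true;
}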
TEST_F(TestSnapshotCloneClient, TestGetCloneRefStatusFalseConnectFail) { } TEST_F(TestSnapshotCloneClient, TestGetCloneRefStatusFalseCallFail) { - uint32_t port = listenAddr_.port; option.snapshotCloneAddr = "127.0.0.1:" + std::to_string(0); client_->Init(option); ASSERT_TRUE(client_->GetInitStatus()); @@ -216,7 +213,6 @@ TEST_F(TestSnapshotCloneClient, TestGetCloneRefStatusFalseInvalidStatus) { butil::IOBufBuilder os; Json::Value mainObj; mainObj[kCodeStr] = std::to_string(kErrCodeSuccess); - CloneRefStatus refStatus = CloneRefStatus::kNoRef; mainObj[kRefStatusStr] = 4; os << mainObj.toStyledString(); os.move_to(bcntl->response_attachment()); diff --git a/test/mds/topology/mock_topology.h b/test/mds/topology/mock_topology.h index 90a64e7ddb..90afab62e1 100644 --- a/test/mds/topology/mock_topology.h +++ b/test/mds/topology/mock_topology.h @@ -56,7 +56,8 @@ class MockIdGenerator : public TopologyIdGenerator { public: MockIdGenerator() {} ~MockIdGenerator() {} - + MOCK_METHOD1(initPoolsetIdGenerator, void(PoolsetIdType + idMax)); MOCK_METHOD1(initLogicalPoolIdGenerator, void(PoolIdType idMax)); MOCK_METHOD1(initPhysicalPoolIdGenerator, void(PoolIdType @@ -70,6 +71,7 @@ class MockIdGenerator : public TopologyIdGenerator { MOCK_METHOD1(initCopySetIdGenerator, void( const std::map &idMaxMap)); + MOCK_METHOD0(GenPoolsetId, PoolsetIdType()); MOCK_METHOD0(GenLogicalPoolId, PoolIdType()); MOCK_METHOD0(GenPhysicalPoolId, PoolIdType()); MOCK_METHOD0(GenZoneId, ZoneIdType()); @@ -92,6 +94,9 @@ class MockStorage : public TopologyStorage { MockStorage() {} ~MockStorage() {} + MOCK_METHOD2(LoadPoolset, + bool(std::unordered_map + *poolsetMap, PoolsetIdType * maxPoolsetId)); MOCK_METHOD2(LoadLogicalPool, bool(std::unordered_map *logicalPoolMap, PoolIdType * maxLogicalPoolId)); @@ -109,6 +114,8 @@ class MockStorage : public TopologyStorage { bool(std::map *copySetMap, std::map * copySetIdMaxMap)); + MOCK_METHOD1(StoragePoolset, bool( + const Poolset &data)); MOCK_METHOD1(StorageLogicalPool, bool( const LogicalPool &data)); MOCK_METHOD1(StoragePhysicalPool, bool( @@ -122,6 +129,9 @@ class MockStorage : public TopologyStorage { MOCK_METHOD1(StorageCopySet, bool( const CopySetInfo &data)); + + MOCK_METHOD1(DeletePoolset, bool(PoolsetIdType + id)); MOCK_METHOD1(DeleteLogicalPool, bool(PoolIdType id)); MOCK_METHOD1(DeletePhysicalPool, bool(PoolIdType @@ -146,7 +156,7 @@ class MockStorage : public TopologyStorage { MOCK_METHOD1(UpdateChunkServer, bool( const ChunkServer &data)); MOCK_METHOD1(UpdateCopySet, bool( - const CopySetInfo &data)); + const CopySetInfo &data)); MOCK_METHOD1(LoadClusterInfo, bool(std::vector *info)); @@ -243,6 +253,10 @@ class MockTopologyServiceManager : public TopologyServiceManager { const ListPhysicalPoolRequest *request, ListPhysicalPoolResponse *response)); + MOCK_METHOD2(ListPhysicalPoolsInPoolset, + void(const ListPhysicalPoolsInPoolsetRequest* request, + ListPhysicalPoolResponse* response)); + MOCK_METHOD2(CreateLogicalPool, void( const CreateLogicalPoolRequest *request, CreateLogicalPoolResponse *response)); @@ -266,6 +280,22 @@ class MockTopologyServiceManager : public TopologyServiceManager { void(const SetLogicalPoolScanStateRequest* request, SetLogicalPoolScanStateResponse* response)); + MOCK_METHOD2(CreatePoolset, void( + const PoolsetRequest *request, + PoolsetResponse *response)); + + MOCK_METHOD2(DeletePoolset, void( + const PoolsetRequest *request, + PoolsetResponse *response)); + + MOCK_METHOD2(GetPoolset, + void(const PoolsetRequest *request, + PoolsetResponse 
*response)); + + MOCK_METHOD2(ListPoolset, + void(const ListPoolsetRequest *request, + ListPoolsetResponse *response)); + MOCK_METHOD2(GetChunkServerListInCopySets, void( const GetChunkServerListInCopySetsRequest *request, GetChunkServerListInCopySetsResponse *response)); @@ -314,6 +344,12 @@ class MockTopologyServiceImpl : public TopologyService { const PhysicalPoolRequest* request, PhysicalPoolResponse* response, google::protobuf::Closure* done)); + + MOCK_METHOD4(CreatePoolset, + void(google::protobuf::RpcController* cntl_base, + const PoolsetRequest* request, + PoolsetResponse* response, + google::protobuf::Closure* done)); }; } // namespace topology diff --git a/test/mds/topology/test_topology.cpp b/test/mds/topology/test_topology.cpp index 9ee1eaedf9..686cd00fc9 100644 --- a/test/mds/topology/test_topology.cpp +++ b/test/mds/topology/test_topology.cpp @@ -26,6 +26,7 @@ #include "src/mds/topology/topology.h" #include "src/mds/topology/topology_item.h" #include "src/common/configuration.h" +#include "src/common/namespace_define.h" namespace curve { namespace mds { @@ -35,7 +36,10 @@ using ::testing::Return; using ::testing::_; using ::testing::Contains; using ::testing::SetArgPointee; +using ::testing::SaveArg; using ::curve::common::Configuration; +using ::curve::common::kDefaultPoolsetId; +using ::curve::common::kDefaultPoolsetName; class TestTopology : public ::testing::Test { protected: @@ -49,6 +53,17 @@ class TestTopology : public ::testing::Test { topology_ = std::make_shared(idGenerator_, tokenGenerator_, storage_); + + const std::unordered_map poolsetMap{ + {kDefaultPoolsetId, + {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}} + }; + + ON_CALL(*storage_, LoadPoolset(_, _)) + .WillByDefault(DoAll( + SetArgPointee<0>(poolsetMap), + SetArgPointee<1>(static_cast(kDefaultPoolsetId)), + Return(true))); } virtual void TearDown() { @@ -59,6 +74,18 @@ class TestTopology : public ::testing::Test { } protected: + void PrepareAddPoolset(PoolsetIdType id = 0x61, + const std::string& name = "ssdPoolset1", + const std::string& type = "SSD", + const std::string& desc = "descPoolset") { + Poolset poolset(id, name, type, desc); + EXPECT_CALL(*storage_, StoragePoolset(_)) + .WillOnce(Return(true)); + + int ret = topology_->AddPoolset(poolset); + ASSERT_EQ(kTopoErrCodeSuccess, ret); + } + void PrepareAddLogicalPool(PoolIdType id = 0x01, const std::string &name = "testLogicalPool", PoolIdType phyPoolId = 0x11, @@ -89,10 +116,12 @@ class TestTopology : public ::testing::Test { void PrepareAddPhysicalPool(PoolIdType id = 0x11, const std::string &name = "testPhysicalPool", + PoolsetIdType pid = 0x61, const std::string &desc = "descPhysicalPool", uint64_t diskCapacity = 0) { PhysicalPool pool(id, name, + pid, desc); pool.SetDiskCapacity(diskCapacity); EXPECT_CALL(*storage_, StoragePhysicalPool(_)) @@ -194,6 +223,10 @@ TEST_F(TestTopology, test_init_success) { EXPECT_CALL(*storage_, StorageClusterInfo(_)) .WillOnce(Return(true)); + const std::unordered_map poolsetMap{ + {kDefaultPoolsetId, + {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}} + }; std::unordered_map logicalPoolMap_; std::unordered_map physicalPoolMap_; std::unordered_map zoneMap_; @@ -205,7 +238,7 @@ TEST_F(TestTopology, test_init_success) { LogicalPool::RedundanceAndPlaceMentPolicy(), LogicalPool::UserPolicy(), 0, false, true); - physicalPoolMap_[0x11] = PhysicalPool(0x11, "pPool1", "des1"); + physicalPoolMap_[0x11] = PhysicalPool(0x11, "pPool1", 0X61, "des1"); zoneMap_[0x21] = Zone(0x21, "zone1", 0x11, "desc1"); serverMap_[0x31] = 
Server(0x31, "server1", "127.0.0.1", 8200, "127.0.0.1", 8200, 0x21, 0x11, "desc1"); @@ -214,6 +247,9 @@ TEST_F(TestTopology, test_init_success) { copySetMap_[std::pair(0x01, 0x51)] = CopySetInfo(0x01, 0x51); + EXPECT_CALL(*storage_, LoadPoolset(_, _)) + .WillOnce(DoAll(SetArgPointee<0>(poolsetMap), + Return(true))); EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) .WillOnce(DoAll(SetArgPointee<0>(logicalPoolMap_), Return(true))); @@ -233,6 +269,7 @@ TEST_F(TestTopology, test_init_success) { .WillOnce(DoAll(SetArgPointee<0>(copySetMap_), Return(true))); + EXPECT_CALL(*idGenerator_, initPoolsetIdGenerator(_)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initZoneIdGenerator(_)); @@ -341,7 +378,6 @@ TEST_F(TestTopology, test_init_LoadServerFail) { EXPECT_CALL(*storage_, LoadClusterInfo(_)) .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) .WillOnce(Return(true)); EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) @@ -422,6 +458,7 @@ TEST_F(TestTopology, test_init_LoadCopysetFail) { } TEST_F(TestTopology, test_AddLogicalPool_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); LogicalPool pool(0x01, @@ -443,6 +480,7 @@ TEST_F(TestTopology, test_AddLogicalPool_success) { } TEST_F(TestTopology, test_AddLogicalPool_IdDuplicated) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); PoolIdType id = 0x01; @@ -464,6 +502,7 @@ TEST_F(TestTopology, test_AddLogicalPool_IdDuplicated) { } TEST_F(TestTopology, test_AddLogicalPool_StorageFail) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); LogicalPool pool(0x01, @@ -485,6 +524,7 @@ TEST_F(TestTopology, test_AddLogicalPool_StorageFail) { } TEST_F(TestTopology, test_AddLogicalPool_PhysicalPoolNotFound) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); LogicalPool pool(0x01, @@ -504,8 +544,10 @@ TEST_F(TestTopology, test_AddLogicalPool_PhysicalPoolNotFound) { } TEST_F(TestTopology, test_AddPhysicalPool_success) { + PrepareAddPoolset(); PhysicalPool pool(0x11, "test1", + 0X61, "desc"); EXPECT_CALL(*storage_, StoragePhysicalPool(_)) .WillOnce(Return(true)); @@ -516,10 +558,12 @@ TEST_F(TestTopology, test_AddPhysicalPool_success) { TEST_F(TestTopology, test_AddPhysicalPool_IdDuplicated) { + PrepareAddPoolset(); PoolIdType id = 0x11; - + PoolsetIdType pid = 0x61; PhysicalPool pool(id, "test1", + pid, "desc"); PrepareAddPhysicalPool(id); int ret = topology_->AddPhysicalPool(pool); @@ -527,8 +571,10 @@ TEST_F(TestTopology, test_AddPhysicalPool_IdDuplicated) { } TEST_F(TestTopology, test_AddPhysicalPool_StorageFail) { + PrepareAddPoolset(); PhysicalPool pool(0x11, "test1", + 0X61, "desc"); EXPECT_CALL(*storage_, StoragePhysicalPool(_)) .WillOnce(Return(false)); @@ -538,6 +584,7 @@ TEST_F(TestTopology, test_AddPhysicalPool_StorageFail) { } TEST_F(TestTopology, test_AddZone_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId); @@ -563,6 +610,7 @@ TEST_F(TestTopology, test_AddZone_success) { } TEST_F(TestTopology, test_AddZone_IdDuplicated) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId); @@ -578,6 +626,7 @@ TEST_F(TestTopology, test_AddZone_IdDuplicated) { } 
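// The fixture above installs a default action for LoadPoolset in SetUp() via
// ON_CALL(...).WillByDefault(...), so every test sees the default poolset
// without stubbing LoadPoolset itself, while a test that cares (e.g.
// test_init_success) still pins the call down with an explicit EXPECT_CALL.
// A self-contained sketch of that pattern follows; FakeStorage and
// MockFakeStorage are hypothetical stand-ins, not the real TopologyStorage.
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <map>
#include <string>

namespace {

using ::testing::_;
using ::testing::DoAll;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::SetArgPointee;

class FakeStorage {
 public:
    virtual ~FakeStorage() = default;
    virtual bool LoadPoolset(std::map<int, std::string>* poolsets) = 0;
};

class MockFakeStorage : public FakeStorage {
 public:
    MOCK_METHOD1(LoadPoolset, bool(std::map<int, std::string>* poolsets));
};

TEST(OnCallDefaultSketch, DefaultActionVersusExplicitExpectation) {
    // 1) Default action only: a test that does not care about LoadPoolset
    //    still gets one poolset and a successful return.
    NiceMock<MockFakeStorage> defaulted;
    ON_CALL(defaulted, LoadPoolset(_))
        .WillByDefault(DoAll(
            SetArgPointee<0>(std::map<int, std::string>{{0x61, "ssdPoolset1"}}),
            Return(true)));
    std::map<int, std::string> out;
    ASSERT_TRUE(defaulted.LoadPoolset(&out));
    ASSERT_EQ(1u, out.size());

    // 2) Explicit expectation on a fresh mock: a test that cares overrides the
    //    default with its own action, as test_init_success does above.
    NiceMock<MockFakeStorage> pinned;
    EXPECT_CALL(pinned, LoadPoolset(_)).WillOnce(Return(false));
    ASSERT_FALSE(pinned.LoadPoolset(&out));
}

}  // namespace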
TEST_F(TestTopology, test_AddZone_StorageFail) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); @@ -595,6 +644,7 @@ TEST_F(TestTopology, test_AddZone_StorageFail) { } TEST_F(TestTopology, test_AddZone_PhysicalPoolNotFound) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; @@ -610,6 +660,7 @@ TEST_F(TestTopology, test_AddZone_PhysicalPoolNotFound) { } TEST_F(TestTopology, test_AddServer_success) { + PrepareAddPoolset(); ServerIdType id = 0x31; PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; @@ -641,6 +692,7 @@ TEST_F(TestTopology, test_AddServer_success) { } TEST_F(TestTopology, test_AddServer_IdDuplicated) { + PrepareAddPoolset(); ServerIdType id = 0x31; PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; @@ -664,6 +716,7 @@ TEST_F(TestTopology, test_AddServer_IdDuplicated) { } TEST_F(TestTopology, test_AddServer_StorageFail) { + PrepareAddPoolset(); ServerIdType id = 0x31; PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; @@ -690,6 +743,7 @@ TEST_F(TestTopology, test_AddServer_StorageFail) { TEST_F(TestTopology, test_AddServer_ZoneNotFound) { + PrepareAddPoolset(); ServerIdType id = 0x31; PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; @@ -710,6 +764,7 @@ TEST_F(TestTopology, test_AddServer_ZoneNotFound) { TEST_F(TestTopology, test_AddChunkServers_success) { + PrepareAddPoolset(); ChunkServerIdType csId = 0x41; ServerIdType serverId = 0x31; @@ -749,6 +804,7 @@ TEST_F(TestTopology, test_AddChunkServers_success) { } TEST_F(TestTopology, test_AddChunkServer_IdDuplicated) { + PrepareAddPoolset(); ChunkServerIdType csId = 0x41; ServerIdType serverId = 0x31; @@ -774,6 +830,7 @@ TEST_F(TestTopology, test_AddChunkServer_IdDuplicated) { } TEST_F(TestTopology, test_AddChunkServer_StorageFail) { + PrepareAddPoolset(); ChunkServerIdType csId = 0x41; ServerIdType serverId = 0x31; @@ -798,6 +855,7 @@ TEST_F(TestTopology, test_AddChunkServer_StorageFail) { } TEST_F(TestTopology, test_AddChunkServer_ServerNotFound) { + PrepareAddPoolset(); ChunkServerIdType csId = 0x41; ServerIdType serverId = 0x31; @@ -815,6 +873,7 @@ TEST_F(TestTopology, test_AddChunkServer_ServerNotFound) { } TEST_F(TestTopology, test_RemoveLogicalPool_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); PoolIdType id = 0x01; @@ -829,6 +888,7 @@ TEST_F(TestTopology, test_RemoveLogicalPool_success) { } TEST_F(TestTopology, test_RemoveLogicalPool_LogicalPoolNotFound) { + PrepareAddPoolset(); PoolIdType id = 0x01; int ret = topology_->RemoveLogicalPool(id); @@ -837,6 +897,7 @@ TEST_F(TestTopology, test_RemoveLogicalPool_LogicalPoolNotFound) { } TEST_F(TestTopology, test_RemoveLogicalPool_StorageFail) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); PoolIdType id = 0x01; @@ -851,6 +912,7 @@ TEST_F(TestTopology, test_RemoveLogicalPool_StorageFail) { } TEST_F(TestTopology, test_RemovePhysicalPool_success) { + PrepareAddPoolset(); PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); @@ -863,6 +925,7 @@ TEST_F(TestTopology, test_RemovePhysicalPool_success) { } TEST_F(TestTopology, test_RemovePhysicalPool_PhysicalPoolNotFound) { + PrepareAddPoolset(); PoolIdType poolId = 0x11; int ret = topology_->RemovePhysicalPool(poolId); @@ -871,6 +934,7 @@ TEST_F(TestTopology, test_RemovePhysicalPool_PhysicalPoolNotFound) { } TEST_F(TestTopology, test_RemovePhysicalPool_StorageFail) { + PrepareAddPoolset(); PoolIdType 
poolId = 0x11; PrepareAddPhysicalPool(poolId); @@ -883,6 +947,7 @@ TEST_F(TestTopology, test_RemovePhysicalPool_StorageFail) { } TEST_F(TestTopology, test_RemoveZone_success) { + PrepareAddPoolset(); ZoneIdType zoneId = 0x21; PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); @@ -911,6 +976,7 @@ TEST_F(TestTopology, test_RemoveZone_ZoneNotFound) { } TEST_F(TestTopology, test_RemoveZone_StorageFail) { + PrepareAddPoolset(); ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(); PrepareAddZone(zoneId); @@ -923,6 +989,7 @@ TEST_F(TestTopology, test_RemoveZone_StorageFail) { } TEST_F(TestTopology, test_RemoveServer_success) { + PrepareAddPoolset(); ServerIdType serverId = 0x31; ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(); @@ -957,6 +1024,7 @@ TEST_F(TestTopology, test_RemoveSever_ServerNotFound) { } TEST_F(TestTopology, test_RemoveServer_StorageFail) { + PrepareAddPoolset(); ServerIdType serverId = 0x31; ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(); @@ -979,7 +1047,7 @@ TEST_F(TestTopology, test_RemoveServer_StorageFail) { TEST_F(TestTopology, test_RemoveChunkServer_success) { ChunkServerIdType csId = 0x41; ServerIdType serverId = 0x31; - + PrepareAddPoolset(); PrepareAddPhysicalPool(); PrepareAddZone(); PrepareAddServer(serverId); @@ -1017,7 +1085,7 @@ TEST_F(TestTopology, test_RemoveChunkServer_ChunkSeverNotFound) { TEST_F(TestTopology, test_RemoveChunkServer_StorageFail) { ChunkServerIdType csId = 0x41; ServerIdType serverId = 0x31; - + PrepareAddPoolset(); PrepareAddPhysicalPool(); PrepareAddZone(); PrepareAddServer(serverId); @@ -1040,6 +1108,7 @@ TEST_F(TestTopology, test_RemoveChunkServer_StorageFail) { } TEST_F(TestTopology, UpdateLogicalPool_success) { + PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); @@ -1092,6 +1161,7 @@ TEST_F(TestTopology, UpdateLogicalPool_LogicalPoolNotFound) { } TEST_F(TestTopology, UpdateLogicalPool_StorageFail) { + PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); @@ -1122,6 +1192,7 @@ TEST_F(TestTopology, UpdateLogicalPool_StorageFail) { } TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_success) { + PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); @@ -1184,6 +1255,7 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_LogicalPoolNotFound) { } TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_StorageFail) { + PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); @@ -1205,6 +1277,7 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_StorageFail) { } TEST_F(TestTopology, TestUpdateLogicalPoolScanState) { + PrepareAddPoolset(); PoolIdType lpid = 1; // logicalPoolId PoolIdType ppid = 1; // physicalPoolId PrepareAddPhysicalPool(ppid); @@ -1248,13 +1321,17 @@ TEST_F(TestTopology, TestUpdateLogicalPoolScanState) { } TEST_F(TestTopology, UpdatePhysicalPool_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; + PoolsetIdType poolsetId = 0x61; PrepareAddPhysicalPool(physicalPoolId, "name1", + poolsetId, "desc1"); PhysicalPool newPool(physicalPoolId, "name1", + poolsetId, "desc2"); EXPECT_CALL(*storage_, UpdatePhysicalPool(_)) @@ -1270,9 +1347,10 @@ TEST_F(TestTopology, UpdatePhysicalPool_success) { TEST_F(TestTopology, UpdatePhysicalPool_PhysicalPoolNotFound) { PoolIdType physicalPoolId = 0x11; - + PoolIdType pid = 
0x61; PhysicalPool newPool(physicalPoolId, "name1", + pid, "desc2"); int ret = topology_->UpdatePhysicalPool(newPool); @@ -1281,13 +1359,17 @@ TEST_F(TestTopology, UpdatePhysicalPool_PhysicalPoolNotFound) { TEST_F(TestTopology, UpdatePhysicalPool_StorageFail) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; + PoolsetIdType poolsetId = 0x61; PrepareAddPhysicalPool(physicalPoolId, "name1", + poolsetId, "desc1"); PhysicalPool newPool(physicalPoolId, "name1", + poolsetId, "desc2"); EXPECT_CALL(*storage_, UpdatePhysicalPool(_)) @@ -1300,6 +1382,7 @@ TEST_F(TestTopology, UpdatePhysicalPool_StorageFail) { TEST_F(TestTopology, UpdateZone_success) { + PrepareAddPoolset(); ZoneIdType zoneId = 0x21; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); @@ -1334,6 +1417,7 @@ TEST_F(TestTopology, UpdateZone_ZoneNotFound) { } TEST_F(TestTopology, UpdateZone_StorageFail) { + PrepareAddPoolset(); ZoneIdType zoneId = 0x21; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); @@ -1354,6 +1438,7 @@ TEST_F(TestTopology, UpdateZone_StorageFail) { } TEST_F(TestTopology, UpdateServer_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1406,6 +1491,7 @@ TEST_F(TestTopology, UpdateServer_ServerNotFound) { } TEST_F(TestTopology, UpdateServer_StorageFail) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1440,6 +1526,7 @@ TEST_F(TestTopology, UpdateServer_StorageFail) { TEST_F(TestTopology, UpdateChunkServerTopo_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1470,6 +1557,7 @@ TEST_F(TestTopology, UpdateChunkServerTopo_success) { } TEST_F(TestTopology, UpdateChunkServerTopo_UpdateServerSuccess) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1520,6 +1608,7 @@ TEST_F(TestTopology, UpdateChunkServerTopo_ChunkServerNotFound) { } TEST_F(TestTopology, UpdateChunkServerTopo_StorageFail) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1550,6 +1639,7 @@ TEST_F(TestTopology, UpdateChunkServerTopo_StorageFail) { } TEST_F(TestTopology, UpdateChunkServerDiskStatus_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1598,6 +1688,7 @@ TEST_F(TestTopology, UpdateChunkServerDiskStatus_ChunkServerNotFound) { } TEST_F(TestTopology, UpdateChunkServerRwStateToStorage_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1626,6 +1717,7 @@ TEST_F(TestTopology, UpdateChunkServerRwStateToStorage_success) { } TEST_F(TestTopology, UpdateChunkServerRwStateTestPhysicalPoolCapacity_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1702,6 +1794,7 @@ TEST_F(TestTopology, UpdateChunkServerRwState_ChunkServerNotFound) { } TEST_F(TestTopology, UpdateChunkServerStartUpTime_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1730,6 +1823,7 @@ TEST_F(TestTopology, UpdateChunkServerStartUpTime_ChunkServerNotFound) { } TEST_F(TestTopology, FindLogicalPool_success) { + PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; std::string 
logicalPoolName = "logicalPool1"; PoolIdType physicalPoolId = 0x11; @@ -1752,6 +1846,7 @@ TEST_F(TestTopology, FindLogicalPool_LogicalPoolNotFound) { } TEST_F(TestTopology, FindPhysicalPool_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; std::string physicalPoolName = "physicalPoolName"; PrepareAddPhysicalPool(physicalPoolId, physicalPoolName); @@ -1768,6 +1863,7 @@ TEST_F(TestTopology, FindPhysicalPool_PhysicalPoolNotFound) { TEST_F(TestTopology, FindZone_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; std::string physicalPoolName = "physicalPoolName"; ZoneIdType zoneId = 0x21; @@ -1787,6 +1883,7 @@ TEST_F(TestTopology, FindZone_ZoneNotFound) { } TEST_F(TestTopology, FindZone_success2) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; std::string physicalPoolName = "physicalPoolName"; ZoneIdType zoneId = 0x21; @@ -1807,6 +1904,7 @@ TEST_F(TestTopology, FindZone_ZoneNotFound2) { } TEST_F(TestTopology, FindServerByHostName_success) { + PrepareAddPoolset(); ServerIdType serverId = 0x31; std::string hostName = "host1"; PrepareAddPhysicalPool(); @@ -1826,6 +1924,7 @@ TEST_F(TestTopology, FindServerByHostName_ServerNotFound) { } TEST_F(TestTopology, FindServerByHostIpPort_success) { + PrepareAddPoolset(); ServerIdType serverId = 0x31; std::string hostName = "host1"; std::string internalHostIp = "ip1"; @@ -1847,6 +1946,7 @@ TEST_F(TestTopology, FindServerByHostIpPort_success) { } TEST_F(TestTopology, FindSeverByHostIp_ServerNotFound) { + PrepareAddPoolset(); ServerIdType serverId = 0x31; std::string hostName = "host1"; std::string internalHostIp = "ip1"; @@ -1866,6 +1966,7 @@ TEST_F(TestTopology, FindSeverByHostIp_ServerNotFound) { } TEST_F(TestTopology, FindChunkServerNotRetired_success) { + PrepareAddPoolset(); ServerIdType serverId = 0x31; std::string hostName = "host1"; std::string internalHostIp = "ip1"; @@ -1894,6 +1995,7 @@ TEST_F(TestTopology, FindChunkServerNotRetired_success) { } TEST_F(TestTopology, FindChunkServerNotRetired_ChunkServerNotFound) { + PrepareAddPoolset(); ServerIdType serverId = 0x31; std::string hostName = "host1"; std::string internalHostIp = "ip1"; @@ -1922,6 +2024,7 @@ TEST_F(TestTopology, FindChunkServerNotRetired_ChunkServerNotFound) { } TEST_F(TestTopology, GetLogicalPool_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); PoolIdType logicalPoolId = 0x01; @@ -1939,6 +2042,7 @@ TEST_F(TestTopology, GetLogicalPool_LogicalPoolNotFound) { } TEST_F(TestTopology, GetPhysicalPool_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); PhysicalPool pool; @@ -1954,6 +2058,7 @@ TEST_F(TestTopology, GetPhysicalPool_PhysicalPoolNotFound) { } TEST_F(TestTopology, GetZone_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId); @@ -1971,6 +2076,7 @@ TEST_F(TestTopology, GetZone_ZoneNotFound) { } TEST_F(TestTopology, GetServer_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1984,6 +2090,7 @@ TEST_F(TestTopology, GetServer_success) { TEST_F(TestTopology, GetServer_GetServerNotFound) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1996,6 +2103,7 @@ TEST_F(TestTopology, GetServer_GetServerNotFound) { } TEST_F(TestTopology, GetChunkServer_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 
0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -2010,6 +2118,7 @@ TEST_F(TestTopology, GetChunkServer_success) { } TEST_F(TestTopology, GetChunkServer_ChunkServerNotFound) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -2031,6 +2140,7 @@ TEST_F(TestTopology, GetChunkServerInCluster_success) { ChunkServerIdType csId = 0x41; ChunkServerIdType csId2 = 0x42; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); @@ -2048,6 +2158,7 @@ TEST_F(TestTopology, GetServerInCluster_success) { ServerIdType serverId = 0x31; ServerIdType serverId2 = 0x32; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); @@ -2063,6 +2174,7 @@ TEST_F(TestTopology, GetZoneInCluster_success) { ZoneIdType zoneId = 0x21; ZoneIdType zoneId2 = 0x22; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddZone(zoneId2); @@ -2076,6 +2188,7 @@ TEST_F(TestTopology, GetPhysicalPoolInCluster_success) { PoolIdType physicalPoolId = 0x11; PoolIdType physicalPoolId2 = 0x12; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddPhysicalPool(physicalPoolId2); @@ -2089,6 +2202,7 @@ TEST_F(TestTopology, GetLogicalPoolInCluster_success) { PoolIdType logicalPoolId = 0x01; PoolIdType logicalPoolId2 = 0x02; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddLogicalPool(logicalPoolId, "name", physicalPoolId); PrepareAddLogicalPool(logicalPoolId2, "name2", physicalPoolId); @@ -2105,6 +2219,7 @@ TEST_F(TestTopology, GetChunkServerInServer_success) { ChunkServerIdType csId = 0x41; ChunkServerIdType csId2 = 0x42; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); @@ -2131,6 +2246,7 @@ TEST_F(TestTopology, GetChunkServerInZone_success) { ChunkServerIdType csId = 0x41; ChunkServerIdType csId2 = 0x42; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); @@ -2150,6 +2266,7 @@ TEST_F(TestTopology, GetChunkServerInPhysicalPool_success) { ChunkServerIdType csId = 0x41; ChunkServerIdType csId2 = 0x42; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); @@ -2168,6 +2285,7 @@ TEST_F(TestTopology, GetServerInZone_success) { ServerIdType serverId = 0x31; ServerIdType serverId2 = 0x32; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); @@ -2190,6 +2308,7 @@ TEST_F(TestTopology, GetServerInPhysicalPool_success) { ServerIdType serverId = 0x31; ServerIdType serverId2 = 0x32; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); @@ -2206,6 +2325,7 @@ TEST_F(TestTopology, GetZoneInPhysicalPool_success) { ZoneIdType zoneId = 0x21; ZoneIdType zoneId2 = 0x22; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddZone(zoneId2); @@ -2228,6 +2348,7 @@ TEST_F(TestTopology, GetLogicalPoolInPhysicalPool_success) { PoolIdType logicalPoolId = 0x01; PoolIdType logicalPoolId2 = 0x02; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId); PrepareAddLogicalPool(logicalPoolId2, "logicalPool2", physicalPoolId); @@ -2246,6 +2367,7 @@ TEST_F(TestTopology, 
GetChunkServerInLogicalPool_success) { ChunkServerIdType csId2 = 0x42; PoolIdType logicalPoolId = 0x01; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "name", physicalPoolId); PrepareAddServer( @@ -2274,6 +2396,7 @@ TEST_F(TestTopology, GetServerInLogicalPool_success) { ServerIdType serverId2 = 0x32; PoolIdType logicalPoolId = 0x01; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); @@ -2299,6 +2422,7 @@ TEST_F(TestTopology, GetZoneInLogicalPool_success) { ZoneIdType zoneId2 = 0x22; PoolIdType logicalPoolId = 0x01; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddZone(zoneId2); @@ -2322,6 +2446,7 @@ TEST_F(TestTopology, AddCopySet_success) { PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); @@ -2355,6 +2480,7 @@ TEST_F(TestTopology, AddCopySet_IdDuplicated) { PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); @@ -2387,6 +2513,7 @@ TEST_F(TestTopology, AddCopySet_LogicalPoolNotFound) { PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); @@ -2418,6 +2545,7 @@ TEST_F(TestTopology, AddCopySet_StorageFail) { PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); @@ -2451,6 +2579,7 @@ TEST_F(TestTopology, RemoveCopySet_success) { PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); @@ -2485,6 +2614,7 @@ TEST_F(TestTopology, RemoveCopySet_storageFail) { PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); @@ -2519,6 +2649,7 @@ TEST_F(TestTopology, RemoveCopySet_CopySetNotFound) { PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); @@ -2550,6 +2681,7 @@ TEST_F(TestTopology, UpdateCopySetTopo_success) { PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); @@ -2597,6 +2729,7 @@ TEST_F(TestTopology, UpdateCopySetTopo_CopySetNotFound) { PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); @@ -2636,6 +2769,7 @@ TEST_F(TestTopology, GetCopySet_success) { PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; + PrepareAddPoolset(); 
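// Every TestTopology case above that prepares a physical pool now calls
// PrepareAddPoolset() first: with poolsets in the topology model a PhysicalPool
// carries a PoolsetIdType (0x61 in these fixtures), so AddPhysicalPool() is
// expected to fail unless the owning poolset has been registered (hence the
// "should have PrepareAddPoolset()" assertion message added to the
// chunk-allocator fixture further below). The fixture ordering is therefore
// always poolset, then physical pool, then zone, server and chunkserver, which
// is exactly the sequence that continues here.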
PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); @@ -2669,6 +2803,7 @@ TEST_F(TestTopology, GetCopySet_CopysetNotFound) { PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); @@ -2702,6 +2837,7 @@ TEST_F(TestTopology, GetCopySetsInLogicalPool_success) { PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); @@ -2732,6 +2868,7 @@ TEST_F(TestTopology, GetCopySetsInCluster_success) { PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); @@ -2758,6 +2895,7 @@ TEST_F(TestTopology, GetCopySetsInCluster_success) { } TEST_F(TestTopology, GetCopySetsInChunkServer_success) { + PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; @@ -2787,6 +2925,53 @@ TEST_F(TestTopology, GetCopySetsInChunkServer_success) { ASSERT_EQ(1, csList.size()); } +TEST_F(TestTopology, test_create_default_poolset) { + EXPECT_CALL(*storage_, LoadClusterInfo(_)) + .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageClusterInfo(_)) + .WillOnce(Return(true)); + + EXPECT_CALL(*storage_, LoadPoolset(_, _)) + .WillOnce(Return(true)); + + Poolset poolset; + EXPECT_CALL(*storage_, StoragePoolset(_)) + .WillOnce( + DoAll(SaveArg<0>(&poolset), Return(true))); + + std::unordered_map physicalPoolMap{ + {1, {1, "pool1", UNINTIALIZE_ID, ""}}, + {2, {2, "pool2", UNINTIALIZE_ID, ""}}, + }; + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) + .WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap), + SetArgPointee<1>(2), + Return(true))); + + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) + .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)) + .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)) + .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadChunkServer(_, _)) + .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadCopySet(_, _)) + .WillOnce(Return(true)); + + int rc = topology_->Init({}); + ASSERT_EQ(kTopoErrCodeSuccess, rc); + + ASSERT_EQ(curve::common::kDefaultPoolsetId, poolset.GetId()); + ASSERT_EQ(curve::common::kDefaultPoolsetName, poolset.GetName()); + + auto poolsets = topology_->GetPoolsetInCluster(); + auto physicals = topology_->GetPhysicalPoolInCluster(); + EXPECT_EQ(1, poolsets.size()); + EXPECT_EQ(2, physicals.size()); + EXPECT_EQ(2, topology_->GetPhysicalPoolInPoolset(poolsets[0]).size()); +} + } // namespace topology } // namespace mds } // namespace curve diff --git a/test/mds/topology/test_topology_chunk_allocator.cpp b/test/mds/topology/test_topology_chunk_allocator.cpp index 3dfbc51b58..a1ea8aa942 100644 --- a/test/mds/topology/test_topology_chunk_allocator.cpp +++ b/test/mds/topology/test_topology_chunk_allocator.cpp @@ -80,6 +80,18 @@ class TestTopologyChunkAllocator : public ::testing::Test { testObj_ = nullptr; } + void PrepareAddPoolset(PoolsetIdType pid = 0x61, + const std::string& name = "testPoolset", + const std::string& type = "SSD", + const std::string& desc = "descPoolset") { + Poolset poolset(pid, name, type, desc); + 
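// test_create_default_poolset above captures the upgrade path: when
// Topology::Init() finds no stored poolset it is expected to create the
// default one (kDefaultPoolsetId / kDefaultPoolsetName) and adopt legacy
// physical pools whose poolset id is still UNINTIALIZE_ID. The sketch below
// restates that behaviour with simplified stand-in types; everything in
// namespace sketch is illustrative, not the MDS implementation.
#include <cstdint>
#include <map>
#include <string>

namespace sketch {

constexpr uint32_t kUninitializedId = 0;   // stand-in for UNINTIALIZE_ID
constexpr uint32_t kDefaultPoolsetId = 1;  // stand-in for kDefaultPoolsetId

struct PoolsetLite { uint32_t id; std::string name; };
struct PhysicalPoolLite { uint32_t id; std::string name; uint32_t poolsetId; };

// Ensure the default poolset exists and attach any legacy pool to it.
inline void AdoptLegacyPools(std::map<uint32_t, PoolsetLite>* poolsets,
                             std::map<uint32_t, PhysicalPoolLite>* pools) {
    if (poolsets->count(kDefaultPoolsetId) == 0) {
        (*poolsets)[kDefaultPoolsetId] = {kDefaultPoolsetId, "default"};
    }
    for (auto& kv : *pools) {
        if (kv.second.poolsetId == kUninitializedId) {
            kv.second.poolsetId = kDefaultPoolsetId;
        }
    }
}

}  // namespace sketch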
EXPECT_CALL(*storage_, StoragePoolset(_)) + .WillOnce(Return(true)); + + int ret = topology_->AddPoolset(poolset); + ASSERT_EQ(kTopoErrCodeSuccess, ret); + } + void PrepareAddLogicalPool(PoolIdType id = 0x01, const std::string &name = "testLogicalPool", PoolIdType phyPoolId = 0x11, @@ -110,17 +122,20 @@ class TestTopologyChunkAllocator : public ::testing::Test { void PrepareAddPhysicalPool(PoolIdType id = 0x11, const std::string &name = "testPhysicalPool", + PoolsetIdType pid = 0x61, const std::string &desc = "descPhysicalPool", uint64_t diskCapacity = 10240) { PhysicalPool pool(id, name, + pid, desc); pool.SetDiskCapacity(diskCapacity); EXPECT_CALL(*storage_, StoragePhysicalPool(_)) .WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); - ASSERT_EQ(kTopoErrCodeSuccess, ret); + ASSERT_EQ(kTopoErrCodeSuccess, ret) + << "should have PrepareAddPoolset()"; } void PrepareAddZone(ZoneIdType id = 0x21, @@ -216,6 +231,7 @@ TEST_F(TestTopologyChunkAllocator, Test_AllocateChunkRandomInSingleLogicalPool_success) { std::vector infos; + PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; @@ -244,6 +260,7 @@ TEST_F(TestTopologyChunkAllocator, bool ret = testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, + "testPoolset", 2, 1024, &infos); @@ -262,6 +279,7 @@ TEST_F(TestTopologyChunkAllocator, std::vector infos; bool ret = testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, + "testPoolset", 1, 1024, &infos); @@ -277,6 +295,7 @@ TEST_F(TestTopologyChunkAllocator, PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); @@ -298,6 +317,7 @@ TEST_F(TestTopologyChunkAllocator, bool ret = testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, + "testPoolset", 2, 1024, &infos); @@ -310,6 +330,7 @@ TEST_F(TestTopologyChunkAllocator, ret = testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, + "testPoolset", 2, 1024, &infos); @@ -324,6 +345,7 @@ TEST_F(TestTopologyChunkAllocator, ret = testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, + "testPoolset", 2, 1024, &infos); @@ -339,6 +361,7 @@ TEST_F(TestTopologyChunkAllocator, PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); @@ -364,7 +387,8 @@ TEST_F(TestTopologyChunkAllocator, std::map enoughsize; std::vector pools ={0x01}; for (int i = 0; i < 10; i++) { - testObj_->GetRemainingSpaceInLogicalPool(pools, &enoughsize); + testObj_->GetRemainingSpaceInLogicalPool(pools, + &enoughsize, "testPoolset"); ASSERT_EQ(enoughsize[logicalPoolId], 1109); } } @@ -373,6 +397,7 @@ TEST_F(TestTopologyChunkAllocator, Test_AllocateChunkRoundRobinInSingleLogicalPool_success) { std::vector infos; + PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; @@ -404,6 +429,7 @@ TEST_F(TestTopologyChunkAllocator, bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, + "testPoolset", 3, 1024, &infos); @@ -419,6 +445,7 @@ TEST_F(TestTopologyChunkAllocator, std::vector infos2; ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, + "testPoolset", 3, 1024, &infos2); @@ -470,6 +497,7 @@ TEST_F(TestTopologyChunkAllocator, std::vector infos; bool 
ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, + "testPoolset", 1, 1024, &infos); @@ -480,6 +508,7 @@ TEST_F(TestTopologyChunkAllocator, TEST_F(TestTopologyChunkAllocator, Test_AllocateChunkRoundRobinInSingleLogicalPool_copysetEmpty) { std::vector infos; + PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; @@ -487,6 +516,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddLogicalPool(logicalPoolId); bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, + "testPoolset", 1, 1024, &infos); @@ -497,7 +527,7 @@ TEST_F(TestTopologyChunkAllocator, TEST_F(TestTopologyChunkAllocator, Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolIsDENY) { std::vector infos; - + PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; @@ -534,6 +564,7 @@ TEST_F(TestTopologyChunkAllocator, bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, + "testPoolset", 3, 1024, &infos); diff --git a/test/mds/topology/test_topology_helper.cpp b/test/mds/topology/test_topology_helper.cpp index a640b73c44..adcf1a6cf4 100644 --- a/test/mds/topology/test_topology_helper.cpp +++ b/test/mds/topology/test_topology_helper.cpp @@ -27,6 +27,14 @@ namespace curve { namespace mds { namespace topology { +bool JudgePoolsetEqual(const Poolset &lh, const Poolset &rh) { + if (lh.GetId() == rh.GetId() && lh.GetName() == rh.GetName() && + lh.GetType() == rh.GetType() && lh.GetDesc() == rh.GetDesc()) { + return true; + } + return false; +} + bool JudgeLogicalPoolEqual(const LogicalPool &lh, const LogicalPool &rh) { if (lh.GetId() == rh.GetId() && lh.GetName() == rh.GetName() && diff --git a/test/mds/topology/test_topology_helper.h b/test/mds/topology/test_topology_helper.h index 68dc6827df..7a439942d2 100644 --- a/test/mds/topology/test_topology_helper.h +++ b/test/mds/topology/test_topology_helper.h @@ -28,6 +28,7 @@ namespace curve { namespace mds { namespace topology { +bool JudgePoolsetEqual(const Poolset &lh, const Poolset &rh); bool JudgeLogicalPoolEqual(const LogicalPool &lh, const LogicalPool &rh); diff --git a/test/mds/topology/test_topology_item.cpp b/test/mds/topology/test_topology_item.cpp index 2af26ed9cd..935f9ec6c1 100644 --- a/test/mds/topology/test_topology_item.cpp +++ b/test/mds/topology/test_topology_item.cpp @@ -112,6 +112,49 @@ TEST_F(TestTopologyItem, Test_SetCopySetMembersByJson_Fail) { ASSERT_EQ(false, cInfo.SetCopySetMembersByJson(jsonStr)); } +TEST(TestPhysicalPool, SerializeAndDeserializeTest) { + { + PhysicalPoolData data; + data.set_physicalpoolid(1); + data.set_physicalpoolname("pool1"); + data.set_desc(""); + + std::string value; + ASSERT_TRUE(data.SerializeToString(&value)); + + PhysicalPool pool; + ASSERT_TRUE(pool.ParseFromString(value)); + + ASSERT_EQ(1, pool.GetId()); + ASSERT_EQ(UNINTIALIZE_ID, pool.GetPoolsetId()); + + ASSERT_TRUE(pool.SerializeToString(&value)); + ASSERT_TRUE(data.ParseFromString(value)); + ASSERT_FALSE(data.has_poolsetid()); + } + + { + PhysicalPoolData data; + data.set_physicalpoolid(1); + data.set_physicalpoolname("pool1"); + data.set_desc(""); + data.set_poolsetid(1); + + std::string value; + ASSERT_TRUE(data.SerializeToString(&value)); + + PhysicalPool pool; + ASSERT_TRUE(pool.ParseFromString(value)); + + ASSERT_EQ(1, pool.GetId()); + ASSERT_EQ(1, pool.GetPoolsetId()); + + ASSERT_TRUE(pool.SerializeToString(&value)); + ASSERT_TRUE(data.ParseFromString(value)); + ASSERT_TRUE(data.has_poolsetid()); + } +} + } // namespace topology } 
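// The SerializeAndDeserializeTest cases above document the compatibility
// contract for the new poolset field on PhysicalPoolData: poolsetid is
// optional, a record written without it parses back with
// PhysicalPool::GetPoolsetId() == UNINTIALIZE_ID, and re-serializing such a
// pool keeps has_poolsetid() false, so records written by an older MDS keep
// round-tripping unchanged while records that do carry poolsetid preserve it.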
// namespace mds } // namespace curve diff --git a/test/mds/topology/test_topology_metric.cpp b/test/mds/topology/test_topology_metric.cpp index b5638b77e0..87c97bb47a 100644 --- a/test/mds/topology/test_topology_metric.cpp +++ b/test/mds/topology/test_topology_metric.cpp @@ -70,6 +70,18 @@ class TestTopologyMetric : public ::testing::Test { testObj_ = nullptr; } + void PrepareAddPoolset(PoolsetIdType pid = 0x61, + const std::string& name = "ssdPoolset1", + const std::string& type = "SSD", + const std::string& desc = "descPoolset") { + Poolset poolset(pid, name, type, desc); + EXPECT_CALL(*storage_, StoragePoolset(_)) + .WillOnce(Return(true)); + + int ret = topology_->AddPoolset(poolset); + ASSERT_EQ(kTopoErrCodeSuccess, ret); + } + void PrepareAddLogicalPool(PoolIdType id = 0x01, const std::string &name = "testLogicalPool", PoolIdType phyPoolId = 0x11, @@ -100,9 +112,11 @@ class TestTopologyMetric : public ::testing::Test { void PrepareAddPhysicalPool(PoolIdType id = 0x11, const std::string &name = "testPhysicalPool", + PoolsetIdType pid = 0x61, const std::string &desc = "descPhysicalPool") { PhysicalPool pool(id, name, + pid, desc); EXPECT_CALL(*storage_, StoragePhysicalPool(_)) .WillOnce(Return(true)); @@ -193,10 +207,12 @@ class TestTopologyMetric : public ::testing::Test { }; TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { + PoolsetIdType poolsetId = 0x61; PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; + PrepareAddPoolset(poolsetId); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); @@ -356,6 +372,7 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { } TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { + PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; diff --git a/test/mds/topology/test_topology_service.cpp b/test/mds/topology/test_topology_service.cpp index 6c23187743..f10a3ffda0 100644 --- a/test/mds/topology/test_topology_service.cpp +++ b/test/mds/topology/test_topology_service.cpp @@ -480,6 +480,7 @@ TEST_F(TestTopologyService, test_RegistServer_success) { request.set_internalip("2"); request.set_externalip("3"); request.set_desc("4"); + request.set_poolsetname("ssdPoolset1"); ServerRegistResponse response; @@ -512,6 +513,7 @@ TEST_F(TestTopologyService, test_RegistServer_fail) { request.set_internalip("2"); request.set_externalip("3"); request.set_desc("4"); + request.set_poolsetname("ssdPoolset1"); ServerRegistResponse response; @@ -947,6 +949,7 @@ TEST_F(TestTopologyService, test_CreatePhysicalPool_success) { brpc::Controller cntl; PhysicalPoolRequest request; request.set_physicalpoolid(1); + request.set_poolsetname("ssdPoolset1"); PhysicalPoolResponse response; @@ -976,6 +979,7 @@ TEST_F(TestTopologyService, test_CreatePhysicalPool_fail) { brpc::Controller cntl; PhysicalPoolRequest request; request.set_physicalpoolid(1); + request.set_poolsetname("ssdPoolset1"); PhysicalPoolResponse response; @@ -1109,6 +1113,7 @@ TEST_F(TestTopologyService, test_GetPhysicalPool_fail) { ASSERT_EQ(kTopoErrCodeInvalidParam, response.statuscode()); } + TEST_F(TestTopologyService, test_ListPhysicalPool_success) { brpc::Channel channel; if (channel.Init(listenAddr_, NULL) != 0) { @@ -1165,6 +1170,37 @@ TEST_F(TestTopologyService, test_ListPhysicalPool_fail) { ASSERT_EQ(kTopoErrCodeInvalidParam, response.statuscode()); } 
+TEST_F(TestTopologyService, test_ListPhysicalPoolsInPoolset_success) { + brpc::Channel channel; + if (channel.Init(listenAddr_, NULL) != 0) { + FAIL() << "Fail to init channel " + << std::endl; + } + + TopologyService_Stub stub(&channel); + + brpc::Controller cntl; + ListPhysicalPoolsInPoolsetRequest request; + request.add_poolsetid(1); + request.add_poolsetid(2); + request.add_poolsetid(3); + + ListPhysicalPoolResponse response; + + ListPhysicalPoolResponse reps; + reps.set_statuscode(kTopoErrCodeSuccess); + EXPECT_CALL(*manager_, ListPhysicalPoolsInPoolset(_, _)) + .WillRepeatedly(SetArgPointee<1>(reps)); + + stub.ListPhysicalPoolsInPoolset(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) { + FAIL() << cntl.ErrorText() << std::endl; + } + + ASSERT_EQ(kTopoErrCodeSuccess, response.statuscode()); +} + TEST_F(TestTopologyService, test_CreateLogicalPool_success) { brpc::Channel channel; if (channel.Init(listenAddr_, NULL) != 0) { @@ -1504,6 +1540,184 @@ TEST_F(TestTopologyService, TestSetLogicalPoolScanState) { } } +TEST_F(TestTopologyService, test_CreatePoolset_success) { + brpc::Channel channel; + if (channel.Init(listenAddr_, NULL) != 0) { + FAIL() << "Fail to init channel " + << std::endl; + } + + TopologyService_Stub stub(&channel); + + brpc::Controller cntl; + PoolsetRequest request; + request.set_poolsetname("ssdPoolset1"); + + PoolsetResponse response; + + PoolsetResponse reps; + reps.set_statuscode(kTopoErrCodeSuccess); + EXPECT_CALL(*manager_, CreatePoolset(_, _)) + .WillRepeatedly(SetArgPointee<1>(reps)); + + stub.CreatePoolset(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) { + FAIL() << cntl.ErrorText() << std::endl; + } + + ASSERT_EQ(kTopoErrCodeSuccess, response.statuscode()); +} + + +TEST_F(TestTopologyService, test_CreatePoolset_fail) { + brpc::Channel channel; + if (channel.Init(listenAddr_, NULL) != 0) { + FAIL() << "Fail to init channel " + << std::endl; + } + + TopologyService_Stub stub(&channel); + + brpc::Controller cntl; + PoolsetRequest request; + request.set_poolsetname("ssdPoolset1"); + + PoolsetResponse response; + + PoolsetResponse reps; + reps.set_statuscode(kTopoErrCodeInvalidParam); + EXPECT_CALL(*manager_, CreatePoolset(_, _)) + .WillRepeatedly(SetArgPointee<1>(reps)); + + stub.CreatePoolset(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) { + FAIL() << cntl.ErrorText() << std::endl; + } + + ASSERT_EQ(kTopoErrCodeInvalidParam, response.statuscode()); +} + +TEST_F(TestTopologyService, test_DeletePoolset_success) { + brpc::Channel channel; + if (channel.Init(listenAddr_, NULL) != 0) { + FAIL() << "Fail to init channel " + << std::endl; + } + + TopologyService_Stub stub(&channel); + + brpc::Controller cntl; + PoolsetRequest request; + request.set_poolsetname("ssdPoolset1"); + + PoolsetResponse response; + + PoolsetResponse reps; + reps.set_statuscode(kTopoErrCodeSuccess); + EXPECT_CALL(*manager_, DeletePoolset(_, _)) + .WillRepeatedly(SetArgPointee<1>(reps)); + + stub.DeletePoolset(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) { + FAIL() << cntl.ErrorText() << std::endl; + } + + ASSERT_EQ(kTopoErrCodeSuccess, response.statuscode()); +} + + +TEST_F(TestTopologyService, test_DeletePoolset_fail) { + brpc::Channel channel; + if (channel.Init(listenAddr_, NULL) != 0) { + FAIL() << "Fail to init channel " + << std::endl; + } + + TopologyService_Stub stub(&channel); + + brpc::Controller cntl; + PoolsetRequest request; + request.set_poolsetname("ssdPoolset1"); + + PoolsetResponse response; + + PoolsetResponse reps; + 
reps.set_statuscode(kTopoErrCodeInvalidParam); + EXPECT_CALL(*manager_, DeletePoolset(_, _)) + .WillRepeatedly(SetArgPointee<1>(reps)); + + stub.DeletePoolset(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) { + FAIL() << cntl.ErrorText() << std::endl; + } + + ASSERT_EQ(kTopoErrCodeInvalidParam, response.statuscode()); +} + +TEST_F(TestTopologyService, test_GetPoolset_success) { + brpc::Channel channel; + if (channel.Init(listenAddr_, NULL) != 0) { + FAIL() << "Fail to init channel " + << std::endl; + } + + TopologyService_Stub stub(&channel); + + brpc::Controller cntl; + PoolsetRequest request; + request.set_poolsetname("ssdPoolset1"); + + PoolsetResponse response; + + PoolsetResponse reps; + reps.set_statuscode(kTopoErrCodeSuccess); + EXPECT_CALL(*manager_, GetPoolset(_, _)) + .WillRepeatedly(SetArgPointee<1>(reps)); + + stub.GetPoolset(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) { + FAIL() << cntl.ErrorText() << std::endl; + } + + ASSERT_EQ(kTopoErrCodeSuccess, response.statuscode()); +} + + +TEST_F(TestTopologyService, test_ListPoolset_success) { + brpc::Channel channel; + if (channel.Init(listenAddr_, NULL) != 0) { + FAIL() << "Fail to init channel " + << std::endl; + } + + TopologyService_Stub stub(&channel); + + brpc::Controller cntl; + ListPoolsetRequest request; + + ListPoolsetResponse response; + + ListPoolsetResponse reps; + reps.set_statuscode(kTopoErrCodeSuccess); + EXPECT_CALL(*manager_, ListPoolset(_, _)) + .WillRepeatedly(SetArgPointee<1>(reps)); + + stub.ListPoolset(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) { + FAIL() << cntl.ErrorText() << std::endl; + } + + ASSERT_EQ(kTopoErrCodeSuccess, response.statuscode()); +} + + + TEST_F(TestTopologyService, test_GetChunkServerListInCopySets_success) { brpc::Channel channel; if (channel.Init(listenAddr_, NULL) != 0) { diff --git a/test/mds/topology/test_topology_service_manager.cpp b/test/mds/topology/test_topology_service_manager.cpp index 3f7e2cfa92..3c21b0e5f1 100644 --- a/test/mds/topology/test_topology_service_manager.cpp +++ b/test/mds/topology/test_topology_service_manager.cpp @@ -109,6 +109,18 @@ class TestTopologyServiceManager : public ::testing::Test { } protected: + void PrepareAddPoolset(PoolsetIdType pid = 0x61, + const std::string& name = "testPoolset", + const std::string& type = "SSD", + const std::string& desc = "descPoolset") { + Poolset poolset(pid, name, type, desc); + EXPECT_CALL(*storage_, StoragePoolset(_)) + .WillOnce(Return(true)); + + int ret = topology_->AddPoolset(poolset); + ASSERT_EQ(kTopoErrCodeSuccess, ret); + } + void PrepareAddLogicalPool(PoolIdType id = 0x01, const std::string &name = "testLogicalPool", PoolIdType phyPoolId = 0x11, @@ -136,12 +148,13 @@ class TestTopologyServiceManager : public ::testing::Test { << "should have PrepareAddPhysicalPool()"; } - void PrepareAddPhysicalPool(PoolIdType id = 0x11, const std::string &name = "testPhysicalPool", + PoolsetIdType pid = 0x61, const std::string &desc = "descPhysicalPool") { PhysicalPool pool(id, name, + pid, desc); EXPECT_CALL(*storage_, StoragePhysicalPool(_)) .WillOnce(Return(true)); @@ -244,6 +257,7 @@ class TestTopologyServiceManager : public ::testing::Test { TEST_F(TestTopologyServiceManager, test_RegistChunkServer_SuccessWithExIp) { + PrepareAddPoolset(); ChunkServerIdType csId = 0x41; ServerIdType serverId = 0x31; std::string token = "token"; @@ -284,6 +298,7 @@ TEST_F(TestTopologyServiceManager, test_RegistChunkServer_SuccessWithExIp) { } TEST_F(TestTopologyServiceManager, 
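// The CreatePoolset/DeletePoolset/GetPoolset/ListPoolset cases above follow
// this file's existing service-test pattern: the TopologyServiceManager mock
// fills the response with a fixed status code, the request is driven through a
// real brpc channel into the in-process TopologyService, and only the
// propagated status code is asserted. They verify the RPC wiring for the new
// poolset interfaces rather than the poolset logic itself, which is exercised
// through PrepareAddPoolset() and test_create_default_poolset in
// test_topology.cpp.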
test_RegistChunkServer_ExIpNotMatch) { + PrepareAddPoolset(); ChunkServerIdType csId = 0x41; ServerIdType serverId = 0x31; std::string token = "token"; @@ -312,6 +327,7 @@ TEST_F(TestTopologyServiceManager, test_RegistChunkServer_ExIpNotMatch) { } TEST_F(TestTopologyServiceManager, test_RegistChunkServer_SuccessWithoutExIp) { + PrepareAddPoolset(); ChunkServerIdType csId = 0x41; ServerIdType serverId = 0x31; std::string token = "token"; @@ -350,6 +366,7 @@ TEST_F(TestTopologyServiceManager, test_RegistChunkServer_SuccessWithoutExIp) { } TEST_F(TestTopologyServiceManager, test_RegistChunkServer_ServerNotFound) { + PrepareAddPoolset(); ServerIdType serverId = 0x31; std::string token = "token"; @@ -371,6 +388,7 @@ TEST_F(TestTopologyServiceManager, test_RegistChunkServer_ServerNotFound) { } TEST_F(TestTopologyServiceManager, test_RegistChunkServer_AllocateIdFail) { + PrepareAddPoolset(); ServerIdType serverId = 0x31; std::string token = "token"; @@ -395,6 +413,7 @@ TEST_F(TestTopologyServiceManager, test_RegistChunkServer_AllocateIdFail) { } TEST_F(TestTopologyServiceManager, test_RegistChunkServer_AddChunkServerFail) { + PrepareAddPoolset(); ChunkServerIdType csId = 0x41; ServerIdType serverId = 0x31; std::string token = "token"; @@ -426,6 +445,7 @@ TEST_F(TestTopologyServiceManager, test_RegistChunkServer_AddChunkServerFail) { } TEST_F(TestTopologyServiceManager, test_ListChunkServer_ByIdSuccess) { + PrepareAddPoolset(); ChunkServerIdType csId1 = 0x41; ChunkServerIdType csId2 = 0x42; ServerIdType serverId = 0x31; @@ -463,6 +483,7 @@ TEST_F(TestTopologyServiceManager, test_ListChunkServer_ByIdSuccess) { } TEST_F(TestTopologyServiceManager, test_ListChunkServer_ByIpSuccess) { + PrepareAddPoolset(); ChunkServerIdType csId1 = 0x41; ChunkServerIdType csId2 = 0x42; ServerIdType serverId = 0x31; @@ -500,6 +521,7 @@ TEST_F(TestTopologyServiceManager, test_ListChunkServer_ByIpSuccess) { } TEST_F(TestTopologyServiceManager, test_ListChunkServer_ServerNotFound) { + PrepareAddPoolset(); ChunkServerIdType csId1 = 0x41; ChunkServerIdType csId2 = 0x42; ServerIdType serverId = 0x31; @@ -523,6 +545,7 @@ TEST_F(TestTopologyServiceManager, test_ListChunkServer_ServerNotFound) { } TEST_F(TestTopologyServiceManager, test_ListChunkServer_IpServerNotFound) { + PrepareAddPoolset(); ChunkServerIdType csId1 = 0x41; ChunkServerIdType csId2 = 0x42; ServerIdType serverId = 0x31; @@ -557,6 +580,7 @@ TEST_F(TestTopologyServiceManager, test_ListChunkServer_InvalidParam) { } TEST_F(TestTopologyServiceManager, test_GetChunkServer_ByIdSuccess) { + PrepareAddPoolset(); ChunkServerIdType csId1 = 0x41; ServerIdType serverId = 0x31; @@ -582,6 +606,7 @@ TEST_F(TestTopologyServiceManager, test_GetChunkServer_ByIdSuccess) { } TEST_F(TestTopologyServiceManager, test_GetChunkServer_ByIpSuccess) { + PrepareAddPoolset(); ChunkServerIdType csId1 = 0x41; ServerIdType serverId = 0x31; @@ -608,6 +633,7 @@ TEST_F(TestTopologyServiceManager, test_GetChunkServer_ByIpSuccess) { } TEST_F(TestTopologyServiceManager, test_GetChunkServer_ChunkServerNotFound) { + PrepareAddPoolset(); ChunkServerIdType csId1 = 0x41; ServerIdType serverId = 0x31; @@ -628,6 +654,7 @@ TEST_F(TestTopologyServiceManager, test_GetChunkServer_ChunkServerNotFound) { TEST_F(TestTopologyServiceManager, test_GetChunkServer_ByIpChunkServerNotFound) { + PrepareAddPoolset(); ChunkServerIdType csId1 = 0x41; ServerIdType serverId = 0x31; @@ -661,6 +688,7 @@ TEST_F(TestTopologyServiceManager, test_GetChunkServerInCluster_Success) { ChunkServerIdType csId1 = 0x41, csId2 = 0x42; 
ServerIdType serverId1 = 0x31, serverId2 = 0x32; + PrepareAddPoolset(); PrepareAddPhysicalPool(); PrepareAddZone(); PrepareAddServer(serverId1, "server1", "ip1", "ip2"); @@ -677,6 +705,7 @@ TEST_F(TestTopologyServiceManager, test_GetChunkServerInCluster_Success) { } TEST_F(TestTopologyServiceManager, test_DeleteChunkServer_success) { + PrepareAddPoolset(); ChunkServerIdType csId1 = 0x41; ServerIdType serverId = 0x31; @@ -702,6 +731,7 @@ TEST_F(TestTopologyServiceManager, test_DeleteChunkServer_success) { } TEST_F(TestTopologyServiceManager, test_DeleteChunkServer_ChunkServerNotFound) { + PrepareAddPoolset(); ChunkServerIdType csId1 = 0x41; ServerIdType serverId = 0x31; @@ -720,6 +750,7 @@ TEST_F(TestTopologyServiceManager, test_DeleteChunkServer_ChunkServerNotFound) { } TEST_F(TestTopologyServiceManager, test_SetChunkServer_Success) { + PrepareAddPoolset(); ChunkServerIdType csId1 = 0x41; ServerIdType serverId = 0x31; @@ -740,6 +771,7 @@ TEST_F(TestTopologyServiceManager, test_SetChunkServer_Success) { } TEST_F(TestTopologyServiceManager, test_SetChunkServer_ChunkServerNotFound) { + PrepareAddPoolset(); ChunkServerIdType csId1 = 0x41; ServerIdType serverId = 0x31; @@ -759,6 +791,7 @@ TEST_F(TestTopologyServiceManager, test_SetChunkServer_ChunkServerNotFound) { } TEST_F(TestTopologyServiceManager, test_RegistServer_ByZoneAndPoolIdSuccess) { + PrepareAddPoolset(); ServerIdType id = 0x31; PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; @@ -788,6 +821,7 @@ TEST_F(TestTopologyServiceManager, test_RegistServer_ByZoneAndPoolIdSuccess) { } TEST_F(TestTopologyServiceManager, test_RegistServer_ByZoneAndPoolNameSuccess) { + PrepareAddPoolset(); ServerIdType id = 0x31; PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; @@ -817,6 +851,7 @@ TEST_F(TestTopologyServiceManager, test_RegistServer_ByZoneAndPoolNameSuccess) { } TEST_F(TestTopologyServiceManager, test_RegistServer_PhysicalPoolNotFound) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId); @@ -840,7 +875,7 @@ TEST_F(TestTopologyServiceManager, test_RegistServer_PhysicalPoolNotFound) { TEST_F(TestTopologyServiceManager, test_RegistServer_ByNamePhysicalPoolNotFound) { - ServerIdType id = 0x31; + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId, "PhysicalPool1"); @@ -863,6 +898,7 @@ TEST_F(TestTopologyServiceManager, } TEST_F(TestTopologyServiceManager, test_RegistServer_ZoneNotFound) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId); @@ -885,7 +921,7 @@ TEST_F(TestTopologyServiceManager, test_RegistServer_ZoneNotFound) { } TEST_F(TestTopologyServiceManager, test_RegistServer_ByNameZoneNotFound) { - ServerIdType id = 0x31; + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId, "PhysicalPool1"); @@ -908,6 +944,7 @@ TEST_F(TestTopologyServiceManager, test_RegistServer_ByNameZoneNotFound) { } TEST_F(TestTopologyServiceManager, test_RegistServer_InvalidParam) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId, "PhysicalPool1"); @@ -924,6 +961,7 @@ TEST_F(TestTopologyServiceManager, test_RegistServer_InvalidParam) { TEST_F(TestTopologyServiceManager, test_RegistServer_InvalidParamMissingZoneIdAndName) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; 
PrepareAddPhysicalPool(physicalPoolId); @@ -946,7 +984,7 @@ TEST_F(TestTopologyServiceManager, TEST_F(TestTopologyServiceManager, test_RegistServer_InvalidParamMissingPhysicalPoolIdAndName) { - ServerIdType id = 0x31; + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId, "PhysicalPool1"); @@ -968,7 +1006,7 @@ TEST_F(TestTopologyServiceManager, } TEST_F(TestTopologyServiceManager, test_RegistServer_AllocateIdFail) { - ServerIdType id = 0x31; + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId); @@ -993,6 +1031,7 @@ TEST_F(TestTopologyServiceManager, test_RegistServer_AllocateIdFail) { } TEST_F(TestTopologyServiceManager, test_RegistServer_AddServerFail) { + PrepareAddPoolset(); ServerIdType id = 0x31; PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; @@ -1020,6 +1059,7 @@ TEST_F(TestTopologyServiceManager, test_RegistServer_AddServerFail) { } TEST_F(TestTopologyServiceManager, test_GetServer_ByIdSuccess) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1052,6 +1092,7 @@ TEST_F(TestTopologyServiceManager, test_GetServer_ByIdSuccess) { } TEST_F(TestTopologyServiceManager, test_GetServer_ByNameSuccess) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1084,6 +1125,7 @@ TEST_F(TestTopologyServiceManager, test_GetServer_ByNameSuccess) { } TEST_F(TestTopologyServiceManager, test_GetServer_ByInternalIpSuccess) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1116,6 +1158,7 @@ TEST_F(TestTopologyServiceManager, test_GetServer_ByInternalIpSuccess) { } TEST_F(TestTopologyServiceManager, test_GetServer_ByExternalIpSuccess) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1148,6 +1191,7 @@ TEST_F(TestTopologyServiceManager, test_GetServer_ByExternalIpSuccess) { } TEST_F(TestTopologyServiceManager, test_GetServer_ServerNotFound) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1173,6 +1217,7 @@ TEST_F(TestTopologyServiceManager, test_GetServer_ServerNotFound) { } TEST_F(TestTopologyServiceManager, test_GetServer_ByNameServerNotFound) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1198,6 +1243,7 @@ TEST_F(TestTopologyServiceManager, test_GetServer_ByNameServerNotFound) { } TEST_F(TestTopologyServiceManager, test_GetServer_ByIpServerNotFound) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1223,6 +1269,7 @@ TEST_F(TestTopologyServiceManager, test_GetServer_ByIpServerNotFound) { } TEST_F(TestTopologyServiceManager, test_DeleteServer_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1254,7 +1301,10 @@ TEST_F(TestTopologyServiceManager, test_ListZoneServer_ByIdSuccess) { ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; ServerIdType serverId2 = 0x32; - PrepareAddPhysicalPool(physicalPoolId); + PoolsetIdType poolsetId = 0x61; + + PrepareAddPoolset(poolsetId); + PrepareAddPhysicalPool(physicalPoolId, "testPool", poolsetId); PrepareAddZone(zoneId); PrepareAddServer(serverId, "hostname1", @@ -1300,6 
+1350,7 @@ TEST_F(TestTopologyServiceManager, test_ListZoneServer_ByIdSuccess) { } TEST_F(TestTopologyServiceManager, test_ListZoneServer_ByNameSuccess) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1351,6 +1402,7 @@ TEST_F(TestTopologyServiceManager, test_ListZoneServer_ByNameSuccess) { } TEST_F(TestTopologyServiceManager, test_ListZoneServer_ZoneNotFound) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1383,6 +1435,7 @@ TEST_F(TestTopologyServiceManager, test_ListZoneServer_ZoneNotFound) { } TEST_F(TestTopologyServiceManager, test_ListZoneServer_ByNameZoneNotFound) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1416,6 +1469,7 @@ TEST_F(TestTopologyServiceManager, test_ListZoneServer_ByNameZoneNotFound) { } TEST_F(TestTopologyServiceManager, test_ListZoneServer_InvalidParam) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; @@ -1447,6 +1501,7 @@ TEST_F(TestTopologyServiceManager, test_ListZoneServer_InvalidParam) { } TEST_F(TestTopologyServiceManager, test_CreateZone_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId, "poolname1"); @@ -1475,8 +1530,8 @@ TEST_F(TestTopologyServiceManager, test_CreateZone_success) { } TEST_F(TestTopologyServiceManager, test_CreateZone_AllocateIdFail) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; - ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId, "poolname1"); ZoneRequest request; @@ -1494,6 +1549,7 @@ TEST_F(TestTopologyServiceManager, test_CreateZone_AllocateIdFail) { } TEST_F(TestTopologyServiceManager, test_CreateZone_PhysicalPoolNotFound) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId, "poolname1"); @@ -1510,6 +1566,7 @@ TEST_F(TestTopologyServiceManager, test_CreateZone_PhysicalPoolNotFound) { } TEST_F(TestTopologyServiceManager, test_CreateZone_InvalidParam) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId, "poolname1"); @@ -1522,6 +1579,7 @@ TEST_F(TestTopologyServiceManager, test_CreateZone_InvalidParam) { } TEST_F(TestTopologyServiceManager, test_CreateZone_AddZoneFail) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId, "poolname1"); @@ -1544,6 +1602,7 @@ TEST_F(TestTopologyServiceManager, test_CreateZone_AddZoneFail) { } TEST_F(TestTopologyServiceManager, test_DeleteZone_Success) { + PrepareAddPoolset(); ZoneIdType zoneId = 0x21; PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); @@ -1564,6 +1623,7 @@ TEST_F(TestTopologyServiceManager, test_DeleteZone_Success) { } TEST_F(TestTopologyServiceManager, test_DeleteZone_ByNameSuccess) { + PrepareAddPoolset(); ZoneIdType zoneId = 0x21; PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId, "pool1"); @@ -1585,6 +1645,7 @@ TEST_F(TestTopologyServiceManager, test_DeleteZone_ByNameSuccess) { } TEST_F(TestTopologyServiceManager, test_DeleteZone_ZoneNotFound) { + PrepareAddPoolset(); ZoneIdType zoneId = 0x21; PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); @@ -1602,6 +1663,7 @@ TEST_F(TestTopologyServiceManager, test_DeleteZone_ZoneNotFound) { } TEST_F(TestTopologyServiceManager, test_DeleteZone_ByNameFail) { + PrepareAddPoolset(); 
ZoneIdType zoneId = 0x21; PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId, "pool1"); @@ -1620,6 +1682,7 @@ TEST_F(TestTopologyServiceManager, test_DeleteZone_ByNameFail) { } TEST_F(TestTopologyServiceManager, test_DeleteZone_InvalidParam) { + PrepareAddPoolset(); ZoneIdType zoneId = 0x21; PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); @@ -1628,7 +1691,6 @@ TEST_F(TestTopologyServiceManager, test_DeleteZone_InvalidParam) { poolId); ZoneRequest request; - ZoneResponse response; serviceManager_->DeleteZone(&request, &response); @@ -1636,6 +1698,7 @@ TEST_F(TestTopologyServiceManager, test_DeleteZone_InvalidParam) { } TEST_F(TestTopologyServiceManager, test_GetZone_Success) { + PrepareAddPoolset(); ZoneIdType zoneId = 0x21; PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); @@ -1659,6 +1722,7 @@ TEST_F(TestTopologyServiceManager, test_GetZone_Success) { } TEST_F(TestTopologyServiceManager, test_GetZone_ByNameSuccess) { + PrepareAddPoolset(); ZoneIdType zoneId = 0x21; PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId, "pool1"); @@ -1683,6 +1747,7 @@ TEST_F(TestTopologyServiceManager, test_GetZone_ByNameSuccess) { } TEST_F(TestTopologyServiceManager, test_GetZone_ZoneNotFound) { + PrepareAddPoolset(); ZoneIdType zoneId = 0x21; PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); @@ -1702,6 +1767,7 @@ TEST_F(TestTopologyServiceManager, test_GetZone_ZoneNotFound) { } TEST_F(TestTopologyServiceManager, test_GetZone_ByNameZoneNotFound) { + PrepareAddPoolset(); ZoneIdType zoneId = 0x21; PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId, "pool1"); @@ -1721,6 +1787,7 @@ TEST_F(TestTopologyServiceManager, test_GetZone_ByNameZoneNotFound) { } TEST_F(TestTopologyServiceManager, test_GetZone_InvalidParam) { + PrepareAddPoolset(); ZoneIdType zoneId = 0x21; PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); @@ -1739,6 +1806,7 @@ TEST_F(TestTopologyServiceManager, test_GetZone_InvalidParam) { } TEST_F(TestTopologyServiceManager, test_ListPoolZone_ByIdSuccess) { + PrepareAddPoolset(); ZoneIdType zoneId = 0x21; ZoneIdType zoneId2 = 0x22; PoolIdType poolId = 0x11; @@ -1774,6 +1842,7 @@ TEST_F(TestTopologyServiceManager, test_ListPoolZone_ByIdSuccess) { } TEST_F(TestTopologyServiceManager, test_ListPoolZone_ByNameSuccess) { + PrepareAddPoolset(); ZoneIdType zoneId = 0x21; ZoneIdType zoneId2 = 0x22; PoolIdType poolId = 0x11; @@ -1809,6 +1878,7 @@ TEST_F(TestTopologyServiceManager, test_ListPoolZone_ByNameSuccess) { } TEST_F(TestTopologyServiceManager, test_ListPoolZone_ByNameFail) { + PrepareAddPoolset(); ZoneIdType zoneId = 0x21; ZoneIdType zoneId2 = 0x22; PoolIdType poolId = 0x11; @@ -1833,6 +1903,7 @@ TEST_F(TestTopologyServiceManager, test_ListPoolZone_ByNameFail) { } TEST_F(TestTopologyServiceManager, test_ListPoolZone_PhysicalPoolNotFound) { + PrepareAddPoolset(); ZoneIdType zoneId = 0x21; ZoneIdType zoneId2 = 0x22; PoolIdType poolId = 0x11; @@ -1858,6 +1929,7 @@ TEST_F(TestTopologyServiceManager, test_ListPoolZone_PhysicalPoolNotFound) { } TEST_F(TestTopologyServiceManager, test_ListPoolZone_InvalidParam) { + PrepareAddPoolset(); ZoneIdType zoneId = 0x21; ZoneIdType zoneId2 = 0x22; PoolIdType poolId = 0x11; @@ -1884,11 +1956,16 @@ TEST_F(TestTopologyServiceManager, test_ListPoolZone_InvalidParam) { TEST_F(TestTopologyServiceManager, test_createPhysicalPool_Success) { PhysicalPoolRequest request; request.set_physicalpoolname("default"); + request.set_poolsetname("ssdPoolset1"); request.set_desc("just for test"); + PoolsetIdType poolsetId = 0x61; PoolIdType 
physicalPoolId = 0x12; + PrepareAddPoolset(poolsetId, "ssdPoolset1"); + EXPECT_CALL(*idGenerator_, GenPhysicalPoolId()) .WillOnce(Return(physicalPoolId)); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)) .WillOnce(Return(true)); @@ -1900,10 +1977,13 @@ TEST_F(TestTopologyServiceManager, test_createPhysicalPool_Success) { ASSERT_EQ(physicalPoolId, response.physicalpoolinfo().physicalpoolid()); ASSERT_EQ(request.physicalpoolname(), response.physicalpoolinfo().physicalpoolname()); + ASSERT_EQ(poolsetId, response.physicalpoolinfo().poolsetid()); + ASSERT_EQ(request.poolsetname(), + response.physicalpoolinfo().poolsetname()); ASSERT_EQ(request.desc(), response.physicalpoolinfo().desc()); } -TEST_F(TestTopologyServiceManager, test_createPhysicalPool_InvalidParam) { +TEST_F(TestTopologyServiceManager, test_CreatePhysicalPool_InvalidParam) { PhysicalPoolRequest request; PhysicalPoolResponse response; serviceManager_->CreatePhysicalPool(&request, &response); @@ -1912,12 +1992,14 @@ TEST_F(TestTopologyServiceManager, test_createPhysicalPool_InvalidParam) { ASSERT_TRUE(false == response.has_physicalpoolinfo()); } -TEST_F(TestTopologyServiceManager, test_createPhysicalPool_Fail) { +TEST_F(TestTopologyServiceManager, test_CreatePhysicalPool_AllocateIdFail) { + PrepareAddPoolset(); PhysicalPoolRequest request; PhysicalPoolResponse response; request.set_physicalpoolname("default"); request.set_desc("just for test"); + request.set_poolsetname("testPoolset"); EXPECT_CALL(*idGenerator_, GenPhysicalPoolId()) .WillOnce(Return(UNINTIALIZE_ID)); @@ -1928,14 +2010,18 @@ TEST_F(TestTopologyServiceManager, test_createPhysicalPool_Fail) { ASSERT_TRUE(false == response.has_physicalpoolinfo()); } -TEST_F(TestTopologyServiceManager, test_createPhysicalPool_StorageFail) { +TEST_F(TestTopologyServiceManager, test_CreatePhysicalPool_StorageFail) { PhysicalPoolRequest request; PhysicalPoolResponse response; request.set_physicalpoolname("default"); + request.set_poolsetname("ssdPoolset1"); request.set_desc("just for test"); + PoolsetIdType poolsetId = 0x61; PoolIdType physicalPoolId = 0x12; + PrepareAddPoolset(poolsetId, "ssdPoolset1"); + EXPECT_CALL(*idGenerator_, GenPhysicalPoolId()) .WillOnce(Return(physicalPoolId)); EXPECT_CALL(*storage_, StoragePhysicalPool(_)) @@ -1944,15 +2030,57 @@ TEST_F(TestTopologyServiceManager, test_createPhysicalPool_StorageFail) { serviceManager_->CreatePhysicalPool(&request, &response); ASSERT_EQ(kTopoErrCodeStorgeFail, response.statuscode()); - ASSERT_TRUE(false == response.has_physicalpoolinfo()); + ASSERT_FALSE(response.has_physicalpoolinfo()); +} + +TEST_F(TestTopologyServiceManager, test_CreatePhysicalPool_PoolsetNotFound) { + PoolsetIdType poolsetId = 0x61; + PrepareAddPoolset(poolsetId, "ssdPoolset1"); + + PhysicalPoolRequest request; + request.set_poolsetname("ssd1"); + request.set_physicalpoolname("pool1"); + request.set_desc("desc1"); + + PhysicalPoolResponse response; + + serviceManager_->CreatePhysicalPool(&request, &response); + ASSERT_EQ(kTopoErrCodePoolsetNotFound, response.statuscode()); + ASSERT_FALSE(response.has_physicalpoolinfo()); +} + +TEST_F(TestTopologyServiceManager, + test_CreatePhysicalPool_AddPhysicalPoolFail) { + PoolsetIdType poolsetId = 0x61; + PoolIdType physicalPoolId = 0x11; + PrepareAddPoolset(poolsetId, "ssdPoolset1"); + + PhysicalPoolRequest request; + request.set_poolsetname("ssdPoolset1"); + request.set_physicalpoolname("poolname1"); + request.set_desc("desc1"); + + EXPECT_CALL(*idGenerator_, GenPhysicalPoolId()) + .WillOnce(Return(physicalPoolId)); + 
+ EXPECT_CALL(*storage_, StoragePhysicalPool(_)) + .WillOnce(Return(false)); + + PhysicalPoolResponse response; + + serviceManager_->CreatePhysicalPool(&request, &response); + ASSERT_EQ(kTopoErrCodeStorgeFail, response.statuscode()); } TEST_F(TestTopologyServiceManager, test_DeletePhysicalPool_ByIdSuccess) { - PoolIdType pid = 0x12; + PoolIdType id = 0x12; + PoolsetIdType pid = 0x61; + PrepareAddPoolset(pid); + PrepareAddPhysicalPool(id, "default", pid); + PhysicalPoolRequest request; - request.set_physicalpoolid(pid); + request.set_physicalpoolid(id); - PrepareAddPhysicalPool(pid, "default"); EXPECT_CALL(*storage_, DeletePhysicalPool(_)) .WillOnce(Return(true)); @@ -1963,13 +2091,14 @@ TEST_F(TestTopologyServiceManager, test_DeletePhysicalPool_ByIdSuccess) { ASSERT_EQ(kTopoErrCodeSuccess, response.statuscode()); } -TEST_F(TestTopologyServiceManager, - test_DeletePhysicalPool_PhysicalPoolNotFound) { - PoolIdType pid = 0x12; - PhysicalPoolRequest request; - request.set_physicalpoolid(pid); +TEST_F(TestTopologyServiceManager, test_DeletePhysicalPooById_NotFound) { + PoolIdType id = 0x12; + PoolsetIdType pid = 0x61; + PrepareAddPoolset(pid); + PrepareAddPhysicalPool(id, "default"); - PrepareAddPhysicalPool(++pid, "default"); + PhysicalPoolRequest request; + request.set_physicalpoolid(++id); PhysicalPoolResponse response; serviceManager_->DeletePhysicalPool(&request, &response); @@ -1977,12 +2106,33 @@ TEST_F(TestTopologyServiceManager, ASSERT_EQ(kTopoErrCodePhysicalPoolNotFound, response.statuscode()); } +TEST_F(TestTopologyServiceManager, test_DeletePhysicalPooByName_Success) { + PrepareAddPoolset(); + PoolIdType id = 0x12; + PrepareAddPhysicalPool(id, "default"); + + PhysicalPoolRequest request; + request.set_physicalpoolname("default"); + request.set_poolsetname("testPoolset"); + + PhysicalPoolResponse response; + EXPECT_CALL(*storage_, DeletePhysicalPool(_)) + .WillOnce(Return(true)); + serviceManager_->DeletePhysicalPool(&request, &response); + + ASSERT_EQ(kTopoErrCodeSuccess, response.statuscode()) + << response.DebugString(); +} + TEST_F(TestTopologyServiceManager, test_DeletePhysicalPool_StorageFail) { - PoolIdType pid = 0x12; + PoolIdType id = 0x12; + PoolsetIdType pid = 0x61; + PrepareAddPoolset(pid); + PhysicalPoolRequest request; - request.set_physicalpoolid(pid); + request.set_physicalpoolid(id); - PrepareAddPhysicalPool(pid, "default"); + PrepareAddPhysicalPool(id, "default"); EXPECT_CALL(*storage_, DeletePhysicalPool(_)) .WillOnce(Return(false)); @@ -1993,11 +2143,13 @@ TEST_F(TestTopologyServiceManager, test_DeletePhysicalPool_StorageFail) { ASSERT_EQ(kTopoErrCodeStorgeFail, response.statuscode()); } -TEST_F(TestTopologyServiceManager, test_DeletePhysicalPool_ByNameSuccess) { +TEST_F(TestTopologyServiceManager, test_DeletePhysicalPoolByName_Success) { std::string physicalPoolName = "testpool1"; + std::string poolsetName = "ssdPoolset1"; PhysicalPoolRequest request; request.set_physicalpoolname(physicalPoolName); - + request.set_poolsetname(poolsetName); + PrepareAddPoolset(0x61, poolsetName); PrepareAddPhysicalPool(0x12, physicalPoolName); EXPECT_CALL(*storage_, DeletePhysicalPool(_)) @@ -2009,11 +2161,47 @@ TEST_F(TestTopologyServiceManager, test_DeletePhysicalPool_ByNameSuccess) { ASSERT_EQ(kTopoErrCodeSuccess, response.statuscode()); } -TEST_F(TestTopologyServiceManager, test_DeletePhysicalPool_ByNameFail) { +TEST_F(TestTopologyServiceManager, + test_DeletePhysicalPoolWithoutPoolsetName_Fail) { std::string physicalPoolName = "testpool1"; + std::string poolsetName = "ssd1"; 
+ PhysicalPoolRequest request; + request.set_physicalpoolname("testpool1"); + PrepareAddPoolset(0x61, poolsetName); + PrepareAddPhysicalPool(0x12, physicalPoolName); + + PhysicalPoolResponse response; + serviceManager_->DeletePhysicalPool(&request, &response); + + ASSERT_EQ(kTopoErrCodeInvalidParam , response.statuscode()); +} + +TEST_F(TestTopologyServiceManager, + test_DeletePhysicalPoolwithWrongPhysicalName_Fail) { + std::string physicalPoolName = "testpool1"; + std::string poolsetName = "ssd1"; PhysicalPoolRequest request; request.set_physicalpoolname("testpool2"); + request.set_poolsetname("ssd1"); + + PrepareAddPoolset(0x61, "ssd1"); + PrepareAddPhysicalPool(0x12, physicalPoolName); + + PhysicalPoolResponse response; + serviceManager_->DeletePhysicalPool(&request, &response); + + ASSERT_EQ(kTopoErrCodePhysicalPoolNotFound, response.statuscode()); +} + +TEST_F(TestTopologyServiceManager, +test_DeletePhysicalPoolwithCorrectPhysicalNameAndWrongPoolsetName_Fail) { + std::string physicalPoolName = "testpool1"; + std::string psName = "ssd1"; + PhysicalPoolRequest request; + request.set_physicalpoolname("testpool1"); + request.set_poolsetname("ssd3"); + PrepareAddPoolset(0x61); PrepareAddPhysicalPool(0x12, physicalPoolName); PhysicalPoolResponse response; @@ -2022,7 +2210,9 @@ TEST_F(TestTopologyServiceManager, test_DeletePhysicalPool_ByNameFail) { ASSERT_EQ(kTopoErrCodePhysicalPoolNotFound, response.statuscode()); } -TEST_F(TestTopologyServiceManager, test_DeletePhysicalPool_InvalidParam) { +TEST_F(TestTopologyServiceManager, + test_DeletePhysicalPool_InvalidParam_WithoutPhysicalPoolName) { + PrepareAddPoolset(); std::string physicalPoolName = "testpool1"; PhysicalPoolRequest request; @@ -2035,6 +2225,7 @@ TEST_F(TestTopologyServiceManager, test_DeletePhysicalPool_InvalidParam) { } TEST_F(TestTopologyServiceManager, test_GetPhysicalPool_ByIdSuccess) { + PrepareAddPoolset(); PoolIdType pid = 0x12; std::string pName = "test1"; @@ -2054,6 +2245,7 @@ TEST_F(TestTopologyServiceManager, test_GetPhysicalPool_ByIdSuccess) { TEST_F(TestTopologyServiceManager, test_GetPhysicalPool_InvalidParam) { + PrepareAddPoolset(); PoolIdType pid = 0x12; PhysicalPoolRequest request; @@ -2067,6 +2259,7 @@ TEST_F(TestTopologyServiceManager, test_GetPhysicalPool_InvalidParam) { } TEST_F(TestTopologyServiceManager, test_GetPhysicalPool_PhysicalPoolNotFound) { + PrepareAddPoolset(); PoolIdType pid = 0x12; PhysicalPoolRequest request; request.set_physicalpoolid(pid); @@ -2081,6 +2274,7 @@ TEST_F(TestTopologyServiceManager, test_GetPhysicalPool_PhysicalPoolNotFound) { } TEST_F(TestTopologyServiceManager, test_GetPhysicalPool_ByNameSuccess) { + PrepareAddPoolset(); PoolIdType pid = 0x12; std::string pName = "test1"; @@ -2099,6 +2293,7 @@ TEST_F(TestTopologyServiceManager, test_GetPhysicalPool_ByNameSuccess) { } TEST_F(TestTopologyServiceManager, test_GetPhysicalPool_ByNameFail) { + PrepareAddPoolset(); PoolIdType pid = 0x12; std::string pName = "test1"; @@ -2116,7 +2311,7 @@ TEST_F(TestTopologyServiceManager, test_GetPhysicalPool_ByNameFail) { TEST_F(TestTopologyServiceManager, test_listPhysicalPool_success) { ListPhysicalPoolRequest request; ListPhysicalPoolResponse response; - + PrepareAddPoolset(); PrepareAddPhysicalPool(0x01, "test1"); PrepareAddPhysicalPool(0x02, "test2"); @@ -2129,10 +2324,15 @@ TEST_F(TestTopologyServiceManager, test_listPhysicalPool_success) { AnyOf(0x01, 0x02)); ASSERT_THAT(response.physicalpoolinfos(0).physicalpoolname(), AnyOf("test1", "test2")); + 
ASSERT_THAT(response.physicalpoolinfos(0).poolsetid(), 0x61); + ASSERT_THAT(response.physicalpoolinfos(0).poolsetname(), "testPoolset"); + ASSERT_THAT(response.physicalpoolinfos(1).physicalpoolid(), AnyOf(0x01, 0x02)); ASSERT_THAT(response.physicalpoolinfos(1).physicalpoolname(), AnyOf("test1", "test2")); + ASSERT_THAT(response.physicalpoolinfos(1).poolsetid(), 0x61); + ASSERT_THAT(response.physicalpoolinfos(1).poolsetname(), "testPoolset"); } static void CreateCopysetNodeFunc(::google::protobuf::RpcController *controller, @@ -2143,8 +2343,134 @@ static void CreateCopysetNodeFunc(::google::protobuf::RpcController *controller, brpc::ClosureGuard doneGuard(done); } +TEST_F(TestTopologyServiceManager, + test_ListPhysicalPoolsInPoolset_ByIdSuccess) { + PoolIdType poolId1 = 0x11; + PoolIdType poolId2 = 0x12; + PoolIdType poolId3 = 0x13; + PoolIdType poolId4 = 0x14; + PoolsetIdType poolsetId1 = 0x61; + PoolsetIdType poolsetId2 = 0x62; + + PrepareAddPoolset(poolsetId1, "ssd1"); + PrepareAddPoolset(poolsetId2, "ssd2"); + + PrepareAddPhysicalPool(poolId1, "pool1", poolsetId1, "desc1"); + PrepareAddPhysicalPool(poolId2, "pool2", poolsetId1, "desc2"); + PrepareAddPhysicalPool(poolId3, "pool3", poolsetId2, "desc3"); + PrepareAddPhysicalPool(poolId4, "pool4", poolsetId2, "desc4"); + + ListPhysicalPoolsInPoolsetRequest request; + request.add_poolsetid(poolsetId1); + request.add_poolsetid(poolsetId2); + + ListPhysicalPoolResponse response; + serviceManager_->ListPhysicalPoolsInPoolset(&request, &response); + + ASSERT_EQ(kTopoErrCodeSuccess, response.statuscode()); + ASSERT_EQ(4, response.physicalpoolinfos_size()); + + ASSERT_THAT(response.physicalpoolinfos(0).physicalpoolid(), + AnyOf(poolId1, poolId2, poolId3, poolId4)); + ASSERT_THAT(response.physicalpoolinfos(0).physicalpoolname(), + AnyOf("pool1", "pool2", "pool3", "pool4")); + ASSERT_THAT(response.physicalpoolinfos(0).poolsetid(), + AnyOf(poolsetId1, poolsetId2)); + ASSERT_THAT(response.physicalpoolinfos(0).poolsetname(), + AnyOf("ssd1", "ssd2")); + ASSERT_THAT(response.physicalpoolinfos(0).desc(), + AnyOf("desc1", "desc2", "desc3", "desc4")); + + ASSERT_THAT(response.physicalpoolinfos(1).physicalpoolid(), + AnyOf(poolId1, poolId2, poolId3, poolId4)); + ASSERT_THAT(response.physicalpoolinfos(1).physicalpoolname(), + AnyOf("pool1", "pool2", "pool3", "pool4")); + ASSERT_THAT(response.physicalpoolinfos(1).poolsetid(), + AnyOf(poolsetId1, poolsetId2)); + ASSERT_THAT(response.physicalpoolinfos(1).poolsetname(), + AnyOf("ssd1", "ssd2")); + ASSERT_THAT(response.physicalpoolinfos(1).desc(), + AnyOf("desc1", "desc2", "desc3", "desc4")); + + ASSERT_THAT(response.physicalpoolinfos(2).physicalpoolid(), + AnyOf(poolId1, poolId2, poolId3, poolId4)); + ASSERT_THAT(response.physicalpoolinfos(2).physicalpoolname(), + AnyOf("pool1", "pool2", "pool3", "pool4")); + ASSERT_THAT(response.physicalpoolinfos(2).poolsetid(), + AnyOf(poolsetId1, poolsetId2)); + ASSERT_THAT(response.physicalpoolinfos(2).poolsetname(), + AnyOf("ssd1", "ssd2")); + ASSERT_THAT(response.physicalpoolinfos(2).desc(), + AnyOf("desc1", "desc2", "desc3", "desc4")); + + ASSERT_THAT(response.physicalpoolinfos(3).physicalpoolid(), + AnyOf(poolId1, poolId2, poolId3, poolId4)); + ASSERT_THAT(response.physicalpoolinfos(3).physicalpoolname(), + AnyOf("pool1", "pool2", "pool3", "pool4")); + ASSERT_THAT(response.physicalpoolinfos(3).poolsetid(), + AnyOf(poolsetId1, poolsetId2)); + ASSERT_THAT(response.physicalpoolinfos(3).poolsetname(), + AnyOf("ssd1", "ssd2")); + 
ASSERT_THAT(response.physicalpoolinfos(3).desc(), + AnyOf("desc1", "desc2", "desc3", "desc4")); +} + +TEST_F(TestTopologyServiceManager, + test_ListPhysicalPoolsInPoolset_InvalidParam) { + PoolIdType poolId1 = 0x11; + PoolIdType poolId2 = 0x12; + PoolIdType poolId3 = 0x13; + PoolIdType poolId4 = 0x14; + PoolsetIdType poolsetId1 = 0x61; + PoolsetIdType poolsetId2 = 0x62; + + PrepareAddPoolset(poolsetId1, "ssd1"); + PrepareAddPoolset(poolsetId2, "ssd2"); + + PrepareAddPhysicalPool(poolId1, "pool1", poolsetId1, "desc1"); + PrepareAddPhysicalPool(poolId2, "pool2", poolsetId1, "desc2"); + PrepareAddPhysicalPool(poolId3, "pool3", poolsetId2, "desc3"); + PrepareAddPhysicalPool(poolId4, "pool4", poolsetId2, "desc4"); + + ListPhysicalPoolsInPoolsetRequest request; + + ListPhysicalPoolResponse response; + serviceManager_->ListPhysicalPoolsInPoolset(&request, &response); + + ASSERT_EQ(kTopoErrCodeInvalidParam, response.statuscode()) << + response.DebugString(); + ASSERT_EQ(0, response.physicalpoolinfos_size()); +} + +TEST_F(TestTopologyServiceManager, + test_ListPhysicalPoolsInPoolset_PoolsetNotFound) { + PoolIdType poolId1 = 0x11; + PoolIdType poolId2 = 0x12; + PoolIdType poolId3 = 0x13; + PoolIdType poolId4 = 0x14; + PoolsetIdType poolsetId1 = 0x61; + PoolsetIdType poolsetId2 = 0x62; + + PrepareAddPoolset(poolsetId1, "ssd1"); + PrepareAddPoolset(poolsetId2, "ssd2"); + + PrepareAddPhysicalPool(poolId1, "pool1", poolsetId1, "desc1"); + PrepareAddPhysicalPool(poolId2, "pool2", poolsetId1, "desc2"); + PrepareAddPhysicalPool(poolId3, "pool3", poolsetId2, "desc3"); + PrepareAddPhysicalPool(poolId4, "pool4", poolsetId2, "desc4"); + + ListPhysicalPoolsInPoolsetRequest request; + request.add_poolsetid(0x99); + + ListPhysicalPoolResponse response; + serviceManager_->ListPhysicalPoolsInPoolset(&request, &response); + + ASSERT_EQ(kTopoErrCodePoolsetNotFound, response.statuscode()); + ASSERT_EQ(0, response.physicalpoolinfos_size()); +} TEST_F(TestTopologyServiceManager, test_CreateLogicalPool_Success) { + PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); @@ -2203,6 +2529,7 @@ TEST_F(TestTopologyServiceManager, test_CreateLogicalPool_Success) { } TEST_F(TestTopologyServiceManager, test_CreateLogicalPool_ByNameSuccess) { + PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId, "pPool1"); @@ -2262,6 +2589,7 @@ TEST_F(TestTopologyServiceManager, test_CreateLogicalPool_ByNameSuccess) { TEST_F(TestTopologyServiceManager, test_CreateLogicalPool_PhysicalPoolNotFound) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); @@ -2280,6 +2608,7 @@ TEST_F(TestTopologyServiceManager, TEST_F(TestTopologyServiceManager, test_CreateLogicalPool_ByNamePhysicalPoolNotFound) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId, "pPool1"); @@ -2298,6 +2627,7 @@ TEST_F(TestTopologyServiceManager, TEST_F(TestTopologyServiceManager, test_CreateLogicalPool_InvalidParam) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); @@ -2314,6 +2644,7 @@ TEST_F(TestTopologyServiceManager, } TEST_F(TestTopologyServiceManager, test_DeleteLogicalPool_ByIdSuccess) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); PoolIdType id = 0x01; @@ -2331,6 +2662,7 @@ TEST_F(TestTopologyServiceManager, test_DeleteLogicalPool_ByIdSuccess) 
{ } TEST_F(TestTopologyServiceManager, test_DeleteLogicalPool_ByNameSuccess) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId, "physicalpoolname"); PoolIdType id = 0x01; @@ -2349,6 +2681,7 @@ TEST_F(TestTopologyServiceManager, test_DeleteLogicalPool_ByNameSuccess) { } TEST_F(TestTopologyServiceManager, test_DeleteLogicalPool_LogicalPoolNotFound) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); PoolIdType id = 0x01; @@ -2363,6 +2696,7 @@ TEST_F(TestTopologyServiceManager, test_DeleteLogicalPool_LogicalPoolNotFound) { } TEST_F(TestTopologyServiceManager, test_DeleteLogicalPool_ByNameFail) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId, "physicalpoolname"); PoolIdType id = 0x01; @@ -2378,6 +2712,7 @@ TEST_F(TestTopologyServiceManager, test_DeleteLogicalPool_ByNameFail) { } TEST_F(TestTopologyServiceManager, test_DeleteLogicalPool_InvalidParam) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); PoolIdType id = 0x01; @@ -2390,8 +2725,9 @@ TEST_F(TestTopologyServiceManager, test_DeleteLogicalPool_InvalidParam) { } TEST_F(TestTopologyServiceManager, test_GetLogicalPool_ByIdSuccess) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; - PrepareAddPhysicalPool(physicalPoolId); + PrepareAddPhysicalPool(physicalPoolId, "testPhysicalPool"); PoolIdType id = 0x01; PrepareAddLogicalPool(id, "name", physicalPoolId, PAGEFILE); @@ -2410,6 +2746,7 @@ TEST_F(TestTopologyServiceManager, test_GetLogicalPool_ByIdSuccess) { TEST_F(TestTopologyServiceManager, test_GetLogicalPool_ByNameSuccess) { PoolIdType physicalPoolId = 0x11; + PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId, "physicalpoolname"); PoolIdType id = 0x01; PrepareAddLogicalPool(id, "name", physicalPoolId, PAGEFILE); @@ -2429,6 +2766,7 @@ TEST_F(TestTopologyServiceManager, test_GetLogicalPool_ByNameSuccess) { } TEST_F(TestTopologyServiceManager, test_GetLogicalPool_LogicalPoolNotFound) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); PoolIdType id = 0x01; @@ -2444,8 +2782,10 @@ TEST_F(TestTopologyServiceManager, test_GetLogicalPool_LogicalPoolNotFound) { } TEST_F(TestTopologyServiceManager, test_GetLogicalPool_ByNameFail) { + PoolsetIdType pid = 0x61; PoolIdType physicalPoolId = 0x11; - PrepareAddPhysicalPool(physicalPoolId, "physicalpoolname"); + PrepareAddPoolset(pid); + PrepareAddPhysicalPool(physicalPoolId, "physicalpoolname", pid); PoolIdType id = 0x01; PrepareAddLogicalPool(id, "name", physicalPoolId, PAGEFILE); @@ -2459,8 +2799,10 @@ TEST_F(TestTopologyServiceManager, test_GetLogicalPool_ByNameFail) { } TEST_F(TestTopologyServiceManager, test_GetLogicalPool_InvalidParam) { + PoolsetIdType pid = 0x61; PoolIdType physicalPoolId = 0x11; - PrepareAddPhysicalPool(physicalPoolId); + PrepareAddPoolset(pid); + PrepareAddPhysicalPool(physicalPoolId, "testPhysicalPool", pid); PoolIdType id = 0x01; PrepareAddLogicalPool(id, "name", physicalPoolId, PAGEFILE); @@ -2473,8 +2815,10 @@ TEST_F(TestTopologyServiceManager, test_GetLogicalPool_InvalidParam) { } TEST_F(TestTopologyServiceManager, test_ListLogicalPool_ByIdSuccess) { + PoolsetIdType pid = 0x61; PoolIdType physicalPoolId = 0x11; - PrepareAddPhysicalPool(physicalPoolId); + PrepareAddPoolset(pid); + PrepareAddPhysicalPool(physicalPoolId, "testPhysicalPool", pid); PoolIdType id = 0x01; PoolIdType id2 = 0x02; PrepareAddLogicalPool(id, "name", 
physicalPoolId, PAGEFILE); @@ -2504,6 +2848,7 @@ TEST_F(TestTopologyServiceManager, test_ListLogicalPool_ByIdSuccess) { } TEST_F(TestTopologyServiceManager, test_ListLogicalPool_ByNameSuccess) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId, "physicalPoolName"); PoolIdType id = 0x01; @@ -2535,6 +2880,7 @@ TEST_F(TestTopologyServiceManager, test_ListLogicalPool_ByNameSuccess) { } TEST_F(TestTopologyServiceManager, test_ListLogicalPool_PhysicalPoolNotFound) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); PoolIdType id = 0x01; @@ -2553,6 +2899,7 @@ TEST_F(TestTopologyServiceManager, test_ListLogicalPool_PhysicalPoolNotFound) { } TEST_F(TestTopologyServiceManager, test_ListLogicalPool_ByNameFail) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId, "physicalPoolName"); PoolIdType id = 0x01; @@ -2570,6 +2917,7 @@ TEST_F(TestTopologyServiceManager, test_ListLogicalPool_ByNameFail) { } TEST_F(TestTopologyServiceManager, test_ListLogicalPool_InvalidParam) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); PoolIdType id = 0x01; @@ -2586,6 +2934,7 @@ TEST_F(TestTopologyServiceManager, test_ListLogicalPool_InvalidParam) { } TEST_F(TestTopologyServiceManager, test_SetLogicalPool_success) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); PoolIdType id = 0x01; @@ -2604,6 +2953,7 @@ TEST_F(TestTopologyServiceManager, test_SetLogicalPool_success) { } TEST_F(TestTopologyServiceManager, test_SetLogicalPool_LogicalPoolNotFound) { + PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); PoolIdType id = 0x01; @@ -2619,9 +2969,11 @@ TEST_F(TestTopologyServiceManager, test_SetLogicalPool_LogicalPoolNotFound) { } TEST_F(TestTopologyServiceManager, TestSetLogicalPoolScanState) { + PoolsetIdType pid = 0x61; PoolIdType ppid = 1; // physicalPoolId PoolIdType lpid = 1; // logicalPoolId - PrepareAddPhysicalPool(ppid); + PrepareAddPoolset(pid); + PrepareAddPhysicalPool(ppid, "testPhysicalPool", pid); PrepareAddLogicalPool(lpid, "name", ppid); SetLogicalPoolScanStateRequest request; @@ -2643,13 +2995,186 @@ TEST_F(TestTopologyServiceManager, TestSetLogicalPoolScanState) { ASSERT_EQ(response.statuscode(), kTopoErrCodeSuccess); } + +TEST_F(TestTopologyServiceManager, test_createPoolset_Success) { + PoolsetRequest request; + request.set_poolsetname("default"); + request.set_type("SSD"); + request.set_desc("just for test"); + + PoolsetIdType poolsetId = 0x12; + EXPECT_CALL(*idGenerator_, GenPoolsetId()) + .WillOnce(Return(poolsetId)); + EXPECT_CALL(*storage_, StoragePoolset(_)) + .WillOnce(Return(true)); + + PoolsetResponse response; + serviceManager_->CreatePoolset(&request, &response); + + ASSERT_EQ(kTopoErrCodeSuccess, response.statuscode()); + ASSERT_TRUE(response.has_poolsetinfo()); + ASSERT_EQ(poolsetId, response.poolsetinfo().poolsetid()); + ASSERT_EQ(request.poolsetname(), response.poolsetinfo().poolsetname()); + ASSERT_EQ(request.desc(), response.poolsetinfo().desc()); +} + +TEST_F(TestTopologyServiceManager, test_createPoolset_InvalidParam) { + PoolsetRequest request; + PoolsetResponse response; + serviceManager_->CreatePoolset(&request, &response); + + ASSERT_EQ(kTopoErrCodeInvalidParam, response.statuscode()); + ASSERT_FALSE(response.has_poolsetinfo()); +} + +TEST_F(TestTopologyServiceManager, test_GetPoolsetById_Success) { + 
PoolsetIdType id = 0x61; + std::string name = "ssdPoolset1"; + + PoolsetRequest request; + request.set_poolsetid(id); + + PrepareAddPoolset(id, name); + + PoolsetResponse response; + serviceManager_->GetPoolset(&request, &response); + + ASSERT_EQ(kTopoErrCodeSuccess, response.statuscode()); + ASSERT_TRUE(response.has_poolsetinfo()); + ASSERT_EQ(id, response.poolsetinfo().poolsetid()); + ASSERT_EQ(name, response.poolsetinfo().poolsetname()); + ASSERT_EQ("SSD", response.poolsetinfo().type()); +} + +TEST_F(TestTopologyServiceManager, test_GetPoolsetByInvalidParam) { + PrepareAddPoolset(0x61, "ssdPoolset1"); + PoolsetRequest request; + + PoolsetResponse response; + serviceManager_->GetPoolset(&request, &response); + + ASSERT_EQ(kTopoErrCodeInvalidParam, response.statuscode()); + ASSERT_FALSE(response.has_poolsetinfo()); +} + +TEST_F(TestTopologyServiceManager, test_GetPoolsetById_NotFound) { + PrepareAddPoolset(0x61, "ssdPoolset1"); + PoolsetRequest request; + PoolsetIdType id = 0x01; + request.set_poolsetid(id); + request.set_poolsetname("ssdPoolset1"); + + PoolsetResponse response; + serviceManager_->GetPoolset(&request, &response); + + ASSERT_EQ(kTopoErrCodePoolsetNotFound, response.statuscode()); + ASSERT_FALSE(response.has_poolsetinfo()); +} + +TEST_F(TestTopologyServiceManager, test_GetPoolsetByName_NotFound) { + PrepareAddPoolset(0x61, "ssdPoolset1"); + + PoolsetRequest request; + + request.set_poolsetname("default"); + + PoolsetResponse response; + serviceManager_->GetPoolset(&request, &response); + + ASSERT_EQ(kTopoErrCodePoolsetNotFound, response.statuscode()); + ASSERT_FALSE(response.has_poolsetinfo()); +} + +TEST_F(TestTopologyServiceManager, test_ListPoolset_success) { + ListPoolsetRequest request; + ListPoolsetResponse response; + + PrepareAddPoolset(0x12, "test1"); + PrepareAddPoolset(0x13, "test2"); + + serviceManager_->ListPoolset(&request, &response); + + ASSERT_EQ(kTopoErrCodeSuccess, response.statuscode()); + ASSERT_EQ(2, response.poolsetinfos_size()); + + ASSERT_THAT(response.poolsetinfos(0).poolsetid(), + AnyOf(0x12, 0x13)); + ASSERT_THAT(response.poolsetinfos(0).poolsetname(), + AnyOf("test1", "test2")); + ASSERT_THAT(response.poolsetinfos(1).poolsetid(), + AnyOf(0x12, 0x13)); + ASSERT_THAT(response.poolsetinfos(1).poolsetname(), + AnyOf("test1", "test2")); +} + +TEST_F(TestTopologyServiceManager, test_DeletePoolsetById_Success) { + PoolsetIdType pid = 0x61; + PoolsetRequest request; + request.set_poolsetid(pid); + + PrepareAddPoolset(pid, "default"); + + EXPECT_CALL(*storage_, DeletePoolset(_)) + .WillOnce(Return(true)); + + PoolsetResponse response; + serviceManager_->DeletePoolset(&request, &response); + + ASSERT_EQ(kTopoErrCodeSuccess, response.statuscode()); +} + +TEST_F(TestTopologyServiceManager, test_DeletePoolset_IdNotFound) { + PoolsetIdType id = 0x12; + PoolsetRequest request; + request.set_poolsetid(id); + + PrepareAddPoolset(++id, "default"); + + PoolsetResponse response; + serviceManager_->DeletePoolset(&request, &response); + + ASSERT_EQ(kTopoErrCodePoolsetNotFound, response.statuscode()); +} + +TEST_F(TestTopologyServiceManager, test_DeletePoolset_StorageSuccess) { + PoolsetIdType id = 0x61; + PoolsetRequest request; + request.set_poolsetid(id); + + PrepareAddPoolset(id); + + EXPECT_CALL(*storage_, DeletePoolset(_)).WillOnce(Return(true)); + + PoolsetResponse response; + serviceManager_->DeletePoolset(&request, &response); + + ASSERT_EQ(kTopoErrCodeSuccess, response.statuscode()); +} + +TEST_F(TestTopologyServiceManager, test_DeletePoolset_StorageFail) { 
+ PoolsetIdType id = 0x61; + PoolsetRequest request; + request.set_poolsetid(id); + + PrepareAddPoolset(id); + + EXPECT_CALL(*storage_, DeletePoolset(_)) + .WillOnce(Return(false)); + + PoolsetResponse response; + serviceManager_->DeletePoolset(&request, &response); + + ASSERT_EQ(kTopoErrCodeStorgeFail, response.statuscode()); +} + TEST_F(TestTopologyServiceManager, test_GetChunkServerListInCopySets_success) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; - PrepareAddPhysicalPool(physicalPoolId); + PrepareAddPoolset(0x61); + PrepareAddPhysicalPool(physicalPoolId, "testPhysicalPool", 0x61); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); @@ -2686,12 +3211,14 @@ TEST_F(TestTopologyServiceManager, TEST_F(TestTopologyServiceManager, test_GetChunkServerListInCopySets_CopysetNotFound) { - + PoolsetIdType pid = 0x61; PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; - PrepareAddPhysicalPool(physicalPoolId); + PrepareAddPoolset(pid); + PrepareAddPhysicalPool(physicalPoolId, + "testPhysicalPool", pid); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); @@ -2719,7 +3246,7 @@ TEST_F(TestTopologyServiceManager, TEST_F(TestTopologyServiceManager, test_GetChunkServerListInCopySets_InternalError) { - + PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; CopySetIdType copysetId = 0x51; @@ -2752,10 +3279,13 @@ TEST_F(TestTopologyServiceManager, TEST_F(TestTopologyServiceManager, test_GetCopySetsInChunkServer_ByIdSuccess) { + PoolsetIdType pid = 0x61; PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; - PrepareAddPhysicalPool(physicalPoolId); + PrepareAddPoolset(pid); + PrepareAddPhysicalPool(physicalPoolId, + "testPhysicalPool", pid); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); @@ -2789,6 +3319,7 @@ TEST_F(TestTopologyServiceManager, TEST_F(TestTopologyServiceManager, test_GetCopySetsInChunkServer_ByIpSuccess) { + PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; @@ -2836,6 +3367,7 @@ TEST_F(TestTopologyServiceManager, TEST_F(TestTopologyServiceManager, test_GetCopySetsInChunkServer_ByIdChunkserverNotFound) { + PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; @@ -2857,10 +3389,12 @@ TEST_F(TestTopologyServiceManager, TEST_F(TestTopologyServiceManager, test_GetCopySetsInChunkServer_ByIpChunkserverNotFound) { + PoolsetIdType poolsetId = 0x61; PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; - PrepareAddPhysicalPool(physicalPoolId); + PrepareAddPoolset(poolsetId); + PrepareAddPhysicalPool(physicalPoolId, "testPool", poolsetId); PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddServer(0x31, "server1", "10.187.0.1", "10.187.0.1", 0x21, 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "10.187.0.1"); @@ -2889,6 +3423,7 @@ TEST_F(TestTopologyServiceManager, } TEST_F(TestTopologyServiceManager, TestGetCopySetsInCluster) { + PrepareAddPoolset(); PoolIdType ppid1 = 1, ppid2 = 2; // physical pool id PoolIdType lpid1 = 1, lpid2 = 2; // logical pool id PrepareAddPhysicalPool(ppid1); @@ -2938,6 +3473,7 @@ TEST_F(TestTopologyServiceManager, TestGetCopySetsInCluster) { }
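All of the poolset-aware topology cases above share one fixture shape: register a poolset first, then a physical pool that references it, then build the rest of the topology as before. A minimal sketch of that shared setup, using only helpers and sample values already defined in this fixture:

    PoolsetIdType poolsetId = 0x61;
    PoolIdType physicalPoolId = 0x11;
    PrepareAddPoolset(poolsetId, "ssdPoolset1");                 // stores the poolset through the mocked storage_
    PrepareAddPhysicalPool(physicalPoolId, "pool1", poolsetId);  // the physical pool now carries its poolset id
    PrepareAddZone(0x21, "zone1", physicalPoolId);               // zones, servers and chunkservers are unchanged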
TEST_F(TestTopologyServiceManager, TestGetCopyset) { + PrepareAddPoolset(); PoolIdType ppid = 1; // physical pool id PoolIdType lpid = 1; // logical pool id PrepareAddPhysicalPool(ppid); @@ -2981,9 +3517,11 @@ TEST_F(TestTopologyServiceManager, TestGetCopyset) { } TEST_F(TestTopologyServiceManager, test_SetCopysetsAvailFlag) { + PoolsetIdType poolsetId = 0x61; PoolIdType logicalPoolId1 = 0x1; PoolIdType physicalPoolId1 = 0x11; - PrepareAddPhysicalPool(physicalPoolId1); + PrepareAddPoolset(poolsetId); + PrepareAddPhysicalPool(physicalPoolId1, "testPool", poolsetId); PrepareAddLogicalPool(logicalPoolId1, "logicalPool1", physicalPoolId1); std::set replicas; replicas.insert(0x41); @@ -3060,7 +3598,9 @@ TEST_F(TestTopologyServiceManager, test_SetCopysetsAvailFlag) { TEST_F(TestTopologyServiceManager, test_ListUnAvailCopySets) { PoolIdType logicalPoolId1 = 0x1; PoolIdType physicalPoolId1 = 0x11; - PrepareAddPhysicalPool(physicalPoolId1); + PoolsetIdType poolsetId = 0x61; + PrepareAddPoolset(poolsetId); + PrepareAddPhysicalPool(physicalPoolId1, "testPool", poolsetId); PrepareAddLogicalPool(logicalPoolId1, "logicalPool1", physicalPoolId1); std::set replicas; replicas.insert(0x41); diff --git a/test/mds/topology/test_topology_stat.cpp b/test/mds/topology/test_topology_stat.cpp index 9b930f5fcc..29af525410 100644 --- a/test/mds/topology/test_topology_stat.cpp +++ b/test/mds/topology/test_topology_stat.cpp @@ -114,7 +114,6 @@ TEST_F(TestTopologyStat, TestUpdateAndGetChunkServerStat) { cstat3.writeIOPS = 3; stat3.copysetStats.push_back(cstat3); - PoolIdType pPid = 2; EXPECT_CALL(*topology_, GetBelongPhysicalPoolId(_, _)) .WillRepeatedly(DoAll(SetArgPointee<1>(2), Return(kTopoErrCodeSuccess))); diff --git a/test/mds/topology/test_topology_storage_codec.cpp b/test/mds/topology/test_topology_storage_codec.cpp index e30bb86f6b..981c38b2fa 100644 --- a/test/mds/topology/test_topology_storage_codec.cpp +++ b/test/mds/topology/test_topology_storage_codec.cpp @@ -65,9 +65,23 @@ TEST(TestTopologyStorageCodec, ASSERT_TRUE(JudgeLogicalPoolEqual(data, out)); } +TEST(TestTopologyStorageCodec, + TestPoolsetEncodeDecodeEqual) { + Poolset data(0x61, "ssdPoolset1", "SSD", "desc"); + + TopologyStorageCodec testObj; + std::string value; + ASSERT_TRUE(testObj.EncodePoolsetData(data, &value)); + + Poolset out; + ASSERT_TRUE(testObj.DecodePoolsetData(value, &out)); + + ASSERT_TRUE(JudgePoolsetEqual(data, out)); +} + TEST(TestTopologyStorageCodec, TestPhysicalPoolEncodeDecodeEqual) { - PhysicalPool data(0x21, "pPool", "desc"); + PhysicalPool data(0x21, "pPool", 0x61, "desc"); TopologyStorageCodec testObj; std::string value; diff --git a/test/mds/topology/test_topology_storage_etcd.cpp b/test/mds/topology/test_topology_storage_etcd.cpp index 8a65f972f8..e896abb2a0 100644 --- a/test/mds/topology/test_topology_storage_etcd.cpp +++ b/test/mds/topology/test_topology_storage_etcd.cpp @@ -161,7 +161,7 @@ TEST_F(TestTopologyStorageEtcd, test_LoadLogicalPool_IdDuplicated) { } TEST_F(TestTopologyStorageEtcd, test_LoadPhysicalPool_success) { - PhysicalPool data(0x21, "pPool", "desc"); + PhysicalPool data(0x21, "pPool", 0x61, "desc"); std::string key = codec_->EncodePhysicalPoolKey(data.GetId()); std::string value; @@ -214,7 +214,7 @@ TEST_F(TestTopologyStorageEtcd, test_LoadPhysicalPool_success_decodeError) { } TEST_F(TestTopologyStorageEtcd, test_LoadPhysicalPool_IdDuplicated) { - PhysicalPool data(0x21, "pPool", "desc"); + PhysicalPool data(0x21, "pPool", 0x61, "desc"); std::string key = codec_->EncodePhysicalPoolKey(data.GetId()); 
std::string value; @@ -565,6 +565,42 @@ TEST_F(TestTopologyStorageEtcd, test_LoadCopyset_IdDuplicated) { ASSERT_FALSE(ret); } +TEST_F(TestTopologyStorageEtcd, test_StoragePoolset_success) { + Poolset data(0x21, "ssdPoolset1", "SSD", "desc"); + + EXPECT_CALL(*kvStorageClient_, Put(_, _)) + .WillOnce(Return(EtcdErrCode::EtcdOK)); + + bool ret = storage_->StoragePoolset(data); + ASSERT_TRUE(ret); +} + +TEST_F(TestTopologyStorageEtcd, test_StoragePoolset_putInfoEtcdFail) { + Poolset data(0x21, "ssdPoolset1", "SSD", "desc"); + + EXPECT_CALL(*kvStorageClient_, Put(_, _)) + .WillOnce(Return(EtcdErrCode::EtcdUnknown)); + + bool ret = storage_->StoragePoolset(data); + ASSERT_FALSE(ret); +} + +TEST_F(TestTopologyStorageEtcd, test_DeletePoolset_success) { + EXPECT_CALL(*kvStorageClient_, Delete(_)) + .WillOnce(Return(EtcdErrCode::EtcdOK)); + + bool ret = storage_->DeletePoolset(0x61); + ASSERT_TRUE(ret); +} + +TEST_F(TestTopologyStorageEtcd, test_DeletePoolset_fail) { + EXPECT_CALL(*kvStorageClient_, Delete(_)) + .WillOnce(Return(EtcdErrCode::EtcdUnknown)); + + bool ret = storage_->DeletePoolset(0x61); + ASSERT_FALSE(ret); +} + TEST_F(TestTopologyStorageEtcd, test_StotageLogicalPool_success) { LogicalPool::RedundanceAndPlaceMentPolicy rap; rap.pageFileRAP.replicaNum = 3; @@ -602,7 +638,7 @@ TEST_F(TestTopologyStorageEtcd, test_StotageLogicalPool_putInfoEtcdFail) { } TEST_F(TestTopologyStorageEtcd, test_StotagePhysicalPool_success) { - PhysicalPool data(0x21, "pPool", "desc"); + PhysicalPool data(0x21, "pPool", 0x61, "desc"); EXPECT_CALL(*kvStorageClient_, Put(_, _)) .WillOnce(Return(EtcdErrCode::EtcdOK)); @@ -612,7 +648,7 @@ TEST_F(TestTopologyStorageEtcd, test_StotagePhysicalPool_success) { } TEST_F(TestTopologyStorageEtcd, test_StotagePhysicalPool_putInfoEtcdFail) { - PhysicalPool data(0x21, "pPool", "desc"); + PhysicalPool data(0x21, "pPool", 0x61, "desc"); EXPECT_CALL(*kvStorageClient_, Put(_, _)) .WillOnce(Return(EtcdErrCode::EtcdUnknown)); @@ -889,4 +925,3 @@ TEST_F(TestTopologyStorageEtcd, test_StotageClusterInfo_fail) { } // namespace topology } // namespace mds } // namespace curve - diff --git a/test/snapshotcloneserver/mock_snapshot_server.h b/test/snapshotcloneserver/mock_snapshot_server.h index ac63d7d1f9..02b867673b 100644 --- a/test/snapshotcloneserver/mock_snapshot_server.h +++ b/test/snapshotcloneserver/mock_snapshot_server.h @@ -195,7 +195,7 @@ class MockCurveFsClient : public CurveFsClient { int(const ChunkIDInfo &cidinfo, ChunkInfoDetail *chunkInfo)); - MOCK_METHOD9(CreateCloneFile, + MOCK_METHOD10(CreateCloneFile, int(const std::string &source, const std::string &filename, const std::string &user, @@ -204,6 +204,7 @@ class MockCurveFsClient : public CurveFsClient { uint32_t chunkSize, uint64_t stripeUnit, uint64_t stripeCount, + const std::string& poolset, FInfo* fileInfo)); MOCK_METHOD6(CreateCloneChunk, @@ -309,10 +310,11 @@ class MockCloneServiceManager : public CloneServiceManager { CloneServiceManager(nullptr, nullptr, nullptr) {} ~MockCloneServiceManager() {} - MOCK_METHOD6(CloneFile, + MOCK_METHOD7(CloneFile, int(const UUID &source, const std::string &user, const std::string &destination, + const std::string &poolset, bool lazyFlag, std::shared_ptr entity, TaskIdType *taskId)); @@ -362,12 +364,13 @@ class MockCloneServiceManager : public CloneServiceManager { class MockCloneCore : public CloneCore { public: - MOCK_METHOD6(CloneOrRecoverPre, + MOCK_METHOD7(CloneOrRecoverPre, int(const UUID &source, const std::string &user, const std::string &destination, bool lazyFlag, 
CloneTaskType taskType, + std::string poolset, CloneInfo *info)); MOCK_METHOD1(HandleCloneOrRecoverTask, diff --git a/test/snapshotcloneserver/test_clone_core.cpp b/test/snapshotcloneserver/test_clone_core.cpp index 25265ecb6a..f57c2d15c0 100644 --- a/test/snapshotcloneserver/test_clone_core.cpp +++ b/test/snapshotcloneserver/test_clone_core.cpp @@ -43,6 +43,8 @@ using ::testing::DoAll; namespace curve { namespace snapshotcloneserver { +static const char* kDefaultPoolset = "poolset"; + class TestCloneCoreImpl : public ::testing::Test { public: TestCloneCoreImpl() {} @@ -178,7 +180,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapSuccess) { int ret = core_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kClone, &cloneInfoOut); + CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(1, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -198,6 +200,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapTaskExist) { CloneTaskType::kClone, source, destination, + kDefaultPoolset, 100, 101, 0, @@ -213,7 +216,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapTaskExist) { int ret = core_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kClone, &cloneInfoOut); + CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); ASSERT_EQ(kErrCodeTaskExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -233,6 +236,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapFailOnFileExist) { CloneTaskType::kRecover, source, destination, + "", 100, 101, 0, @@ -248,7 +252,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapFailOnFileExist) { int ret = core_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kClone, &cloneInfoOut); + CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); ASSERT_EQ(kErrCodeFileExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -277,6 +281,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileSuccess) { fInfo.owner = "user1"; fInfo.filename = "file1"; fInfo.filestatus = FileStatus::Created; + fInfo.poolset = kDefaultPoolset; EXPECT_CALL(*client_, GetFileInfo(source, option.mdsRootUser, _)) .WillOnce(DoAll(SetArgPointee<2>(fInfo), Return(LIBCURVE_ERROR::OK))); @@ -286,7 +291,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileSuccess) { int ret = core_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kClone, &cloneInfoOut); + CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -312,7 +317,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapInvalidSnapshot) { int ret = core_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kClone, &cloneInfoOut); + CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); ASSERT_EQ(kErrCodeInvalidSnapshot, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); @@ -337,7 +342,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForSnapInvalidUser) { int ret = core_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kClone, &cloneInfoOut); + CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); ASSERT_EQ(kErrCodeInvalidUser, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); @@ -364,7 +369,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreAddCloneInfoFail) { int ret = core_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kClone, 
&cloneInfoOut); + CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); ASSERT_EQ(kErrCodeInternalError, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); @@ -387,7 +392,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileNotExist) { int ret = core_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kClone, &cloneInfoOut); + CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); ASSERT_EQ(kErrCodeFileNotExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); @@ -410,7 +415,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileFail) { int ret = core_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kClone, &cloneInfoOut); + CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); ASSERT_EQ(kErrCodeInternalError, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); @@ -428,7 +433,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreDestinationExist) { int ret = core_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kClone, &cloneInfoOut); + CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); ASSERT_EQ(kErrCodeFileExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); @@ -448,6 +453,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreDestinationAndTaskExist) { CloneTaskType::kClone, source, destination, + kDefaultPoolset, 100, destId, 0, @@ -470,7 +476,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreDestinationAndTaskExist) { int ret = core_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kClone, &cloneInfoOut); + CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); ASSERT_EQ(kErrCodeTaskExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); @@ -490,6 +496,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreDestinationExistButInodeidNotEqual) { CloneTaskType::kClone, source, destination, + kDefaultPoolset, 100, destId, 0, @@ -512,7 +519,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreDestinationExistButInodeidNotEqual) { int ret = core_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kClone, &cloneInfoOut); + CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); ASSERT_EQ(kErrCodeFileExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); @@ -530,7 +537,7 @@ TEST_F(TestCloneCoreImpl, TestRecoverPreDestinationNotExist) { int ret = core_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kRecover, &cloneInfoOut); + CloneTaskType::kRecover, "", &cloneInfoOut); ASSERT_EQ(kErrCodeFileNotExist, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); @@ -558,7 +565,7 @@ TEST_F(TestCloneCoreImpl, TestRecoverPreForSnapSuccess) { int ret = core_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kRecover, &cloneInfoOut); + CloneTaskType::kRecover, "", &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(1, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); @@ -583,7 +590,7 @@ TEST_F(TestCloneCoreImpl, TestRecoverPreForSnapDestNotMatch) { int ret = core_->CloneOrRecoverPre( source, user, destination, lazyFlag, - 
CloneTaskType::kRecover, &cloneInfoOut); + CloneTaskType::kRecover, "", &cloneInfoOut); ASSERT_EQ(kErrCodeInvalidSnapshot, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); @@ -601,7 +608,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreDestinationFileInternalError) { int ret = core_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kClone, &cloneInfoOut); + CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); ASSERT_EQ(kErrCodeInternalError, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); @@ -626,7 +633,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileSourceFileStatusInvalid) { int ret = core_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kClone, &cloneInfoOut); + CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); ASSERT_EQ(kErrCodeFileStatusInvalid, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(0, core_->GetCloneRef()->GetRef(source)); @@ -668,7 +675,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileSetCloneFileStatusReturnNotExist) { int ret = core_->CloneOrRecoverPre( source, user, destination, lazyFlag, - CloneTaskType::kClone, &cloneInfoOut); + CloneTaskType::kClone, kDefaultPoolset, &cloneInfoOut); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(0, core_->GetSnapshotRef()->GetSnapshotRef(source)); ASSERT_EQ(1, core_->GetCloneRef()->GetRef(source)); @@ -677,7 +684,7 @@ TEST_F(TestCloneCoreImpl, TestClonePreForFileSetCloneFileStatusReturnNotExist) { TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskStage1SuccessForCloneBySnapshot) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kSnapshot, true); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -699,9 +706,9 @@ TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskStage2SuccessForCloneBySnapshot) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", 1, 2, 100, CloneFileType::kSnapshot, true, - CloneStep::kRecoverChunk, CloneStatus::cloning); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, 1, 2, 100, CloneFileType::kSnapshot, true, + CloneStep::kRecoverChunk, CloneStatus::cloning); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -720,8 +727,8 @@ TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskSuccessForCloneBySnapshotNotLazy) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kSnapshot, false); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -747,7 +754,7 @@ TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnBuildFileInfoFromSnapshot) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kSnapshot, true); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -765,7 +772,7 @@ 
TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnGetSnapshotInfo) { CloneInfo cinfo("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kSnapshot, true); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); cinfo.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -786,7 +793,8 @@ TEST_F(TestCloneCoreImpl, uint64_t time = 100; Status status = Status::done; SnapshotInfo info(uuid, user, fileName, desc, - seqnum, chunksize, segmentsize, filelength, 0, 0, time, status); + seqnum, chunksize, segmentsize, filelength, 0, 0, "default", + time, status); EXPECT_CALL(*metaStore_, GetSnapshotInfo(_, _)) .WillRepeatedly(DoAll( @@ -799,7 +807,7 @@ TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskStage1SuccessForRecoverBySnapshot) { CloneInfo info("id1", "user1", CloneTaskType::kRecover, - "snapid1", "file1", CloneFileType::kSnapshot, true); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::recovering); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -820,9 +828,9 @@ TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskStage2SuccessForRecoverBySnapshot) { - CloneInfo info("id1", "user1", CloneTaskType::kRecover, - "snapid1", "file1", 1, 2, 100, CloneFileType::kSnapshot, true, - CloneStep::kRecoverChunk, CloneStatus::recovering); + CloneInfo info("id1", "user1", CloneTaskType::kRecover, "snapid1", "file1", + kDefaultPoolset, 1, 2, 100, CloneFileType::kSnapshot, true, + CloneStep::kRecoverChunk, CloneStatus::recovering); info.SetStatus(CloneStatus::recovering); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -842,7 +850,7 @@ TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCreateCloneFile) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kSnapshot, true); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -860,7 +868,7 @@ TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCloneMeta) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kSnapshot, true); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -879,7 +887,7 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCloneMeta) { TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCreateCloneChunk) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kSnapshot, true); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -899,7 +907,7 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCreateCloneChunk) { TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCompleteCloneMeta) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kSnapshot, true); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto 
cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -920,7 +928,7 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCompleteCloneMeta) { TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnChangeOwner) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kSnapshot, true); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -942,7 +950,7 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnChangeOwner) { TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFileOnRenameCloneFile) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kSnapshot, true); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -963,9 +971,9 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFileOnRenameCloneFile) { } TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFileOnRecoverChunk) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", 1, 2, 100, CloneFileType::kSnapshot, true, - CloneStep::kRecoverChunk, CloneStatus::cloning); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, 1, 2, 100, CloneFileType::kSnapshot, true, + CloneStep::kRecoverChunk, CloneStatus::cloning); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -983,9 +991,9 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFileOnRecoverChunk) { } TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCompleteCloneFail) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", 1, 2, 100, CloneFileType::kSnapshot, true, - CloneStep::kRecoverChunk, CloneStatus::cloning); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, 1, 2, 100, CloneFileType::kSnapshot, true, + CloneStep::kRecoverChunk, CloneStatus::cloning); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1005,8 +1013,8 @@ TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskFailOnCompleteCloneFail) { TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskStage1SuccessForCloneByFile) { - CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kFile, true); + CloneInfo info("id1", "user1", CloneTaskType::kClone, "snapid1", "file1", + kDefaultPoolset, CloneFileType::kFile, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1029,7 +1037,7 @@ TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskStage2SuccessForCloneByFile) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", 1, 2, 100, CloneFileType::kFile, true, + "snapid1", "file1", kDefaultPoolset, 1, 2, 100, CloneFileType::kFile, true, CloneStep::kRecoverChunk, CloneStatus::cloning); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); @@ -1050,7 +1058,7 @@ TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskForCloneByFileFailOnBuildFileInfoFromFile) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kFile, 
true); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kFile, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1068,7 +1076,7 @@ TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskForCloneByFileFailOnInvalidSegmentSize) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kFile, true); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kFile, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1097,7 +1105,7 @@ TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskForCloneByFileFailOnInvalidFileLen) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kFile, true); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kFile, true); info.SetStatus(CloneStatus::cloning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1126,7 +1134,7 @@ TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, HandleCloneOrRecoverTaskStepUnknown) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kSnapshot, true); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::cloning); info.SetNextStep(static_cast(8)); auto cloneMetric = std::make_shared("id1"); @@ -1156,7 +1164,8 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotSuccess( uint64_t time = 100; Status status = Status::done; SnapshotInfo info(uuid, user, fileName, desc, - seqnum, chunksize, segmentsize, filelength, 0, 0, time, status); + seqnum, chunksize, segmentsize, filelength, 0, 0, kDefaultPoolset, + time, status); EXPECT_CALL(*metaStore_, GetSnapshotInfo(_, _)) .WillRepeatedly(DoAll( @@ -1167,6 +1176,7 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotSuccess( FInfo fInfo; fInfo.id = 100; fInfo.seqnum = 100; + fInfo.poolset = kDefaultPoolset; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) .WillRepeatedly(DoAll( SetArgPointee<2>(fInfo), @@ -1188,6 +1198,7 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotSuccess( FInfo fInfo; fInfo.id = 100; fInfo.seqnum = 100; + fInfo.poolset = kDefaultPoolset; EXPECT_CALL(*client_, GetFileInfo(_, _, _)) .WillRepeatedly(DoAll( SetArgPointee<2>(fInfo), @@ -1214,8 +1225,8 @@ void TestCloneCoreImpl::MockCreateCloneFileSuccess( std::shared_ptr task) { FInfo fInfoOut; fInfoOut.id = 100; - EXPECT_CALL(*client_, CreateCloneFile(_, _, _, _, _, _, _, _, _)) - .WillOnce(DoAll(SetArgPointee<8>(fInfoOut), + EXPECT_CALL(*client_, CreateCloneFile(_, _, _, _, _, _, _, _, _, _)) + .WillOnce(DoAll(SetArgPointee<9>(fInfoOut), Return(LIBCURVE_ERROR::OK))); } @@ -1223,7 +1234,6 @@ void TestCloneCoreImpl::MockCloneMetaSuccess( std::shared_ptr task) { uint32_t chunksize = 1024 * 1024; uint64_t segmentsize = 2 * chunksize; - uint64_t filelength = 1 * segmentsize; SegmentInfo segInfoOut; segInfoOut.segmentsize = segmentsize; segInfoOut.chunksize = chunksize; @@ -1329,7 +1339,8 @@ void TestCloneCoreImpl::MockBuildFileInfoFromSnapshotFail( uint64_t time = 100; Status status = Status::done; SnapshotInfo info(uuid, user, fileName, desc, - seqnum, chunksize, segmentsize, filelength, 0, 0, time, status); + seqnum, chunksize, segmentsize, filelength, 0, 0, "default", + time, status); EXPECT_CALL(*metaStore_, GetSnapshotInfo(_, _)) .WillRepeatedly(DoAll( @@ -1376,8 +1387,8 @@ void 
TestCloneCoreImpl::MockCreateCloneFileFail( std::shared_ptr task) { FInfo fInfoOut; fInfoOut.id = 100; - EXPECT_CALL(*client_, CreateCloneFile(_, _, _, _, _, _, _, _, _)) - .WillOnce(DoAll(SetArgPointee<8>(fInfoOut), + EXPECT_CALL(*client_, CreateCloneFile(_, _, _, _, _, _, _, _, _, _)) + .WillOnce(DoAll(SetArgPointee<9>(fInfoOut), Return(-LIBCURVE_ERROR::FAILED))); } @@ -1568,7 +1579,7 @@ TEST_F(TestCloneCoreImpl, TestCleanOrRecoverTaskPreUpdateCloneInfoFail) { TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskSuccess) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kSnapshot, false); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::errorCleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1585,7 +1596,7 @@ TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskSuccess) { TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskSuccess2) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kSnapshot, false); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::errorCleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1602,7 +1613,7 @@ TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskSuccess2) { TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskLazySuccess) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kSnapshot, true); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, true); info.SetStatus(CloneStatus::errorCleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1620,7 +1631,7 @@ TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskLazySuccess) { TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskFail1) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kSnapshot, false); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::errorCleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1640,7 +1651,7 @@ TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskFail1) { TEST_F(TestCloneCoreImpl, TestHandleCleanCloneOrRecoverTaskCleanNotErrorSuccess) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kSnapshot, false); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::cleaning); auto cloneMetric = std::make_shared("id1"); auto cloneClosure = std::make_shared(); @@ -1685,7 +1696,7 @@ TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoSnapDeleteCloneInfoFail) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kSnapshot, false); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) .WillOnce(Return(-1)); snapshotRef_->IncrementSnapshotRef("snapid1"); @@ -1696,7 +1707,7 @@ TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoSnapSuccess) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "snapid1", "file1", CloneFileType::kSnapshot, false); + "snapid1", "file1", kDefaultPoolset, CloneFileType::kSnapshot, false); info.SetStatus(CloneStatus::metaInstalled); EXPECT_CALL(*metaStore_, 
DeleteCloneInfo(_)) .WillOnce(Return(0)); @@ -1708,7 +1719,7 @@ TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoFileRefReturnMetainstalledNotTo0) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "source1", "file1", CloneFileType::kFile, false); + "source1", "file1", kDefaultPoolset, CloneFileType::kFile, false); info.SetStatus(CloneStatus::metaInstalled); EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) .WillOnce(Return(0)); @@ -1722,7 +1733,7 @@ TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoFileSetStatusFail) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "source1", "file1", CloneFileType::kFile, false); + "source1", "file1", kDefaultPoolset, CloneFileType::kFile, false); info.SetStatus(CloneStatus::metaInstalled); cloneRef_->IncrementRef("source1"); ASSERT_EQ(cloneRef_->GetRef("source1"), 1); @@ -1735,7 +1746,7 @@ TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoFileDeleteCloneInfoFail) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "source1", "file1", CloneFileType::kFile, false); + "source1", "file1", kDefaultPoolset, CloneFileType::kFile, false); info.SetStatus(CloneStatus::metaInstalled); EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) .WillOnce(Return(-1)); @@ -1750,7 +1761,7 @@ TEST_F(TestCloneCoreImpl, TEST_F(TestCloneCoreImpl, TestHandleDeleteCloneInfoFileSuccess) { CloneInfo info("id1", "user1", CloneTaskType::kClone, - "source1", "file1", CloneFileType::kFile, false); + "source1", "file1", kDefaultPoolset, CloneFileType::kFile, false); info.SetStatus(CloneStatus::metaInstalled); EXPECT_CALL(*metaStore_, DeleteCloneInfo(_)) .WillOnce(Return(0)); @@ -1764,4 +1775,3 @@ TEST_F(TestCloneCoreImpl, } // namespace snapshotcloneserver } // namespace curve - diff --git a/test/snapshotcloneserver/test_clone_service_manager.cpp b/test/snapshotcloneserver/test_clone_service_manager.cpp index 5be64a7327..d5a6583c51 100644 --- a/test/snapshotcloneserver/test_clone_service_manager.cpp +++ b/test/snapshotcloneserver/test_clone_service_manager.cpp @@ -43,6 +43,8 @@ using ::testing::Property; namespace curve { namespace snapshotcloneserver { +static const char* kDefaultPoolset = "poolset"; + class TestCloneServiceManager : public ::testing::Test { public: TestCloneServiceManager() {} @@ -114,12 +116,13 @@ TEST_F(TestCloneServiceManager, bool lazyFlag = true; CloneInfo cloneInfo("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, "", CloneFileType::kSnapshot, lazyFlag); EXPECT_CALL(*cloneCore_, CloneOrRecoverPre( - source, user, destination, lazyFlag, CloneTaskType::kClone, _)) + source, user, destination, lazyFlag, CloneTaskType::kClone, + kDefaultPoolset, _)) .WillOnce(DoAll( - SetArgPointee<5>(cloneInfo), + SetArgPointee<6>(cloneInfo), Return(kErrCodeSuccess))); CountDownEvent cond1(2); @@ -138,6 +141,7 @@ TEST_F(TestCloneServiceManager, source, user, destination, + kDefaultPoolset, lazyFlag, closure, &taskId); @@ -174,9 +178,10 @@ TEST_F(TestCloneServiceManager, CloneInfo cloneInfo; EXPECT_CALL(*cloneCore_, CloneOrRecoverPre( - source, user, destination, lazyFlag, CloneTaskType::kClone, _)) + source, user, destination, lazyFlag, CloneTaskType::kClone, + kDefaultPoolset, _)) .WillOnce(DoAll( - SetArgPointee<5>(cloneInfo), + SetArgPointee<6>(cloneInfo), Return(kErrCodeInternalError))); TaskIdType taskId; @@ -185,6 +190,7 @@ TEST_F(TestCloneServiceManager, source, user, destination, + kDefaultPoolset, lazyFlag, 
closure, &taskId); @@ -217,9 +223,10 @@ TEST_F(TestCloneServiceManager, TestCloneFileSuccessByTaskExist) { cloneInfo.SetTaskId(expectUuid); EXPECT_CALL(*cloneCore_, CloneOrRecoverPre( - source, user, destination, lazyFlag, CloneTaskType::kClone, _)) + source, user, destination, lazyFlag, CloneTaskType::kClone, + kDefaultPoolset, _)) .WillOnce(DoAll( - SetArgPointee<5>(cloneInfo), + SetArgPointee<6>(cloneInfo), Return(kErrCodeTaskExist))); TaskIdType taskId; @@ -228,6 +235,7 @@ TEST_F(TestCloneServiceManager, TestCloneFileSuccessByTaskExist) { source, user, destination, + kDefaultPoolset, lazyFlag, closure, &taskId); @@ -246,12 +254,14 @@ TEST_F(TestCloneServiceManager, bool lazyFlag = true; CloneInfo cloneInfo("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kSnapshot, lazyFlag); EXPECT_CALL(*cloneCore_, CloneOrRecoverPre( - source, user, destination, lazyFlag, CloneTaskType::kClone, _)) + source, user, destination, lazyFlag, CloneTaskType::kClone, + kDefaultPoolset, _)) .WillRepeatedly(DoAll( - SetArgPointee<5>(cloneInfo), + SetArgPointee<6>(cloneInfo), Return(kErrCodeSuccess))); CountDownEvent cond1(1); @@ -269,6 +279,7 @@ TEST_F(TestCloneServiceManager, source, user, destination, + kDefaultPoolset, lazyFlag, closure, &taskId); @@ -278,6 +289,7 @@ TEST_F(TestCloneServiceManager, source, user, destination, + kDefaultPoolset, lazyFlag, closure, &taskId); @@ -300,12 +312,13 @@ TEST_F(TestCloneServiceManager, bool lazyFlag = true; CloneInfo cloneInfo("uuid1", user, CloneTaskType::kRecover, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, "", CloneFileType::kSnapshot, lazyFlag); EXPECT_CALL(*cloneCore_, CloneOrRecoverPre( - source, user, destination, lazyFlag, CloneTaskType::kRecover, _)) + source, user, destination, lazyFlag, CloneTaskType::kRecover, + "", _)) .WillOnce(DoAll( - SetArgPointee<5>(cloneInfo), + SetArgPointee<6>(cloneInfo), Return(kErrCodeSuccess))); CountDownEvent cond1(2); @@ -357,9 +370,10 @@ TEST_F(TestCloneServiceManager, CloneInfo cloneInfo; EXPECT_CALL(*cloneCore_, CloneOrRecoverPre( - source, user, destination, lazyFlag, CloneTaskType::kRecover, _)) + source, user, destination, lazyFlag, CloneTaskType::kRecover, + "", _)) .WillOnce(DoAll( - SetArgPointee<5>(cloneInfo), + SetArgPointee<6>(cloneInfo), Return(kErrCodeInternalError))); TaskIdType taskId; @@ -386,9 +400,10 @@ TEST_F(TestCloneServiceManager, cloneInfo.SetTaskId(expectUuid); EXPECT_CALL(*cloneCore_, CloneOrRecoverPre( - source, user, destination, lazyFlag, CloneTaskType::kRecover, _)) + source, user, destination, lazyFlag, CloneTaskType::kRecover, + "", _)) .WillOnce(DoAll( - SetArgPointee<5>(cloneInfo), + SetArgPointee<6>(cloneInfo), Return(kErrCodeTaskExist))); TaskIdType taskId; @@ -415,12 +430,14 @@ TEST_F(TestCloneServiceManager, bool lazyFlag = true; CloneInfo cloneInfo("uuid1", user, CloneTaskType::kRecover, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, "", + CloneFileType::kSnapshot, lazyFlag); EXPECT_CALL(*cloneCore_, CloneOrRecoverPre( - source, user, destination, lazyFlag, CloneTaskType::kRecover, _)) + source, user, destination, lazyFlag, CloneTaskType::kRecover, + "", _)) .WillRepeatedly(DoAll( - SetArgPointee<5>(cloneInfo), + SetArgPointee<6>(cloneInfo), Return(kErrCodeSuccess))); CountDownEvent cond1(1); @@ -468,12 +485,14 @@ TEST_F(TestCloneServiceManager, TestGetCloneTaskInfoSuccess) { bool lazyFlag = true; CloneInfo 
cloneInfo("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kSnapshot, lazyFlag); EXPECT_CALL(*cloneCore_, CloneOrRecoverPre( - source, user, destination, lazyFlag, CloneTaskType::kClone, _)) + source, user, destination, lazyFlag, CloneTaskType::kClone, + kDefaultPoolset, _)) .WillOnce(DoAll( - SetArgPointee<5>(cloneInfo), + SetArgPointee<6>(cloneInfo), Return(kErrCodeSuccess))); CountDownEvent cond1(1); @@ -493,6 +512,7 @@ TEST_F(TestCloneServiceManager, TestGetCloneTaskInfoSuccess) { source, user, destination, + kDefaultPoolset, lazyFlag, closure, &taskId); @@ -530,7 +550,8 @@ TEST_F(TestCloneServiceManager, GetCloneTaskInfoByFilterSuccess) { bool lazyFlag = true; CloneInfo cloneInfo("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kSnapshot, lazyFlag); cloneInfo.SetStatus(CloneStatus::done); std::vector cloneInfos; @@ -564,7 +585,8 @@ TEST_F(TestCloneServiceManager, GetCloneTaskInfoByFilterFail) { bool lazyFlag = true; CloneInfo cloneInfo("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kSnapshot, lazyFlag); cloneInfo.SetStatus(CloneStatus::done); std::vector cloneInfos; @@ -605,12 +627,14 @@ TEST_F(TestCloneServiceManager, TestGetCloneTaskInfoByUUIDSuccess) { bool lazyFlag = true; CloneInfo cloneInfo(uuid, user, CloneTaskType::kClone, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kSnapshot, lazyFlag); EXPECT_CALL(*cloneCore_, CloneOrRecoverPre( - source, user, destination, lazyFlag, CloneTaskType::kClone, _)) + source, user, destination, lazyFlag, CloneTaskType::kClone, + kDefaultPoolset, _)) .WillOnce(DoAll( - SetArgPointee<5>(cloneInfo), + SetArgPointee<6>(cloneInfo), Return(kErrCodeSuccess))); CountDownEvent cond1(1); @@ -630,6 +654,7 @@ TEST_F(TestCloneServiceManager, TestGetCloneTaskInfoByUUIDSuccess) { source, user, destination, + kDefaultPoolset, lazyFlag, closure, &taskId); @@ -665,12 +690,14 @@ TEST_F(TestCloneServiceManager, TestGetCloneTaskInfoByFileNameSuccess) { bool lazyFlag = true; CloneInfo cloneInfo("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kSnapshot, lazyFlag); EXPECT_CALL(*cloneCore_, CloneOrRecoverPre( - source, user, destination, lazyFlag, CloneTaskType::kClone, _)) + source, user, destination, lazyFlag, CloneTaskType::kClone, + kDefaultPoolset, _)) .WillOnce(DoAll( - SetArgPointee<5>(cloneInfo), + SetArgPointee<6>(cloneInfo), Return(kErrCodeSuccess))); CountDownEvent cond1(1); @@ -690,6 +717,7 @@ TEST_F(TestCloneServiceManager, TestGetCloneTaskInfoByFileNameSuccess) { source, user, destination, + kDefaultPoolset, lazyFlag, closure, &taskId); @@ -763,11 +791,13 @@ TEST_F(TestCloneServiceManager, TestRecoverCloneTaskSuccess) { const std::string destination = "file1"; bool lazyFlag = true; CloneInfo cloneInfo("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kSnapshot, lazyFlag); // for Flatten CloneInfo cloneInfo2("uuid2", user, CloneTaskType::kClone, - source, destination, 1, 1, 1, CloneFileType::kSnapshot, lazyFlag, + source, destination, kDefaultPoolset, + 1, 1, 1, 
CloneFileType::kSnapshot, lazyFlag, CloneStep::kRecoverChunk, CloneStatus::cloning); std::vector cloneInfos; @@ -813,12 +843,14 @@ TEST_F(TestCloneServiceManager, TestCloneServiceNotStart) { bool lazyFlag = true; CloneInfo cloneInfo("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kSnapshot, lazyFlag); EXPECT_CALL(*cloneCore_, CloneOrRecoverPre( - source, user, destination, lazyFlag, CloneTaskType::kClone, _)) + source, user, destination, lazyFlag, CloneTaskType::kClone, + kDefaultPoolset, _)) .WillOnce(DoAll( - SetArgPointee<5>(cloneInfo), + SetArgPointee<6>(cloneInfo), Return(kErrCodeSuccess))); TaskIdType taskId; @@ -827,6 +859,7 @@ TEST_F(TestCloneServiceManager, TestCloneServiceNotStart) { source, user, destination, + kDefaultPoolset, lazyFlag, closure, &taskId); @@ -840,7 +873,8 @@ TEST_F(TestCloneServiceManager, TestGetCloneTaskInfoSuccessCloneTaskDone) { bool lazyFlag = true; CloneInfo cloneInfo("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kSnapshot, lazyFlag); cloneInfo.SetStatus(CloneStatus::done); std::vector cloneInfos; @@ -872,7 +906,8 @@ TEST_F(TestCloneServiceManager, TestGetCloneTaskInfoSuccessCloneTaskError) { bool lazyFlag = true; CloneInfo cloneInfo("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kSnapshot, lazyFlag); cloneInfo.SetStatus(CloneStatus::error); std::vector cloneInfos; @@ -904,7 +939,8 @@ TEST_F(TestCloneServiceManager, TestGetCloneTaskInfoSuccessCloneTaskDone2) { bool lazyFlag = true; CloneInfo cloneInfo("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kSnapshot, lazyFlag); cloneInfo.SetStatus(CloneStatus::cloning); std::vector cloneInfos; @@ -941,7 +977,8 @@ TEST_F(TestCloneServiceManager, TestGetCloneTaskInfoSuccessCloneTaskError2) { bool lazyFlag = true; CloneInfo cloneInfo("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kSnapshot, lazyFlag); cloneInfo.SetStatus(CloneStatus::cloning); std::vector cloneInfos; @@ -978,7 +1015,8 @@ TEST_F(TestCloneServiceManager, TestGetCloneTaskInfoFailCanNotReach) { bool lazyFlag = true; CloneInfo cloneInfo("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kSnapshot, lazyFlag); cloneInfo.SetStatus(CloneStatus::cloning); std::vector cloneInfos; @@ -1004,7 +1042,8 @@ TEST_F(TestCloneServiceManager, TestGetCloneTaskInfoFailOnGetCloneInfo) { bool lazyFlag = true; CloneInfo cloneInfo("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kSnapshot, lazyFlag); cloneInfo.SetStatus(CloneStatus::cloning); std::vector cloneInfos; @@ -1029,7 +1068,8 @@ TEST_F(TestCloneServiceManager, TestRecoverCloneTaskGetCloneInfoListFail) { const std::string destination = "file1"; bool lazyFlag = true; CloneInfo cloneInfo("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kSnapshot, lazyFlag); std::vector cloneInfos; 
cloneInfos.push_back(cloneInfo); @@ -1047,7 +1087,8 @@ TEST_F(TestCloneServiceManager, TestRecoverCloneTaskPushTaskFail) { const std::string destination = "file1"; bool lazyFlag = true; CloneInfo cloneInfo("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kSnapshot, lazyFlag); std::vector cloneInfos; cloneInfos.push_back(cloneInfo); @@ -1078,7 +1119,8 @@ TEST_F(TestCloneServiceManager, TestRecoverCloneTaskDefaultSuccess) { const std::string destination = "file1"; bool lazyFlag = true; CloneInfo cloneInfo("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kSnapshot, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kSnapshot, lazyFlag); cloneInfo.SetStatus(CloneStatus::done); std::vector cloneInfos; @@ -1100,10 +1142,12 @@ TEST_F(TestCloneServiceManager, TestGetCloneRefStatusSuccessNoRef) { // only done record CloneInfo cloneInfo1("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kFile, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kFile, lazyFlag); cloneInfo1.SetStatus(CloneStatus::done); CloneInfo cloneInfo2("uuid2", user, CloneTaskType::kClone, - source2, destination, CloneFileType::kFile, lazyFlag); + source2, destination, kDefaultPoolset, + CloneFileType::kFile, lazyFlag); cloneInfo2.SetStatus(CloneStatus::metaInstalled); std::vector list; @@ -1138,13 +1182,16 @@ TEST_F(TestCloneServiceManager, TestGetCloneRefStatusSuccessHasRef) { bool lazyFlag = true; CloneInfo cloneInfo1("uuid1", user, CloneTaskType::kClone, - source, destination, CloneFileType::kFile, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kFile, lazyFlag); cloneInfo1.SetStatus(CloneStatus::done); CloneInfo cloneInfo2("uuid2", user, CloneTaskType::kClone, - source, destination, CloneFileType::kFile, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kFile, lazyFlag); cloneInfo2.SetStatus(CloneStatus::cloning); CloneInfo cloneInfo3("uuid3", user, CloneTaskType::kClone, - source, destination, CloneFileType::kFile, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kFile, lazyFlag); cloneInfo3.SetStatus(CloneStatus::metaInstalled); std::vector list; @@ -1172,13 +1219,16 @@ TEST_F(TestCloneServiceManager, TestGetCloneRefStatusSuccessNeedCheck) { bool lazyFlag = true; CloneInfo cloneInfo1("uuid1", user, CloneTaskType::kClone, - source , destination, CloneFileType::kFile, lazyFlag); + source , destination, kDefaultPoolset, + CloneFileType::kFile, lazyFlag); cloneInfo1.SetStatus(CloneStatus::done); CloneInfo cloneInfo2("uuid2", user, CloneTaskType::kClone, - source2, destination, CloneFileType::kFile, lazyFlag); + source2, destination, kDefaultPoolset, + CloneFileType::kFile, lazyFlag); cloneInfo2.SetStatus(CloneStatus::cloning); CloneInfo cloneInfo3("uuid3", user, CloneTaskType::kClone, - source, destination, CloneFileType::kFile, lazyFlag); + source, destination, kDefaultPoolset, + CloneFileType::kFile, lazyFlag); cloneInfo3.SetStatus(CloneStatus::metaInstalled); std::vector list; @@ -1264,9 +1314,11 @@ TEST_F(TestCloneServiceManagerBackend, TEST_F(TestCloneServiceManagerBackend, TestNoMetaInstalledClone) { CloneInfo cloneInfo1("taskId1", "user1", CloneTaskType::kClone, - "source1", "destination1", CloneFileType::kSnapshot, true); + "source1", "destination1", kDefaultPoolset, + CloneFileType::kSnapshot, true); CloneInfo cloneInfo2("taskId2", "user2", CloneTaskType::kRecover, - 
"source2", "destination2", CloneFileType::kFile, false); + "source2", "destination2", kDefaultPoolset, + CloneFileType::kFile, false); std::vector cloneInfos; cloneInfos.push_back(cloneInfo1); cloneInfos.push_back(cloneInfo2); @@ -1288,9 +1340,11 @@ TEST_F(TestCloneServiceManagerBackend, TEST_F(TestCloneServiceManagerBackend, TestMetaInstalledCloneNotExist) { CloneInfo cloneInfo1("taskId1", "user1", CloneTaskType::kClone, - "source1", "destination1", CloneFileType::kSnapshot, true); + "source1", "destination1", kDefaultPoolset, + CloneFileType::kSnapshot, true); CloneInfo cloneInfo2("taskId2", "user2", CloneTaskType::kRecover, - "source2", "destination2", 0, 0, 0, CloneFileType::kFile, false, + "source2", "destination2", "", + 0, 0, 0, CloneFileType::kFile, false, CloneStep::kRecoverChunk, CloneStatus::metaInstalled); std::vector cloneInfos; cloneInfos.push_back(cloneInfo1); @@ -1315,9 +1369,11 @@ TEST_F(TestCloneServiceManagerBackend, TEST_F(TestCloneServiceManagerBackend, TestMetaInstalledCloneNotMetaInstalled) { CloneInfo cloneInfo1("taskId1", "user1", CloneTaskType::kClone, - "source1", "destination1", CloneFileType::kSnapshot, true); + "source1", "destination1", kDefaultPoolset, + CloneFileType::kSnapshot, true); CloneInfo cloneInfo2("taskId2", "user2", CloneTaskType::kRecover, - "source2", "destination2", 0, 0, 0, CloneFileType::kFile, false, + "source2", "destination2", "", + 0, 0, 0, CloneFileType::kFile, false, CloneStep::kRecoverChunk, CloneStatus::metaInstalled); std::vector cloneInfos; cloneInfos.push_back(cloneInfo1); @@ -1343,9 +1399,11 @@ TEST_F(TestCloneServiceManagerBackend, TEST_F(TestCloneServiceManagerBackend, TestMetaInstalledCloneFileExist) { CloneInfo cloneInfo1("taskId1", "user1", CloneTaskType::kClone, - "source1", "destination1", CloneFileType::kSnapshot, true); + "source1", "destination1", kDefaultPoolset, + CloneFileType::kSnapshot, true); CloneInfo cloneInfo2("taskId2", "user2", CloneTaskType::kRecover, - "source2", "destination2", 0, 0, 0, CloneFileType::kFile, false, + "source2", "destination2", "", + 0, 0, 0, CloneFileType::kFile, false, CloneStep::kRecoverChunk, CloneStatus::metaInstalled); std::vector cloneInfos; cloneInfos.push_back(cloneInfo1); @@ -1373,9 +1431,11 @@ TEST_F(TestCloneServiceManagerBackend, TEST_F(TestCloneServiceManagerBackend, TestMetaInstalledCloneDeleteFail) { CloneInfo cloneInfo1("taskId1", "user1", CloneTaskType::kClone, - "source1", "destination1", CloneFileType::kSnapshot, true); + "source1", "destination1", kDefaultPoolset, + CloneFileType::kSnapshot, true); CloneInfo cloneInfo2("taskId2", "user2", CloneTaskType::kRecover, - "source2", "destination2", 0, 0, 0, CloneFileType::kFile, false, + "source2", "destination2", "", + 0, 0, 0, CloneFileType::kFile, false, CloneStep::kRecoverChunk, CloneStatus::metaInstalled); std::vector cloneInfos; cloneInfos.push_back(cloneInfo1); @@ -1405,9 +1465,11 @@ TEST_F(TestCloneServiceManagerBackend, TEST_F(TestCloneServiceManagerBackend, TestMetaInstalledCloneDeleteSuccess) { CloneInfo cloneInfo1("taskId1", "user1", CloneTaskType::kClone, - "source1", "destination1", CloneFileType::kSnapshot, true); + "source1", "destination1", kDefaultPoolset, + CloneFileType::kSnapshot, true); CloneInfo cloneInfo2("taskId2", "user2", CloneTaskType::kRecover, - "source2", "destination2", 0, 0, 0, CloneFileType::kFile, false, + "source2", "destination2", "", + 0, 0, 0, CloneFileType::kFile, false, CloneStep::kRecoverChunk, CloneStatus::metaInstalled); std::vector cloneInfos; cloneInfos.push_back(cloneInfo1); @@ 
-1436,4 +1498,3 @@ TEST_F(TestCloneServiceManagerBackend,
} // namespace snapshotcloneserver
} // namespace curve
-
diff --git a/test/snapshotcloneserver/test_curvefs_client.cpp b/test/snapshotcloneserver/test_curvefs_client.cpp
index 27181995e0..b4e79b17b0 100644
--- a/test/snapshotcloneserver/test_curvefs_client.cpp
+++ b/test/snapshotcloneserver/test_curvefs_client.cpp
@@ -128,11 +128,11 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) {
ASSERT_LT(ret, 0);
ret = client_->CreateCloneFile(
- "source1", "file1", "user1", 1024, 1, 1024, 0, 0, &fInfo);
+ "source1", "file1", "user1", 1024, 1, 1024, 0, 0, "default", &fInfo);
ASSERT_LT(ret, 0);
ret = client_->CreateCloneFile(
"source1", "file1", clientOption_.mdsRootUser, 1024, 1, 1024,
- 0, 0, &fInfo);
+ 0, 0, "default", &fInfo);
ASSERT_LT(ret, 0);
TestClosure *cb = new TestClosure();
@@ -191,4 +191,3 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) {
} // namespace snapshotcloneserver
} // namespace curve
-
diff --git a/test/snapshotcloneserver/test_snapshot_core.cpp b/test/snapshotcloneserver/test_snapshot_core.cpp
index 014f776d5d..a30db66c7f 100644
--- a/test/snapshotcloneserver/test_snapshot_core.cpp
+++ b/test/snapshotcloneserver/test_snapshot_core.cpp
@@ -989,7 +989,7 @@ TEST_F(TestSnapshotCoreImpl,
std::vector<SnapshotInfo> snapInfos;
SnapshotInfo info2(uuid2, user, fileName, desc2);
info.SetSeqNum(seqNum);
- info2.SetSeqNum(seqNum - 1); //上一个快照
+ info2.SetSeqNum(seqNum - 1); // 上一个快照
info2.SetStatus(Status::done);
snapInfos.push_back(info);
snapInfos.push_back(info2);
diff --git a/test/snapshotcloneserver/test_snapshotclone_meta_store_etcd.cpp b/test/snapshotcloneserver/test_snapshotclone_meta_store_etcd.cpp
index a32c7303f7..7f2045c6f3 100644
--- a/test/snapshotcloneserver/test_snapshotclone_meta_store_etcd.cpp
+++ b/test/snapshotcloneserver/test_snapshotclone_meta_store_etcd.cpp
@@ -52,6 +52,8 @@ using ::testing::Matcher;
namespace curve {
namespace snapshotcloneserver {
+static const char* kDefaultPoolset = "poolset";
+
class TestSnapshotCloneMetaStoreEtcd : public ::testing::Test {
public:
TestSnapshotCloneMetaStoreEtcd() {}
@@ -104,6 +106,7 @@ bool JudgeSnapshotInfoEqual(const SnapshotInfo &left,
left.GetFileLength() == right.GetFileLength() &&
left.GetStripeUnit() == right.GetStripeUnit() &&
left.GetStripeCount() == right.GetStripeCount() &&
+ left.GetPoolset() == right.GetPoolset() &&
left.GetCreateTime() == right.GetCreateTime() &&
left.GetStatus() == right.GetStatus()) {
return true;
@@ -116,7 +119,7 @@ bool JudgeSnapshotInfoEqual(const SnapshotInfo &left,
TEST_F(TestSnapshotCloneMetaStoreEtcd,
TestAddSnapInfoAndGetSuccess) {
SnapshotInfo snapInfo("snapuuid", "snapuser", "file1", "snapxxx", 100,
- 1024, 2048, 4096, 0, 0, 0,
+ 1024, 2048, 4096, 0, 0, kDefaultPoolset, 0,
Status::pending);
EXPECT_CALL(*kvStorageClient_, Put(_, _))
@@ -135,7 +138,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd,
TEST_F(TestSnapshotCloneMetaStoreEtcd,
TestAddSnapshotInfoPutInfoEtcdFail) {
SnapshotInfo snapInfo("snapuuid", "snapuser", "file1", "snapxxx", 100,
- 1024, 2048, 4096, 0, 0, 0,
+ 1024, 2048, 4096, 0, 0, kDefaultPoolset, 0,
Status::pending);
EXPECT_CALL(*kvStorageClient_, Put(_, _))
@@ -148,7 +151,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd,
TEST_F(TestSnapshotCloneMetaStoreEtcd,
TestDeleteSnapshotSuccess) {
SnapshotInfo snapInfo("snapuuid", "snapuser", "file1", "snapxxx", 100,
- 1024, 2048, 4096, 0, 0, 0,
+ 1024, 2048, 4096, 0, 0, kDefaultPoolset, 0,
Status::pending);
EXPECT_CALL(*kvStorageClient_, Put(_, _))
@@ -168,7 +171,7 @@
TEST_F(TestSnapshotCloneMetaStoreEtcd, TEST_F(TestSnapshotCloneMetaStoreEtcd, TestDeleteSnapshotDeleteFromEtcdFail) { SnapshotInfo snapInfo("snapuuid", "snapuser", "file1", "snapxxx", 100, - 1024, 2048, 4096, 0, 0, 0, + 1024, 2048, 4096, 0, 0, kDefaultPoolset, 0, Status::pending); EXPECT_CALL(*kvStorageClient_, Put(_, _)) @@ -188,7 +191,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TEST_F(TestSnapshotCloneMetaStoreEtcd, TestUpdateSnapshotAndGetSuccess) { SnapshotInfo snapInfo("snapuuid", "snapuser", "file1", "snapxxx", 100, - 1024, 2048, 4096, 0, 0, 0, + 1024, 2048, 4096, 0, 0, kDefaultPoolset, 0, Status::pending); EXPECT_CALL(*kvStorageClient_, Put(_, _)) @@ -198,7 +201,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, ASSERT_EQ(0, ret); SnapshotInfo snapInfo2("snapuuid", "snapuser2", "file2", "snapxxx2", 101, - 1025, 2049, 4097, 1, 0, 0, + 1025, 2049, 4097, 1, 0, kDefaultPoolset, 0, Status::done); EXPECT_CALL(*kvStorageClient_, Put(_, _)) @@ -217,7 +220,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TEST_F(TestSnapshotCloneMetaStoreEtcd, TestUpdateSnapshotPutInfoEtcdFail) { SnapshotInfo snapInfo("snapuuid", "snapuser", "file1", "snapxxx", 100, - 1024, 2048, 4096, 0, 0, 0, + 1024, 2048, 4096, 0, 0, kDefaultPoolset, 0, Status::pending); EXPECT_CALL(*kvStorageClient_, Put(_, _)) @@ -227,7 +230,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, ASSERT_EQ(0, ret); SnapshotInfo snapInfo2("snapuuid", "snapuser2", "file2", "snapxxx2", 101, - 1025, 2049, 4097, 0, 0, 1, + 1025, 2049, 4097, 0, 0, kDefaultPoolset, 1, Status::done); EXPECT_CALL(*kvStorageClient_, Put(_, _)) @@ -240,7 +243,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TEST_F(TestSnapshotCloneMetaStoreEtcd, TestUpdateSnapshotNotExistAndGetSuccess) { SnapshotInfo snapInfo("snapuuid", "snapuser", "file1", "snapxxx", 100, - 1024, 2048, 4096, 0, 0, 0, + 1024, 2048, 4096, 0, 0, kDefaultPoolset, 0, Status::pending); EXPECT_CALL(*kvStorageClient_, Put(_, _)) @@ -258,7 +261,8 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TEST_F(TestSnapshotCloneMetaStoreEtcd, TestCASSnapshot) { SnapshotInfo snapInfo( - "uuid", "user", "", "", 0, 0, 0, 0, 0, 0, 0, Status::pending); + "uuid", "user", "", "", 0, 0, 0, 0, 0, 0, kDefaultPoolset, + 0, Status::pending); auto setUp = [&]() { EXPECT_CALL(*kvStorageClient_, Put(_, _)) @@ -290,7 +294,8 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TestCASSnapshot) { setUp(); SnapshotInfo setInfo( - "uuid", "user1", "", "", 1, 1, 1, 1, 1, 1, 1, Status::done); + "uuid", "user1", "", "", 1, 1, 1, 1, 1, 1, kDefaultPoolset, + 1, Status::done); auto cas = [&setInfo](SnapshotInfo* snapinfo) -> SnapshotInfo* { return &setInfo; }; @@ -347,7 +352,8 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TestCASSnapshot) { setUp(); SnapshotInfo setInfo( - "uuid", "user1", "", "", 1, 1, 1, 1, 1, 1, 1, Status::done); + "uuid", "user1", "", "", 1, 1, 1, 1, 1, 1, kDefaultPoolset, + 1, Status::done); auto cas = [&setInfo](SnapshotInfo* snapInfo) -> SnapshotInfo* { setInfo.SetStatus(snapInfo->GetStatus()); return &setInfo; @@ -376,7 +382,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TEST_F(TestSnapshotCloneMetaStoreEtcd, TestGetSnapshotList1Success) { SnapshotInfo snapInfo("snapuuid", "snapuser", "file1", "snapxxx", 100, - 1024, 2048, 4096, 0, 0, 0, + 1024, 2048, 4096, 0, 0, kDefaultPoolset, 0, Status::pending); EXPECT_CALL(*kvStorageClient_, Put(_, _)) @@ -403,7 +409,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TEST_F(TestSnapshotCloneMetaStoreEtcd, TestGetSnapshotList2Success) { SnapshotInfo snapInfo("snapuuid", "snapuser", "file1", "snapxxx", 100, - 1024, 2048, 4096, 0, 0, 0, + 
1024, 2048, 4096, 0, 0, kDefaultPoolset, 0, Status::pending); EXPECT_CALL(*kvStorageClient_, Put(_, _)) @@ -433,7 +439,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TestAddCloneInfoAndGetSuccess) { CloneInfo cloneInfo("uuid1", "user1", CloneTaskType::kClone, "src1", - "dst1", 1, 2, 3, + "dst1", kDefaultPoolset, 1, 2, 3, CloneFileType::kFile, false, CloneStep::kCompleteCloneFile, CloneStatus::cloning); @@ -455,7 +461,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TestAddCloneInfoPutInfoEtcdFail) { CloneInfo cloneInfo("uuid1", "user1", CloneTaskType::kClone, "src1", - "dst1", 1, 2, 3, + "dst1", kDefaultPoolset, 1, 2, 3, CloneFileType::kFile, false, CloneStep::kCompleteCloneFile, CloneStatus::cloning); @@ -471,7 +477,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TestDeleteCloneInfoSuccess) { CloneInfo cloneInfo("uuid1", "user1", CloneTaskType::kClone, "src1", - "dst1", 1, 2, 3, + "dst1", kDefaultPoolset, 1, 2, 3, CloneFileType::kFile, false, CloneStep::kCompleteCloneFile, CloneStatus::cloning); @@ -493,7 +499,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TestDeleteCloneInfoDeleteFromEtcdFail) { CloneInfo cloneInfo("uuid1", "user1", CloneTaskType::kClone, "src1", - "dst1", 1, 2, 3, + "dst1", kDefaultPoolset, 1, 2, 3, CloneFileType::kFile, false, CloneStep::kCompleteCloneFile, CloneStatus::cloning); @@ -515,7 +521,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TestUpdateCloneInfoAndGetSuccess) { CloneInfo cloneInfo("uuid1", "user1", CloneTaskType::kClone, "src1", - "dst1", 1, 2, 3, + "dst1", kDefaultPoolset, 1, 2, 3, CloneFileType::kFile, false, CloneStep::kCompleteCloneFile, CloneStatus::cloning); @@ -534,7 +540,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, CloneInfo cloneInfo2("uuid1", "user2", CloneTaskType::kClone, "src2", - "dst2", 2, 3, 4, + "dst2", kDefaultPoolset, 2, 3, 4, CloneFileType::kFile, false, CloneStep::kEnd, CloneStatus::done); @@ -553,7 +559,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TestUpdateCloneInfoPutInfoEtcdFail) { CloneInfo cloneInfo("uuid1", "user1", CloneTaskType::kClone, "src1", - "dst1", 1, 2, 3, + "dst1", kDefaultPoolset, 1, 2, 3, CloneFileType::kFile, false, CloneStep::kCompleteCloneFile, CloneStatus::cloning); @@ -578,7 +584,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TestUpdateCloneInfoNotExist) { CloneInfo cloneInfo("uuid1", "user1", CloneTaskType::kClone, "src1", - "dst1", 1, 2, 3, + "dst1", kDefaultPoolset, 1, 2, 3, CloneFileType::kFile, false, CloneStep::kCompleteCloneFile, CloneStatus::cloning); @@ -601,7 +607,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TestGetCloneInfoByFileNameSuccess) { CloneInfo cloneInfo("uuid1", "user1", CloneTaskType::kClone, "src1", - "dst1", 1, 2, 3, + "dst1", kDefaultPoolset, 1, 2, 3, CloneFileType::kFile, false, CloneStep::kCompleteCloneFile, CloneStatus::cloning); @@ -631,7 +637,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TestGetCloneInfoListSuccess) { CloneInfo cloneInfo("uuid1", "user1", CloneTaskType::kClone, "src1", - "dst1", 1, 2, 3, + "dst1", kDefaultPoolset, 1, 2, 3, CloneFileType::kFile, false, CloneStep::kCompleteCloneFile, CloneStatus::cloning); @@ -660,7 +666,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TEST_F(TestSnapshotCloneMetaStoreEtcd, TestInitSuccess) { SnapshotInfo snapInfo("snapuuid", "snapuser", "file1", "snapxxx", 100, - 1024, 2048, 4096, 0, 0, 0, + 1024, 2048, 4096, 0, 0, kDefaultPoolset, 0, Status::pending); SnapshotCloneCodec codec; std::string value; @@ -670,7 +676,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, CloneInfo cloneInfo("uuid1", "user1", CloneTaskType::kClone, "src1", - "dst1", 1, 2, 3, + "dst1", 
kDefaultPoolset, 1, 2, 3, CloneFileType::kFile, false, CloneStep::kCompleteCloneFile, CloneStatus::cloning); @@ -702,7 +708,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TEST_F(TestSnapshotCloneMetaStoreEtcd, TestInitListCloneInfoFail) { SnapshotInfo snapInfo("snapuuid", "snapuser", "file1", "snapxxx", 100, - 1024, 2048, 4096, 0, 0, 0, + 1024, 2048, 4096, 0, 0, "default", 0, Status::pending); SnapshotCloneCodec codec; std::string value; @@ -735,7 +741,7 @@ TEST_F(TestSnapshotCloneMetaStoreEtcd, TEST_F(TestSnapshotCloneMetaStoreEtcd, TestInitDecodeCloneInfoFail) { SnapshotInfo snapInfo("snapuuid", "snapuser", "file1", "snapxxx", 100, - 1024, 2048, 4096, 0, 0, 0, + 1024, 2048, 4096, 0, 0, "default", 0, Status::pending); SnapshotCloneCodec codec; std::string value; diff --git a/test/snapshotcloneserver/test_snapshotclone_service.cpp b/test/snapshotcloneserver/test_snapshotclone_service.cpp index 2d31f1f0c5..5e452c2fdd 100644 --- a/test/snapshotcloneserver/test_snapshotclone_service.cpp +++ b/test/snapshotcloneserver/test_snapshotclone_service.cpp @@ -43,6 +43,8 @@ using ::testing::DoAll; namespace curve { namespace snapshotcloneserver { +static const char* kDefaultPoolset = "poolset"; + class TestSnapshotCloneServiceImpl : public ::testing::Test { protected: TestSnapshotCloneServiceImpl() {} @@ -207,6 +209,7 @@ TEST_F(TestSnapshotCloneServiceImpl, TestGetFileSnapshotInfoSuccess) { 2048, 0, 0, + "default", 100, Status::pending); info.SetSnapshotInfo(sinfo); @@ -346,6 +349,7 @@ TEST_F(TestSnapshotCloneServiceImpl, TestGetFileSnapshotListSuccess) { 2048, 0, 0, + "default", 100, Status::pending); info.SetSnapshotInfo(sinfo); @@ -882,10 +886,11 @@ TEST_F(TestSnapshotCloneServiceImpl, TestCloneFileSuccess) { bool lazyFlag = false; EXPECT_CALL(*cloneManager_, CloneFile( - source, user, destination, lazyFlag, _, _)) + source, user, destination, "", lazyFlag, _, _)) .WillOnce(Invoke([](const UUID &source, const std::string &user, const std::string &destination, + const std::string &poolset, bool lazyFlag, std::shared_ptr closure, TaskIdType *taskId){ @@ -922,6 +927,44 @@ TEST_F(TestSnapshotCloneServiceImpl, TestCloneFileSuccess) { LOG(ERROR) << cntl.response_attachment(); } +TEST_F(TestSnapshotCloneServiceImpl, TestCloneFileMissingPoolset) { + UUID uuid = "uuid1"; + std::string user = "user1"; + std::string source = "abc"; + std::string destination = "file1"; + bool lazyFlag = false; + + EXPECT_CALL(*cloneManager_, CloneFile( + source, user, destination, _, lazyFlag, _, _)) + .Times(0); + + brpc::Channel channel; + brpc::ChannelOptions option; + option.protocol = "http"; + + std::string url = std::string("http://127.0.0.1:") + + std::to_string(listenAddr_.port) + + "/" + kServiceName + "?" + + kActionStr + "=" +kCloneAction + "&" + + kVersionStr + "=1&" + + kUserStr + "=" + user + "&" + + kSourceStr + "=" + source + "&" + + kDestinationStr + "=" + destination + "&" + + kLazyStr + "=" + (lazyFlag ? 
"True" : "False") + + kPoolset + "="; + + if (channel.Init(url.c_str(), "", &option) != 0) { + FAIL() << "Fail to init channel" + << std::endl; + } + + brpc::Controller cntl; + cntl.http_request().uri() = url.c_str(); + + channel.CallMethod(NULL, &cntl, NULL, NULL, NULL); + ASSERT_TRUE(cntl.Failed()) << cntl.ErrorText(); +} + TEST_F(TestSnapshotCloneServiceImpl, TestRecoverFileSuccess) { UUID uuid = "uuid1"; std::string user = "user1"; @@ -981,6 +1024,7 @@ TEST_F(TestSnapshotCloneServiceImpl, TestGetCloneTaskSuccess) { CloneTaskType::kClone, "source", "dest", + kDefaultPoolset, 100, 200, 100, @@ -1336,10 +1380,11 @@ TEST_F(TestSnapshotCloneServiceImpl, TestCloneFileFail) { bool lazyFlag = false; EXPECT_CALL(*cloneManager_, CloneFile( - source, user, destination, lazyFlag, _, _)) + source, user, destination, "", lazyFlag, _, _)) .WillOnce(Invoke([](const UUID &source, const std::string &user, const std::string &destination, + const std::string &poolset, bool lazyFlag, std::shared_ptr closure, TaskIdType *taskId){ @@ -2008,4 +2053,3 @@ TEST_F(TestSnapshotCloneServiceImpl, TestGetCloneRefStatusFail) { } } // namespace snapshotcloneserver } // namespace curve - diff --git a/test/snapshotcloneserver/test_snapshotclonecodec.cpp b/test/snapshotcloneserver/test_snapshotclonecodec.cpp index 96f9ce3b36..2952ec237c 100644 --- a/test/snapshotcloneserver/test_snapshotclonecodec.cpp +++ b/test/snapshotcloneserver/test_snapshotclonecodec.cpp @@ -39,9 +39,11 @@ using ::testing::DoAll; namespace curve { namespace snapshotcloneserver { +static const char* kDefaultPoolset = "poolset"; + TEST(TestSnapshotCloneServerCodec, TestSnapInfoEncodeDecodeEqual) { SnapshotInfo snapInfo("snapuuid", "snapuser", "file1", "snapxxx", 100, - 1024, 2048, 4096, 4096, 8, 0, + 1024, 2048, 4096, 4096, 8, kDefaultPoolset, 0, Status::pending); SnapshotCloneCodec testObj; std::string value; @@ -60,6 +62,7 @@ TEST(TestSnapshotCloneServerCodec, TestSnapInfoEncodeDecodeEqual) { ASSERT_EQ(snapInfo.GetFileLength(), decodedSnapInfo.GetFileLength()); ASSERT_EQ(snapInfo.GetStripeUnit(), decodedSnapInfo.GetStripeUnit()); ASSERT_EQ(snapInfo.GetStripeCount(), decodedSnapInfo.GetStripeCount()); + ASSERT_EQ(snapInfo.GetPoolset(), decodedSnapInfo.GetPoolset()); ASSERT_EQ(snapInfo.GetCreateTime(), decodedSnapInfo.GetCreateTime()); ASSERT_EQ(snapInfo.GetStatus(), decodedSnapInfo.GetStatus()); } @@ -67,7 +70,7 @@ TEST(TestSnapshotCloneServerCodec, TestSnapInfoEncodeDecodeEqual) { TEST(TestSnapshotCloneServerCodec, TestCloneInfoEncodeDecodeEqual) { CloneInfo cloneInfo("cloneuuid", "cloneuser", CloneTaskType::kRecover, "srcfile", - "dstfile", 1, 2, 3, + "dstfile", kDefaultPoolset, 1, 2, 3, CloneFileType::kSnapshot, false, CloneStep::kCompleteCloneFile, CloneStatus::recovering); diff --git a/test/tools/mds_client_test.cpp b/test/tools/mds_client_test.cpp index 4d8b465f68..e261d43895 100644 --- a/test/tools/mds_client_test.cpp +++ b/test/tools/mds_client_test.cpp @@ -28,36 +28,36 @@ #include "test/tools/mock/mock_topology_service.h" #include "test/tools/mock/mock_schedule_service.h" -using curve::mds::topology::DiskState; -using curve::mds::topology::OnlineState; +using curve::mds::schedule::QueryChunkServerRecoverStatusRequest; +using curve::mds::schedule::QueryChunkServerRecoverStatusResponse; +using curve::mds::schedule::RapidLeaderScheduleResponse; using curve::mds::topology::AllocateStatus; -using curve::mds::topology::LogicalPoolType; -using curve::mds::topology::ListPhysicalPoolRequest; -using curve::mds::topology::ListPhysicalPoolResponse; 
-using curve::mds::topology::ListLogicalPoolRequest; -using curve::mds::topology::ListLogicalPoolResponse; +using curve::mds::topology::DiskState; using curve::mds::topology::GetChunkServerListInCopySetsRequest; using curve::mds::topology::GetChunkServerListInCopySetsResponse; +using curve::mds::topology::GetCopysetRequest; +using curve::mds::topology::GetCopysetResponse; using curve::mds::topology::GetCopySetsInChunkServerRequest; using curve::mds::topology::GetCopySetsInChunkServerResponse; using curve::mds::topology::GetCopySetsInClusterRequest; using curve::mds::topology::GetCopySetsInClusterResponse; -using curve::mds::topology::GetCopysetRequest; -using curve::mds::topology::GetCopysetResponse; -using curve::mds::topology::SetCopysetsAvailFlagRequest; -using curve::mds::topology::SetCopysetsAvailFlagResponse; +using curve::mds::topology::ListLogicalPoolRequest; +using curve::mds::topology::ListLogicalPoolResponse; +using curve::mds::topology::ListPhysicalPoolRequest; +using curve::mds::topology::ListPhysicalPoolResponse; using curve::mds::topology::ListUnAvailCopySetsRequest; using curve::mds::topology::ListUnAvailCopySetsResponse; +using curve::mds::topology::LogicalPoolType; +using curve::mds::topology::OnlineState; +using curve::mds::topology::SetCopysetsAvailFlagRequest; +using curve::mds::topology::SetCopysetsAvailFlagResponse; using curve::mds::topology::SetLogicalPoolScanStateRequest; using curve::mds::topology::SetLogicalPoolScanStateResponse; -using curve::mds::schedule::RapidLeaderScheduleResponse; -using curve::mds::schedule::QueryChunkServerRecoverStatusRequest; -using curve::mds::schedule::QueryChunkServerRecoverStatusResponse; using ::testing::_; -using ::testing::Return; -using ::testing::Invoke; using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::Return; using ::testing::SetArgPointee; DECLARE_string(mdsDummyPort); @@ -67,11 +67,9 @@ namespace tool { const char mdsAddr[] = "127.0.0.1:9191,127.0.0.1:9192"; -template -void callback(RpcController* controller, - const Req* request, - Resp* response, - Closure* done) { +template +void callback(RpcController *controller, const Req *request, Resp *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); } @@ -84,11 +82,11 @@ class ToolMDSClientTest : public ::testing::Test { topoService = new curve::mds::topology::MockTopologyService(); scheduleService = new curve::mds::schedule::MockScheduleService(); ASSERT_EQ(0, server->AddService(nameService, - brpc::SERVER_DOESNT_OWN_SERVICE)); + brpc::SERVER_DOESNT_OWN_SERVICE)); ASSERT_EQ(0, server->AddService(topoService, - brpc::SERVER_DOESNT_OWN_SERVICE)); + brpc::SERVER_DOESNT_OWN_SERVICE)); ASSERT_EQ(0, server->AddService(scheduleService, - brpc::SERVER_DOESNT_OWN_SERVICE)); + brpc::SERVER_DOESNT_OWN_SERVICE)); ASSERT_EQ(0, server->Start("127.0.0.1:9192", nullptr)); brpc::StartDummyServerAt(9193); @@ -96,13 +94,13 @@ class ToolMDSClientTest : public ::testing::Test { curve::mds::topology::ListPhysicalPoolResponse response; response.set_statuscode(kTopoErrCodeSuccess); EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), + .WillOnce(DoAll( + SetArgPointee<2>(response), Invoke([](RpcController *controller, const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + ListPhysicalPoolResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + }))); ASSERT_EQ(0, mdsClient.Init(mdsAddr, "9194,9193")); } void 
TearDown() { @@ -118,7 +116,7 @@ class ToolMDSClientTest : public ::testing::Test { scheduleService = nullptr; } - void GetFileInfoForTest(uint64_t id, FileInfo* fileInfo) { + void GetFileInfoForTest(uint64_t id, FileInfo *fileInfo) { fileInfo->set_id(id); fileInfo->set_filename("test"); fileInfo->set_parentid(0); @@ -129,12 +127,11 @@ class ToolMDSClientTest : public ::testing::Test { fileInfo->set_ctime(1573546993000000); } - void GetCopysetInfoForTest(CopySetServerInfo* info, - int num, uint32_t copysetId = 1) { + void GetCopysetInfoForTest(CopySetServerInfo *info, int num, + uint32_t copysetId = 1) { info->Clear(); for (int i = 0; i < num; ++i) { - curve::common::ChunkServerLocation *csLoc = - info->add_cslocs(); + curve::common::ChunkServerLocation *csLoc = info->add_cslocs(); csLoc->set_chunkserverid(i); csLoc->set_hostip("127.0.0.1"); csLoc->set_port(9191 + i); @@ -142,21 +139,23 @@ class ToolMDSClientTest : public ::testing::Test { info->set_copysetid(copysetId); } - void GetSegmentForTest(PageFileSegment* segment) { + void GetSegmentForTest(PageFileSegment *segment) { segment->set_logicalpoolid(1); segment->set_segmentsize(DefaultSegmentSize); segment->set_chunksize(kChunkSize); segment->set_startoffset(0); } - void GetPhysicalPoolInfoForTest(PoolIdType id, PhysicalPoolInfo* pool) { + void GetPhysicalPoolInfoForTest(PoolIdType id, PhysicalPoolInfo *pool) { pool->set_physicalpoolid(id); pool->set_physicalpoolname("testPool"); pool->set_desc("physical pool for test"); + pool->set_poolsetid(1); + pool->set_poolsetname("default"); } void GetLogicalPoolForTest(PoolIdType id, - curve::mds::topology::LogicalPoolInfo *lpInfo) { + curve::mds::topology::LogicalPoolInfo *lpInfo) { lpInfo->set_logicalpoolid(id); lpInfo->set_logicalpoolname("defaultLogicalPool"); lpInfo->set_physicalpoolid(1); @@ -207,10 +206,10 @@ class ToolMDSClientTest : public ::testing::Test { csInfo->set_diskcapacity(1024); csInfo->set_diskused(512); } - brpc::Server* server; - curve::mds::MockNameService* nameService; - curve::mds::topology::MockTopologyService* topoService; - curve::mds::schedule::MockScheduleService* scheduleService; + brpc::Server *server; + curve::mds::MockNameService *nameService; + curve::mds::topology::MockTopologyService *topoService; + curve::mds::schedule::MockScheduleService *scheduleService; MDSClient mdsClient; const uint64_t kChunkSize = 16777216; const uint64_t DefaultSegmentSize = 1024 * 1024 * 1024; @@ -237,42 +236,40 @@ TEST_F(ToolMDSClientTest, GetFileInfo) { EXPECT_CALL(*nameService, GetFileInfo(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::GetFileInfoRequest *request, - curve::mds::GetFileInfoResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + const curve::mds::GetFileInfoRequest *request, + curve::mds::GetFileInfoResponse *response, + Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.GetFileInfo(filename, &outFileInfo)); // 返回码不为OK curve::mds::GetFileInfoResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, GetFileInfo(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetFileInfoRequest *request, - curve::mds::GetFileInfoResponse *response, - Closure *done){ - brpc::ClosureGuard 
doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::GetFileInfoRequest *request, + curve::mds::GetFileInfoResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetFileInfo(filename, &outFileInfo)); // 正常情况 - curve::mds::FileInfo * info = new curve::mds::FileInfo; + curve::mds::FileInfo *info = new curve::mds::FileInfo; GetFileInfoForTest(1, info); response.set_allocated_fileinfo(info); response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, GetFileInfo(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetFileInfoRequest *request, - curve::mds::GetFileInfoResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::GetFileInfoRequest *request, + curve::mds::GetFileInfoResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetFileInfo(filename, &outFileInfo)); ASSERT_EQ(info->DebugString(), outFileInfo.DebugString()); } @@ -283,48 +280,48 @@ TEST_F(ToolMDSClientTest, GetAllocatedSize) { // 发送RPC失败 EXPECT_CALL(*nameService, GetAllocatedSize(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::GetAllocatedSizeRequest *request, - curve::mds::GetAllocatedSizeResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly(Invoke( + [](RpcController *controller, + const curve::mds::GetAllocatedSizeRequest *request, + curve::mds::GetAllocatedSizeResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.GetAllocatedSize(filename, &allocSize)); // 返回码不为OK curve::mds::GetAllocatedSizeResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, GetAllocatedSize(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetAllocatedSizeRequest *request, - curve::mds::GetAllocatedSizeResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::GetAllocatedSizeRequest *request, + curve::mds::GetAllocatedSizeResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetAllocatedSize(filename, &allocSize)); // 正常情况 response.set_allocatedsize(DefaultSegmentSize * 3); for (int i = 1; i <= 3; ++i) { - response.mutable_allocsizemap()->insert({i, DefaultSegmentSize}); + response.mutable_allocsizemap()->insert( + {static_cast<::google::protobuf::uint32>(i), DefaultSegmentSize}); } response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, GetAllocatedSize(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetAllocatedSizeRequest *request, - curve::mds::GetAllocatedSizeResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const 
curve::mds::GetAllocatedSizeRequest *request, + curve::mds::GetAllocatedSizeResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); AllocMap allocMap; ASSERT_EQ(0, mdsClient.GetAllocatedSize(filename, &allocSize, &allocMap)); ASSERT_EQ(DefaultSegmentSize * 3, allocSize); - AllocMap expected = {{1, DefaultSegmentSize}, {2, DefaultSegmentSize}, + AllocMap expected = {{1, DefaultSegmentSize}, + {2, DefaultSegmentSize}, {3, DefaultSegmentSize}}; ASSERT_EQ(expected, allocMap); } @@ -336,28 +333,27 @@ TEST_F(ToolMDSClientTest, ListDir) { // 发送RPC失败 EXPECT_CALL(*nameService, ListDir(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::ListDirRequest *request, - curve::mds::ListDirResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const curve::mds::ListDirRequest *request, + curve::mds::ListDirResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ListDir(fileName, &fileInfoVec)); // 返回码不为OK curve::mds::ListDirResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ListDir(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListDirRequest *request, - curve::mds::ListDirResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::ListDirRequest *request, + curve::mds::ListDirResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListDir(fileName, &fileInfoVec)); // 正常情况 response.set_statuscode(curve::mds::StatusCode::kOK); @@ -366,13 +362,12 @@ TEST_F(ToolMDSClientTest, ListDir) { GetFileInfoForTest(i, fileInfo); } EXPECT_CALL(*nameService, ListDir(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListDirRequest *request, - curve::mds::ListDirResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::ListDirRequest *request, + curve::mds::ListDirResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListDir(fileName, &fileInfoVec)); for (int i = 0; i < 5; i++) { FileInfo expected; @@ -389,72 +384,69 @@ TEST_F(ToolMDSClientTest, GetSegmentInfo) { // 发送RPC失败 EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const curve::mds::GetOrAllocateSegmentRequest *request, + curve::mds::GetOrAllocateSegmentResponse *response, + Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(GetSegmentRes::kOtherError, - 
mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); + mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); // segment不存在 curve::mds::GetOrAllocateSegmentResponse response; response.set_statuscode(curve::mds::StatusCode::kSegmentNotAllocated); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::GetOrAllocateSegmentRequest *request, + curve::mds::GetOrAllocateSegmentResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kSegmentNotAllocated, - mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); + mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); // 文件不存在 response.set_statuscode(curve::mds::StatusCode::kFileNotExists); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::GetOrAllocateSegmentRequest *request, + curve::mds::GetOrAllocateSegmentResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kFileNotExists, - mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); + mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); // 其他错误 response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::GetOrAllocateSegmentRequest *request, + curve::mds::GetOrAllocateSegmentResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kOtherError, - mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); + mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); // 正常情况 - PageFileSegment* segment = new PageFileSegment(); + PageFileSegment *segment = new PageFileSegment(); GetSegmentForTest(segment); response.set_statuscode(curve::mds::StatusCode::kOK); response.set_allocated_pagefilesegment(segment); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::GetOrAllocateSegmentRequest *request, + curve::mds::GetOrAllocateSegmentResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kOK, - mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); + 
mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); ASSERT_EQ(segment->DebugString(), outSegment.DebugString()); } @@ -464,48 +456,55 @@ TEST_F(ToolMDSClientTest, DeleteFile) { // 发送RPC失败 EXPECT_CALL(*nameService, DeleteFile(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::DeleteFileRequest *request, - curve::mds::DeleteFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const curve::mds::DeleteFileRequest *request, + curve::mds::DeleteFileResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.DeleteFile(fileName)); // 返回码不为OK curve::mds::DeleteFileResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, DeleteFile(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::DeleteFileRequest *request, - curve::mds::DeleteFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::DeleteFileRequest *request, + curve::mds::DeleteFileResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.DeleteFile(fileName)); // 正常情况 response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, DeleteFile(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::DeleteFileRequest *request, - curve::mds::DeleteFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::DeleteFileRequest *request, + curve::mds::DeleteFileResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.DeleteFile(fileName)); } TEST_F(ToolMDSClientTest, CreateFile) { std::string fileName = "/test"; uint64_t length = 10 * DefaultSegmentSize; - uint64_t stripeUnit = 32 * 1024 *1024; + uint64_t stripeUnit = 32 * 1024 * 1024; uint64_t stripeCount = 32; + + CreateFileContext context; + context.type = curve::mds::FileType::INODE_PAGEFILE; + context.name = fileName; + context.length = length; + context.stripeUnit = stripeUnit; + context.stripeCount = stripeCount; + context.poolset = ""; + // 发送RPC失败 EXPECT_CALL(*nameService, CreateFile(_, _, _, _)) .Times(6) @@ -518,8 +517,7 @@ TEST_F(ToolMDSClientTest, CreateFile) { dynamic_cast(controller); cntl->SetFailed("test"); })); - ASSERT_EQ(-1, mdsClient.CreateFile(fileName, length, - stripeUnit, stripeCount)); + ASSERT_EQ(-1, mdsClient.CreateFile(context)); // 返回码不为OK curve::mds::CreateFileResponse response; @@ -532,8 +530,7 @@ TEST_F(ToolMDSClientTest, CreateFile) { Closure *done){ brpc::ClosureGuard doneGuard(done); }))); - ASSERT_EQ(-1, mdsClient.CreateFile(fileName, length, - stripeUnit, stripeCount)); + ASSERT_EQ(-1, mdsClient.CreateFile(context)); // 正常情况 response.set_statuscode(curve::mds::StatusCode::kOK); @@ -545,8 +542,7 @@ TEST_F(ToolMDSClientTest, CreateFile) { Closure *done){ brpc::ClosureGuard doneGuard(done); }))); - ASSERT_EQ(0, mdsClient.CreateFile(fileName, length, - stripeUnit, 
stripeCount)); + ASSERT_EQ(0, mdsClient.CreateFile(context)); } TEST_F(ToolMDSClientTest, ExtendVolume_success) { @@ -555,13 +551,12 @@ TEST_F(ToolMDSClientTest, ExtendVolume_success) { curve::mds::ExtendFileResponse response; response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, ExtendFile(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ExtendFileRequest *request, - curve::mds::ExtendFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::ExtendFileRequest *request, + curve::mds::ExtendFileResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ExtendVolume(fileName, length)); } @@ -572,15 +567,15 @@ TEST_F(ToolMDSClientTest, ExtendVolume_Fail) { // 发送RPC失败 EXPECT_CALL(*nameService, ExtendFile(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::ExtendFileRequest *request, - curve::mds::ExtendFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const curve::mds::ExtendFileRequest *request, + curve::mds::ExtendFileResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ExtendVolume(fileName, length)); return; @@ -589,13 +584,12 @@ TEST_F(ToolMDSClientTest, ExtendVolume_Fail) { curve::mds::ExtendFileResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ExtendFile(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ExtendFileRequest *request, - curve::mds::ExtendFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::ExtendFileRequest *request, + curve::mds::ExtendFileResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ExtendVolume(fileName, length)); } @@ -607,31 +601,30 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { // 发送rpc失败 EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); - ASSERT_EQ(-1, mdsClient.GetChunkServerListInCopySet( - logicalPoolId, copysetId, &csLocs)); + .WillRepeatedly(Invoke( + [](RpcController *controller, + const GetChunkServerListInCopySetsRequest *request, + GetChunkServerListInCopySetsResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); + ASSERT_EQ(-1, mdsClient.GetChunkServerListInCopySet(logicalPoolId, + copysetId, &csLocs)); // 返回码不为OK GetChunkServerListInCopySetsResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, 
GetChunkServerListInCopySets(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); - ASSERT_EQ(-1, mdsClient.GetChunkServerListInCopySet( - logicalPoolId, copysetId, &csLocs)); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const GetChunkServerListInCopySetsRequest *request, + GetChunkServerListInCopySetsResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + ASSERT_EQ(-1, mdsClient.GetChunkServerListInCopySet(logicalPoolId, + copysetId, &csLocs)); // 正常情况 response.set_statuscode(kTopoErrCodeSuccess); @@ -640,15 +633,14 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { auto infoPtr = response.add_csinfo(); infoPtr->CopyFrom(csInfo); EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); - ASSERT_EQ(0, mdsClient.GetChunkServerListInCopySet( - logicalPoolId, copysetId, &csLocs)); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const GetChunkServerListInCopySetsRequest *request, + GetChunkServerListInCopySetsResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + ASSERT_EQ(0, mdsClient.GetChunkServerListInCopySet(logicalPoolId, copysetId, + &csLocs)); ASSERT_EQ(csInfo.cslocs_size(), csLocs.size()); for (uint32_t i = 0; i < csLocs.size(); ++i) { ASSERT_EQ(csInfo.cslocs(i).DebugString(), csLocs[i].DebugString()); @@ -668,15 +660,14 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { std::vector copysets = {100, 101, 102}; std::vector csServerInfos; EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); - ASSERT_EQ(0, mdsClient.GetChunkServerListInCopySets( - logicalPoolId, copysets, &csServerInfos)); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const GetChunkServerListInCopySetsRequest *request, + GetChunkServerListInCopySetsResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + ASSERT_EQ(0, mdsClient.GetChunkServerListInCopySets(logicalPoolId, copysets, + &csServerInfos)); ASSERT_EQ(expected.size(), csServerInfos.size()); for (uint32_t i = 0; i < expected.size(); ++i) { ASSERT_EQ(expected[i].DebugString(), csServerInfos[i].DebugString()); @@ -689,28 +680,28 @@ TEST_F(ToolMDSClientTest, ListPhysicalPoolsInCluster) { // 发送rpc失败 EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const ListPhysicalPoolRequest *request, + ListPhysicalPoolResponse *response, Closure *done) { + 
brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ListPhysicalPoolsInCluster(&pools)); // 返回码不为OK ListPhysicalPoolResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const ListPhysicalPoolRequest *request, + ListPhysicalPoolResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + }))); ASSERT_EQ(-1, mdsClient.ListPhysicalPoolsInCluster(&pools)); // 正常情况 @@ -720,13 +711,13 @@ TEST_F(ToolMDSClientTest, ListPhysicalPoolsInCluster) { GetPhysicalPoolInfoForTest(i, poolInfo); } EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const ListPhysicalPoolRequest *request, + ListPhysicalPoolResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + }))); ASSERT_EQ(0, mdsClient.ListPhysicalPoolsInCluster(&pools)); ASSERT_EQ(3, pools.size()); for (int i = 0; i < 3; ++i) { @@ -743,28 +734,27 @@ TEST_F(ToolMDSClientTest, ListLogicalPoolsInPhysicalPool) { // 发送rpc失败 EXPECT_CALL(*topoService, ListLogicalPool(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const ListLogicalPoolRequest *request, - ListLogicalPoolResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly(Invoke( + [](RpcController *controller, const ListLogicalPoolRequest *request, + ListLogicalPoolResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ListLogicalPoolsInPhysicalPool(poolId, &pools)); // 返回码不为OK ListLogicalPoolResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListLogicalPool(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListLogicalPoolRequest *request, - ListLogicalPoolResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const ListLogicalPoolRequest *request, + ListLogicalPoolResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + }))); ASSERT_EQ(-1, mdsClient.ListLogicalPoolsInPhysicalPool(poolId, &pools)); // 正常情况 @@ -774,13 +764,13 @@ TEST_F(ToolMDSClientTest, ListLogicalPoolsInPhysicalPool) { GetLogicalPoolForTest(i, poolInfo); } EXPECT_CALL(*topoService, ListLogicalPool(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListLogicalPoolRequest *request, - ListLogicalPoolResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce( + 
DoAll(SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const ListLogicalPoolRequest *request, + ListLogicalPoolResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + }))); ASSERT_EQ(0, mdsClient.ListLogicalPoolsInPhysicalPool(poolId, &pools)); ASSERT_EQ(3, pools.size()); for (int i = 0; i < 3; ++i) { @@ -796,28 +786,28 @@ TEST_F(ToolMDSClientTest, ListZoneInPhysicalPool) { // 发送rpc失败 EXPECT_CALL(*topoService, ListPoolZone(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::topology::ListPoolZoneRequest *request, - curve::mds::topology::ListPoolZoneResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const curve::mds::topology::ListPoolZoneRequest *request, + curve::mds::topology::ListPoolZoneResponse *response, + Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ListZoneInPhysicalPool(poolId, &zones)); // 返回码不为OK curve::mds::topology::ListPoolZoneResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListPoolZone(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::ListPoolZoneRequest *request, - curve::mds::topology::ListPoolZoneResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::topology::ListPoolZoneRequest *request, + curve::mds::topology::ListPoolZoneResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListZoneInPhysicalPool(poolId, &zones)); // 正常情况 response.set_statuscode(kTopoErrCodeSuccess); @@ -826,13 +816,12 @@ TEST_F(ToolMDSClientTest, ListZoneInPhysicalPool) { GetZoneInfoForTest(i, zoneInfo); } EXPECT_CALL(*topoService, ListPoolZone(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::ListPoolZoneRequest *request, - curve::mds::topology::ListPoolZoneResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::topology::ListPoolZoneRequest *request, + curve::mds::topology::ListPoolZoneResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListZoneInPhysicalPool(poolId, &zones)); ASSERT_EQ(3, zones.size()); for (int i = 0; i < 3; ++i) { @@ -849,28 +838,29 @@ TEST_F(ToolMDSClientTest, ListServersInZone) { // 发送rpc失败 EXPECT_CALL(*topoService, ListZoneServer(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::topology::ListZoneServerRequest *request, - curve::mds::topology::ListZoneServerResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly(Invoke( + [](RpcController *controller, + const curve::mds::topology::ListZoneServerRequest *request, + curve::mds::topology::ListZoneServerResponse *response, + Closure *done) { + brpc::ClosureGuard doneGuard(done); + 
brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ListServersInZone(zoneId, &servers)); // 返回码不为OK curve::mds::topology::ListZoneServerResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListZoneServer(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::ListZoneServerRequest *request, - curve::mds::topology::ListZoneServerResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke( + [](RpcController *controller, + const curve::mds::topology::ListZoneServerRequest *request, + curve::mds::topology::ListZoneServerResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListServersInZone(zoneId, &servers)); // 正常情况 @@ -880,13 +870,13 @@ TEST_F(ToolMDSClientTest, ListServersInZone) { GetServerInfoForTest(i, serverInfo); } EXPECT_CALL(*topoService, ListZoneServer(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::ListZoneServerRequest *request, - curve::mds::topology::ListZoneServerResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke( + [](RpcController *controller, + const curve::mds::topology::ListZoneServerRequest *request, + curve::mds::topology::ListZoneServerResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListServersInZone(zoneId, &servers)); ASSERT_EQ(3, servers.size()); for (int i = 0; i < 3; ++i) { @@ -903,28 +893,29 @@ TEST_F(ToolMDSClientTest, ListChunkServersOnServer) { // 发送rpc失败 EXPECT_CALL(*topoService, ListChunkServer(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::topology::ListChunkServerRequest *request, - curve::mds::topology::ListChunkServerResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly(Invoke( + [](RpcController *controller, + const curve::mds::topology::ListChunkServerRequest *request, + curve::mds::topology::ListChunkServerResponse *response, + Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ListChunkServersOnServer(serverId, &chunkservers)); // 返回码不为OK curve::mds::topology::ListChunkServerResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListChunkServer(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::ListChunkServerRequest *request, - curve::mds::topology::ListChunkServerResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke( + [](RpcController *controller, + const curve::mds::topology::ListChunkServerRequest *request, + curve::mds::topology::ListChunkServerResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListChunkServersOnServer(serverId, &chunkservers)); // 正常情况,两个chunkserver正常,一个chunkserver retired @@ -934,13 +925,13 @@ TEST_F(ToolMDSClientTest, 
ListChunkServersOnServer) { GetChunkServerInfoForTest(i, csInfo, i == 2); } EXPECT_CALL(*topoService, ListChunkServer(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::ListChunkServerRequest *request, - curve::mds::topology::ListChunkServerResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke( + [](RpcController *controller, + const curve::mds::topology::ListChunkServerRequest *request, + curve::mds::topology::ListChunkServerResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListChunkServersOnServer(serverId, &chunkservers)); ASSERT_EQ(2, chunkservers.size()); for (int i = 0; i < 2; ++i) { @@ -958,15 +949,16 @@ TEST_F(ToolMDSClientTest, GetChunkServerInfo) { // 发送rpc失败 EXPECT_CALL(*topoService, GetChunkServer(_, _, _, _)) .Times(12) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::topology::GetChunkServerInfoRequest *request, - curve::mds::topology::GetChunkServerInfoResponse *response, - Closure *done){ + .WillRepeatedly(Invoke( + [](RpcController *controller, + const curve::mds::topology::GetChunkServerInfoRequest *request, + curve::mds::topology::GetChunkServerInfoResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csId, &chunkserver)); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csAddr, &chunkserver)); @@ -975,30 +967,32 @@ TEST_F(ToolMDSClientTest, GetChunkServerInfo) { response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetChunkServer(_, _, _, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::GetChunkServerInfoRequest *request, - curve::mds::topology::GetChunkServerInfoResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillRepeatedly(DoAll( + SetArgPointee<2>(response), + Invoke( + [](RpcController *controller, + const curve::mds::topology::GetChunkServerInfoRequest + *request, + curve::mds::topology::GetChunkServerInfoResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csId, &chunkserver)); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csAddr, &chunkserver)); // 正常情况 response.set_statuscode(kTopoErrCodeSuccess); - ChunkServerInfo* csInfo = new ChunkServerInfo(); + ChunkServerInfo *csInfo = new ChunkServerInfo(); GetChunkServerInfoForTest(1, csInfo); response.set_allocated_chunkserverinfo(csInfo); EXPECT_CALL(*topoService, GetChunkServer(_, _, _, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::GetChunkServerInfoRequest *request, - curve::mds::topology::GetChunkServerInfoResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillRepeatedly(DoAll( + SetArgPointee<2>(response), + Invoke( + [](RpcController *controller, + const curve::mds::topology::GetChunkServerInfoRequest + *request, + curve::mds::topology::GetChunkServerInfoResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetChunkServerInfo(csId, &chunkserver)); ASSERT_EQ(0, 
mdsClient.GetChunkServerInfo(csAddr, &chunkserver)); ChunkServerInfo expected; @@ -1022,15 +1016,15 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { // 发送rpc失败 EXPECT_CALL(*topoService, GetCopySetsInChunkServer(_, _, _, _)) .Times(12) - .WillRepeatedly(Invoke([](RpcController *controller, - const GetCopySetsInChunkServerRequest *request, - GetCopySetsInChunkServerResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly(Invoke( + [](RpcController *controller, + const GetCopySetsInChunkServerRequest *request, + GetCopySetsInChunkServerResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csId, ©sets)); ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csAddr, ©sets)); @@ -1039,13 +1033,12 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetCopySetsInChunkServer(_, _, _, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetCopySetsInChunkServerRequest *request, - GetCopySetsInChunkServerResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillRepeatedly(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const GetCopySetsInChunkServerRequest *request, + GetCopySetsInChunkServerResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csId, ©sets)); ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csAddr, ©sets)); @@ -1058,13 +1051,12 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { } EXPECT_CALL(*topoService, GetCopySetsInChunkServer(_, _, _, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetCopySetsInChunkServerRequest *request, - GetCopySetsInChunkServerResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillRepeatedly(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const GetCopySetsInChunkServerRequest *request, + GetCopySetsInChunkServerResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetCopySetsInChunkServer(csId, ©sets)); ASSERT_EQ(5, copysets.size()); copysets.clear(); @@ -1089,15 +1081,15 @@ TEST_F(ToolMDSClientTest, GetCopySetsInCluster) { // 发送rpc失败 EXPECT_CALL(*topoService, GetCopySetsInCluster(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const GetCopySetsInClusterRequest *request, - GetCopySetsInClusterResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const GetCopySetsInClusterRequest *request, + GetCopySetsInClusterResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.GetCopySetsInCluster(©sets)); // 返回码不为OK @@ -1105,13 +1097,12 @@ TEST_F(ToolMDSClientTest, GetCopySetsInCluster) { response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, 
GetCopySetsInCluster(_, _, _, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetCopySetsInClusterRequest *request, - GetCopySetsInClusterResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillRepeatedly(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const GetCopySetsInClusterRequest *request, + GetCopySetsInClusterResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetCopySetsInCluster(©sets)); // 正常情况 @@ -1123,13 +1114,12 @@ TEST_F(ToolMDSClientTest, GetCopySetsInCluster) { } EXPECT_CALL(*topoService, GetCopySetsInCluster(_, _, _, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetCopySetsInClusterRequest *request, - GetCopySetsInClusterResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillRepeatedly(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const GetCopySetsInClusterRequest *request, + GetCopySetsInClusterResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetCopySetsInCluster(©sets)); ASSERT_EQ(5, copysets.size()); copysets.clear(); @@ -1141,13 +1131,11 @@ TEST_F(ToolMDSClientTest, GetCopySetsInCluster) { TEST_F(ToolMDSClientTest, GetCopyset) { auto succCallback = callback; - auto failCallback = [](RpcController* controller, - const GetCopysetRequest* request, - GetCopysetResponse* response, - Closure* done) { + auto failCallback = [](RpcController *controller, + const GetCopysetRequest *request, + GetCopysetResponse *response, Closure *done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller *cntl = dynamic_cast(controller); cntl->SetFailed("fail"); }; @@ -1168,8 +1156,8 @@ TEST_F(ToolMDSClientTest, GetCopyset) { CopysetInfo copysetInfo; EXPECT_CALL(*topoService, GetCopyset(_, _, _, _)) .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(succResp), - Invoke(failCallback))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(succResp), Invoke(failCallback))); ASSERT_EQ(mdsClient.GetCopyset(1, 1, ©setInfo), -1); } @@ -1177,8 +1165,7 @@ TEST_F(ToolMDSClientTest, GetCopyset) { { CopysetInfo copysetInfo; EXPECT_CALL(*topoService, GetCopyset(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(failResp), - Invoke(succCallback))); + .WillOnce(DoAll(SetArgPointee<2>(failResp), Invoke(succCallback))); ASSERT_EQ(mdsClient.GetCopyset(1, 1, ©setInfo), -1); } @@ -1186,8 +1173,7 @@ TEST_F(ToolMDSClientTest, GetCopyset) { { CopysetInfo copysetInfo; EXPECT_CALL(*topoService, GetCopyset(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(succResp), - Invoke(succCallback))); + .WillOnce(DoAll(SetArgPointee<2>(succResp), Invoke(succCallback))); ASSERT_EQ(mdsClient.GetCopyset(1, 1, ©setInfo), 0); ASSERT_EQ(copysetInfo.logicalpoolid(), 1); ASSERT_EQ(copysetInfo.copysetid(), 1); @@ -1201,15 +1187,15 @@ TEST_F(ToolMDSClientTest, RapidLeaderSchedule) { // 发送rpc失败 EXPECT_CALL(*scheduleService, RapidLeaderSchedule(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const RapidLeaderScheduleRequst *request, - RapidLeaderScheduleResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const 
RapidLeaderScheduleRequst *request, + RapidLeaderScheduleResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.RapidLeaderSchedule(1)); // 返回码不为OK @@ -1217,25 +1203,23 @@ TEST_F(ToolMDSClientTest, RapidLeaderSchedule) { response.set_statuscode( curve::mds::schedule::kScheduleErrCodeInvalidLogicalPool); EXPECT_CALL(*scheduleService, RapidLeaderSchedule(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const RapidLeaderScheduleRequst *request, - RapidLeaderScheduleResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const RapidLeaderScheduleRequst *request, + RapidLeaderScheduleResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.RapidLeaderSchedule(1)); // 成功 response.set_statuscode(curve::mds::schedule::kScheduleErrCodeSuccess); EXPECT_CALL(*scheduleService, RapidLeaderSchedule(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const RapidLeaderScheduleRequst *request, - RapidLeaderScheduleResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const RapidLeaderScheduleRequst *request, + RapidLeaderScheduleResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.RapidLeaderSchedule(1)); } @@ -1251,9 +1235,9 @@ TEST_F(ToolMDSClientTest, SetLogicalPoolScanState) { // CASE 1: Send rpc failed { auto failCallback = [](RpcController *controller, - const SetLogicalPoolScanStateRequest* request, - SetLogicalPoolScanStateResponse* response, - Closure* done) { + const SetLogicalPoolScanStateRequest *request, + SetLogicalPoolScanStateResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); brpc::Controller *cntl = dynamic_cast(controller); @@ -1261,24 +1245,22 @@ TEST_F(ToolMDSClientTest, SetLogicalPoolScanState) { }; EXPECT_CALL(*topoService, SetLogicalPoolScanState(_, _, _, _)) .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(succResp), - Invoke(failCallback))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(succResp), Invoke(failCallback))); ASSERT_EQ(-1, mdsClient.SetLogicalPoolScanState(1, true)); } // CASE 2: Logical pool not found { EXPECT_CALL(*topoService, SetLogicalPoolScanState(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(failResp), - Invoke(succCallback))); + .WillOnce(DoAll(SetArgPointee<2>(failResp), Invoke(succCallback))); ASSERT_EQ(-1, mdsClient.SetLogicalPoolScanState(1, true)); } // CASE 3: Set success { EXPECT_CALL(*topoService, SetLogicalPoolScanState(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(succResp), - Invoke(succCallback))); + .WillOnce(DoAll(SetArgPointee<2>(succResp), Invoke(succCallback))); ASSERT_EQ(0, mdsClient.SetLogicalPoolScanState(1, true)); } } @@ -1288,44 +1270,42 @@ TEST_F(ToolMDSClientTest, QueryChunkServerRecoverStatus) { // 发送rpc失败 EXPECT_CALL(*scheduleService, QueryChunkServerRecoverStatus(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const QueryChunkServerRecoverStatusRequest *request, - QueryChunkServerRecoverStatusResponse *response, - Closure *done) { - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - 
dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly(Invoke( + [](RpcController *controller, + const QueryChunkServerRecoverStatusRequest *request, + QueryChunkServerRecoverStatusResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.QueryChunkServerRecoverStatus( - std::vector{}, &statusMap)); + std::vector{}, &statusMap)); // 1. QueryChunkServerRecoverStatus失败的情况 QueryChunkServerRecoverStatusResponse response; response.set_statuscode( curve::mds::schedule::kScheduleErrInvalidQueryChunkserverID); EXPECT_CALL(*scheduleService, QueryChunkServerRecoverStatus(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const QueryChunkServerRecoverStatusRequest *request, - QueryChunkServerRecoverStatusResponse *response, - Closure *done) { - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const QueryChunkServerRecoverStatusRequest *request, + QueryChunkServerRecoverStatusResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.QueryChunkServerRecoverStatus( - std::vector{}, &statusMap)); + std::vector{}, &statusMap)); // 2. QueryChunkServerRecoverStatus成功的情况 response.set_statuscode(curve::mds::schedule::kScheduleErrCodeSuccess); EXPECT_CALL(*scheduleService, QueryChunkServerRecoverStatus(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const QueryChunkServerRecoverStatusRequest *request, - QueryChunkServerRecoverStatusResponse *response, - Closure *done) { - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const QueryChunkServerRecoverStatusRequest *request, + QueryChunkServerRecoverStatusResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.QueryChunkServerRecoverStatus( - std::vector{}, &statusMap)); + std::vector{}, &statusMap)); } TEST_F(ToolMDSClientTest, GetMetric) { @@ -1365,14 +1345,14 @@ TEST_F(ToolMDSClientTest, GetMdsOnlineStatus) { std::map onlineStatus; // 9180在线,9999不在线 value.set_value("{\"conf_name\":\"mds.listen.addr\"," - "\"conf_value\":\"127.0.0.1:9192\"}"); + "\"conf_value\":\"127.0.0.1:9192\"}"); mdsClient.GetMdsOnlineStatus(&onlineStatus); std::map expected = {{"127.0.0.1:9191", false}, {"127.0.0.1:9192", true}}; ASSERT_EQ(expected, onlineStatus); // 9180的服务端口不一致 value.set_value("{\"conf_name\":\"mds.listen.addr\"," - "\"conf_value\":\"127.0.0.1:9188\"}"); + "\"conf_value\":\"127.0.0.1:9188\"}"); mdsClient.GetMdsOnlineStatus(&onlineStatus); expected = {{"127.0.0.1:9191", false}, {"127.0.0.1:9192", false}}; ASSERT_EQ(expected, onlineStatus); @@ -1389,28 +1369,27 @@ TEST_F(ToolMDSClientTest, ListClient) { // 发送rpc失败 EXPECT_CALL(*nameService, ListClient(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::ListClientRequest *request, - curve::mds::ListClientResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const curve::mds::ListClientRequest *request, + curve::mds::ListClientResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + 
brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ListClient(&clientAddrs)); // 返回码不为OK curve::mds::ListClientResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ListClient(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListClientRequest *request, - curve::mds::ListClientResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::ListClientRequest *request, + curve::mds::ListClientResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListClient(&clientAddrs)); // 正常情况 @@ -1421,19 +1400,18 @@ TEST_F(ToolMDSClientTest, ListClient) { clientInfo->set_port(8888 + i); } EXPECT_CALL(*nameService, ListClient(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListClientRequest *request, - curve::mds::ListClientResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::ListClientRequest *request, + curve::mds::ListClientResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListClient(&clientAddrs)); ASSERT_EQ(response.clientinfos_size(), clientAddrs.size()); for (int i = 0; i < 5; i++) { - const auto& clientInfo = response.clientinfos(i); - std::string expected = clientInfo.ip() + ":" + - std::to_string(clientInfo.port()); + const auto &clientInfo = response.clientinfos(i); + std::string expected = + clientInfo.ip() + ":" + std::to_string(clientInfo.port()); ASSERT_EQ(expected, clientAddrs[i]); } } @@ -1445,28 +1423,28 @@ TEST_F(ToolMDSClientTest, ListVolumesOnCopyset) { // send rpc fail EXPECT_CALL(*nameService, ListVolumesOnCopysets(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::ListVolumesOnCopysetsRequest *request, - curve::mds::ListVolumesOnCopysetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const curve::mds::ListVolumesOnCopysetsRequest *request, + curve::mds::ListVolumesOnCopysetsResponse *response, + Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ListVolumesOnCopyset(copysets, &fileNames)); // return code not ok curve::mds::ListVolumesOnCopysetsResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ListVolumesOnCopysets(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListVolumesOnCopysetsRequest *request, - curve::mds::ListVolumesOnCopysetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::ListVolumesOnCopysetsRequest *request, + curve::mds::ListVolumesOnCopysetsResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, 
mdsClient.ListVolumesOnCopyset(copysets, &fileNames)); // normal @@ -1476,13 +1454,12 @@ TEST_F(ToolMDSClientTest, ListVolumesOnCopyset) { *fileName = "file" + std::to_string(i); } EXPECT_CALL(*nameService, ListVolumesOnCopysets(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListVolumesOnCopysetsRequest *request, - curve::mds::ListVolumesOnCopysetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::ListVolumesOnCopysetsRequest *request, + curve::mds::ListVolumesOnCopysetsResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListVolumesOnCopyset(copysets, &fileNames)); ASSERT_EQ(response.filenames_size(), fileNames.size()); for (int i = 0; i < 5; i++) { @@ -1500,40 +1477,38 @@ TEST_F(ToolMDSClientTest, SetCopysetsAvailFlag) { // send rpc fail EXPECT_CALL(*topoService, SetCopysetsAvailFlag(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const SetCopysetsAvailFlagRequest *request, - SetCopysetsAvailFlagResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const SetCopysetsAvailFlagRequest *request, + SetCopysetsAvailFlagResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.SetCopysetsAvailFlag(copysets, false)); // return code not ok SetCopysetsAvailFlagResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeStorgeFail); EXPECT_CALL(*topoService, SetCopysetsAvailFlag(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const SetCopysetsAvailFlagRequest *request, - SetCopysetsAvailFlagResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const SetCopysetsAvailFlagRequest *request, + SetCopysetsAvailFlagResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.SetCopysetsAvailFlag(copysets, false)); // normal response.set_statuscode(kTopoErrCodeSuccess); EXPECT_CALL(*topoService, SetCopysetsAvailFlag(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const SetCopysetsAvailFlagRequest *request, - SetCopysetsAvailFlagResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const SetCopysetsAvailFlagRequest *request, + SetCopysetsAvailFlagResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.SetCopysetsAvailFlag(copysets, false)); } @@ -1542,28 +1517,27 @@ TEST_F(ToolMDSClientTest, ListUnAvailCopySets) { // send rpc fail EXPECT_CALL(*topoService, ListUnAvailCopySets(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const ListUnAvailCopySetsRequest *request, - ListUnAvailCopySetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); 
+ .WillRepeatedly( + Invoke([](RpcController *controller, + const ListUnAvailCopySetsRequest *request, + ListUnAvailCopySetsResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ListUnAvailCopySets(©sets)); // return code not ok ListUnAvailCopySetsResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeStorgeFail); EXPECT_CALL(*topoService, ListUnAvailCopySets(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListUnAvailCopySetsRequest *request, - ListUnAvailCopySetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const ListUnAvailCopySetsRequest *request, + ListUnAvailCopySetsResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListUnAvailCopySets(©sets)); // normal @@ -1574,13 +1548,12 @@ TEST_F(ToolMDSClientTest, ListUnAvailCopySets) { cp->set_copysetid(i); } EXPECT_CALL(*topoService, ListUnAvailCopySets(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListUnAvailCopySetsRequest *request, - ListUnAvailCopySetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const ListUnAvailCopySetsRequest *request, + ListUnAvailCopySetsResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListUnAvailCopySets(©sets)); } diff --git a/test/tools/mock/mock_mds_client.h b/test/tools/mock/mock_mds_client.h index 840f33d665..7243c6854e 100644 --- a/test/tools/mock/mock_mds_client.h +++ b/test/tools/mock/mock_mds_client.h @@ -48,8 +48,7 @@ class MockMDSClient : public MDSClient { MOCK_METHOD3(GetSegmentInfo, GetSegmentRes(const std::string&, uint64_t, PageFileSegment*)); MOCK_METHOD2(DeleteFile, int(const std::string&, bool)); - MOCK_METHOD5(CreateFile, int(const std::string&, uint64_t, bool, - uint64_t, uint64_t)); + MOCK_METHOD1(CreateFile, int(const CreateFileContext&)); MOCK_METHOD2(ExtendVolume, int(const std::string&, uint64_t)); MOCK_METHOD3(GetChunkServerListInCopySet, int(const PoolIdType&, const CopySetIdType&, std::vector*)); diff --git a/test/tools/mock/mock_namespace_tool_core.h b/test/tools/mock/mock_namespace_tool_core.h index 4bd51af3fe..3963c7a9a5 100644 --- a/test/tools/mock/mock_namespace_tool_core.h +++ b/test/tools/mock/mock_namespace_tool_core.h @@ -47,8 +47,7 @@ class MockNameSpaceToolCore : public NameSpaceToolCore { const CopySetIdType&, std::vector*)); MOCK_METHOD2(DeleteFile, int(const std::string&, bool)); - MOCK_METHOD5(CreateFile, int(const std::string&, uint64_t, bool, - uint64_t, uint64_t)); + MOCK_METHOD1(CreateFile, int(const CreateFileContext&)); MOCK_METHOD3(GetAllocatedSize, int(const std::string&, uint64_t*, AllocMap*)); MOCK_METHOD2(GetFileSegments, int(const std::string&, diff --git a/test/tools/namespace_tool_core_test.cpp b/test/tools/namespace_tool_core_test.cpp index 56c5d10262..e1b365b28f 100644 --- a/test/tools/namespace_tool_core_test.cpp +++ b/test/tools/namespace_tool_core_test.cpp @@ -31,6 +31,7 @@ using ::testing::DoAll; using ::testing::SetArgPointee; using ::testing::SaveArg; using curve::tool::GetSegmentRes; +using curve::tool::CreateFileContext; 
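The test and mock hunks above are meant to be behaviour-preserving: the gmock expectations are only reflowed around the same `DoAll(SetArgPointee<2>(...), Invoke(...))` pattern, and the five-argument `CreateFile` mock is swapped for the single `CreateFileContext` overload. A quick local check is to rerun the tool unit tests under Bazel; this is a minimal sketch, assuming the affected `cc_test` targets live under `//test/tools` as the file paths suggest (the wildcard avoids guessing exact target names, and the gtest filter is taken from the `TEST_F` fixtures in this diff):

```bash
# Run every test target under test/tools; --test_output=errors only prints
# logs for tests that actually fail.
bazel test //test/tools/... --test_output=errors

# Optionally narrow the run to the MDS client fixture touched above.
bazel test //test/tools/... --test_output=errors \
    --test_arg=--gtest_filter='ToolMDSClientTest.*'
```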
DECLARE_bool(isTest); DECLARE_string(fileName); @@ -141,20 +142,28 @@ TEST_F(NameSpaceToolCoreTest, CreateFile) { uint64_t length = 5 * segmentSize; uint64_t stripeUnit = 32 * 1024 *1024; uint64_t stripeCount = 32; + std::string pstName = ""; // 1、正常情况 - EXPECT_CALL(*client_, CreateFile(_, _, _, _, _)) + EXPECT_CALL(*client_, CreateFile(_)) .Times(1) .WillOnce(Return(0)); - ASSERT_EQ(0, namespaceTool.CreateFile(fileName, length, - true, stripeUnit, stripeCount)); + + CreateFileContext context; + context.type = curve::mds::FileType::INODE_PAGEFILE; + context.name = fileName; + context.length = length; + context.stripeUnit = stripeUnit; + context.stripeCount = stripeCount; + context.poolset = pstName; + + ASSERT_EQ(0, namespaceTool.CreateFile(context)); // 2、创建失败 - EXPECT_CALL(*client_, CreateFile(_, _, _, _, _)) + EXPECT_CALL(*client_, CreateFile(_)) .Times(1) .WillOnce(Return(-1)); - ASSERT_EQ(-1, namespaceTool.CreateFile(fileName, length, - true, stripeUnit, stripeCount)); + ASSERT_EQ(-1, namespaceTool.CreateFile(context)); } TEST_F(NameSpaceToolCoreTest, ExtendVolume) { diff --git a/test/tools/namespace_tool_test.cpp b/test/tools/namespace_tool_test.cpp index 176668f571..a8202bda39 100644 --- a/test/tools/namespace_tool_test.cpp +++ b/test/tools/namespace_tool_test.cpp @@ -298,13 +298,13 @@ TEST_F(NameSpaceToolTest, CreateFile) { .WillOnce(Return(0)); // 1、正常情况 - EXPECT_CALL(*core_, CreateFile(_, _, _, _, _)) + EXPECT_CALL(*core_, CreateFile(_)) .Times(1) .WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.RunCommand("create")); // 2、创建失败 - EXPECT_CALL(*core_, CreateFile(_, _, _, _, _)) + EXPECT_CALL(*core_, CreateFile(_)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("create")); diff --git a/thirdparties/fmt.BUILD b/thirdparties/fmt.BUILD new file mode 100644 index 0000000000..ee359f151d --- /dev/null +++ b/thirdparties/fmt.BUILD @@ -0,0 +1,43 @@ +# +# Copyright (c) 2020 NetEase Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +cc_library( + name = "fmt", + srcs = [ + "src/format.cc", + "src/os.cc", + ], + hdrs = [ + "include/fmt/args.h", + "include/fmt/chrono.h", + "include/fmt/color.h", + "include/fmt/compile.h", + "include/fmt/core.h", + "include/fmt/format-inl.h", + "include/fmt/format.h", + "include/fmt/os.h", + "include/fmt/ostream.h", + "include/fmt/printf.h", + "include/fmt/ranges.h", + "include/fmt/std.h", + "include/fmt/xchar.h", + ], + includes = [ + "include", + ], + strip_include_prefix = "include", + visibility = ["//visibility:public"], +) \ No newline at end of file diff --git a/thirdparties/memcache/Makefile b/thirdparties/memcache/Makefile index c79cd4da0b..273178e69e 100644 --- a/thirdparties/memcache/Makefile +++ b/thirdparties/memcache/Makefile @@ -2,7 +2,7 @@ .PHONY: download build clean build: clean download - @cd libmemcached-1.1.2 && mkdir build-libmemcached && cd build-libmemcached && cmake .. && make libmemcached + @cd libmemcached-1.1.2 && mkdir build-libmemcached && cd build-libmemcached && cmake .. 
&& make libmemcached -j$$(nproc) @cp libmemcached-1.1.2/build-libmemcached/include/libmemcached-1.0/configure.h libmemcached-1.1.2/include/libmemcached-1.0 download: clean diff --git a/thirdparties/spdlog.BUILD b/thirdparties/spdlog.BUILD new file mode 100644 index 0000000000..15ccc54365 --- /dev/null +++ b/thirdparties/spdlog.BUILD @@ -0,0 +1,26 @@ +# +# Copyright (c) 2020 NetEase Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +cc_library( + name = "spdlog", + hdrs = glob([ + "include/**/*.h", + ]), + defines = ["SPDLOG_FMT_EXTERNAL"], + includes = ["include"], + visibility = ["//visibility:public"], + deps = ["@fmt//:fmt"], +) \ No newline at end of file diff --git a/tools-v2/Makefile b/tools-v2/Makefile index e125987843..ed33716180 100644 --- a/tools-v2/Makefile +++ b/tools-v2/Makefile @@ -25,6 +25,7 @@ GO := go # output OUTPUT := sbin/curve +DAEMON_OUTPUT := sbin/daemon # version VERSION_FLAG := -X github.com/opencurve/curve/tools-v2/pkg/cli/command/version.Version=$(version) @@ -50,13 +51,16 @@ DEBUG_FLAGS := -gcflags=$(GCFLAGS) DEBUG_FLAGS += $(CGO_DEBUG_FLAG) # packages -PACKAGES := $(PWD)/cmd/curvecli/main.go +PACKAGES := $(PWD)/cmd/curve/main.go +DAEMON_PACKAGES := $(PWD)/cmd/daemon/main.go build: proto $(GOENV) $(GO) build -o $(OUTPUT) $(BUILD_FLAGS) $(PACKAGES) + $(GOENV) $(GO) build -o $(DAEMON_OUTPUT) $(BUILD_FLAGS) $(DAEMON_PACKAGES) debug: proto $(GOENV) $(GO) build -o $(OUTPUT) $(DEBUG_FLAGS) $(PACKAGES) + $(GOENV) $(GO) build -o $(DAEMON_OUTPUT) $(DEBUG_FLAGS) $(DAEMON_PACKAGES) init: proto go mod init github.com/opencurve/curve/tools-v2 diff --git a/tools-v2/README.md b/tools-v2/README.md index 290c0a6c50..ef8c6edc90 100644 --- a/tools-v2/README.md +++ b/tools-v2/README.md @@ -47,15 +47,38 @@ A tool for CurveFS & CurveBs. 
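Since the Makefile change above adds `DAEMON_OUTPUT`/`DAEMON_PACKAGES`, `make build` in `tools-v2` now produces two binaries: the CLI (`sbin/curve`) and the new daemon (`sbin/daemon`). A minimal smoke test of the build outputs, assuming it is run from the repository root with the Go and protobuf toolchain described later in this document:

```bash
cd tools-v2

# Builds both PACKAGES and DAEMON_PACKAGES declared in the Makefile.
make build

# Both entry points should now exist under sbin/.
ls -l sbin/curve sbin/daemon

# The CLI is cobra-based, so --help should print the command tree.
./sbin/curve --help
```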
- [list server](#list-server) - [list client](#list-client) - [list dir](#list-dir) + - [list space](#list-space) + - [list chunkserver](#list-chunkserver) + - [list scan-status](#list-scan-status) + - [clean-recycle](#clean-recycle) - [query](#query-1) - [query file](#query-file) + - [query chunk](#query-chunk) + - [query segment](#query-segment) + - [query scan-status](#query-scan-status) - [status](#status-1) - - [staus etcd](#staus-etcd) - - [staus mds](#staus-mds) + - [status etcd](#status-etcd-1) + - [status mds](#status-mds-1) + - [status client](#status-client) + - [status snapshotserver](#status-snapshotserver) + - [status chunkserver](#status-chunkserver) + - [status copyset](#status-copyset-1) - [delete](#delete-1) - [delete peer](#delete-peer) - [update](#update) - [update peer](#update-peer) + - [update leader](#update-leader) + - [update file](#update-file) + - [update throttle](#update-throttle) + - [update scan-state](#update-scan-state) + - [update copyset availflag](#update-copyset-availflag) + - [create](#create-1) + - [create file](#create-file) + - [create dir](#create-dir) + - [check](#check-1) + - [check copyset](#check-copyset-1) + - [snapshot](#snapshot) + - [snapshot copyset](#snapshot-copyset) - [Comparison of old and new commands](#comparison-of-old-and-new-commands) - [curve fs](#curve-fs) - [curve bs](#curve-bs) @@ -87,7 +110,13 @@ wget https://curve-tool.nos-eastchina1.126.net/config/curve.yaml Please modify the `mdsAddr, mdsDummyAddr, etcdAddr` under `curvefs/bs` in the template.yaml file as required ```bash -mv template.yaml ~/.curve/curve.yaml +mv curve.yaml ~/.curve/curve.yaml +``` + +or + +```bash +mv curve.yaml /etc/curve/curve.yaml ``` ### Introduction @@ -884,6 +913,284 @@ Output: +------+-------------+----------+-----------------+------------+---------------------+---------------+-------------+ ``` +##### list space + +show curvebs all disk type space, include total space and used space + +```bash +curve bs list space +``` + +Output: + +```bash ++----------+---------+---------+---------+------------+---------+ +| TYPE | TOTAL | USED | LEFT | RECYCLABLE | CREATED | ++----------+---------+---------+---------+------------+---------+ +| physical | *** GiB | *** GiB | *** GiB | - | - | ++----------+---------+---------+---------+------------+---------+ +| logical | *** GiB | *** GiB | *** GiB | *** GiB | *** GiB | ++----------+---------+---------+---------+------------+---------+ +``` + +##### list chunkserver + +list chunkserver information in curvebs + +```bash +curve bs list chunkserver +``` + +Output: + +```bash ++----+------+-----------+------+-----------+------------+------------+-----------------------------------------------+--------------+-------------+------------------+-----------+ +| ID | TYPE | IP | PORT | RWSTATUS | DISKSTATE | COPYSETNUM | MOUNTPOINT | DISKCAPACITY | DISKUSED | UNHEALTHYCOPYSET | EXTADDR | ++----+------+-----------+------+-----------+------------+------------+-----------------------------------------------+--------------+-------------+------------------+-----------+ +| 1 | nvme | 127.0.0.1 | 8201 | READWRITE | DISKNORMAL | 100 | local:///curvebs/playground/chunkserver1/data | 39 GiB | 42140479488 | 0 % | 127.0.0.1 | ++----+ + +------+ + +------------+-----------------------------------------------+--------------+-------------+------------------+ + +| 2 | | | 8202 | | | 100 | local:///curvebs/playground/chunkserver2/data | 39 GiB | 42140479488 | 0 % | | ++----+ + +------+ + 
+------------+-----------------------------------------------+--------------+-------------+------------------+ + +| 3 | | | 8200 | | | 100 | local:///curvebs/playground/chunkserver0/data | 39 GiB | 42140479488 | 0 % | | ++----+------+-----------+------+-----------+------------+------------+-----------------------------------------------+--------------+-------------+------------------+-----------+ +``` + +##### list scan-status + +list curvebs all copyset that scanning is false + +```bash +curve bs list scan-status +``` + +Output: + +```bash ++-------------+-----------+ +| LOGICALPOOL | COPYSETID | ++-------------+-----------+ +| 1 | 1 | ++-------------+-----------+ +| 1 | 10 | ++-------------+-----------+ +| 1 | 100 | ++-------------+-----------+ +| 1 | 11 | ++-------------+-----------+ +| 1 | 12 | ++-------------+-----------+ +| 1 | 13 | ++-------------+-----------+ +| 1 | 14 | ++-------------+-----------+ +| 1 | 15 | ++-------------+-----------+ +| 1 | 16 | ++-------------+-----------+ +| 1 | 17 | ++-------------+-----------+ +| 1 | 18 | ++-------------+-----------+ +| 1 | 19 | ++-------------+-----------+ +| 1 | 2 | ++-------------+-----------+ +| 1 | 20 | ++-------------+-----------+ +| 1 | 21 | ++-------------+-----------+ +| 1 | 22 | ++-------------+-----------+ +| 1 | 23 | ++-------------+-----------+ +| 1 | 24 | ++-------------+-----------+ +| 1 | 25 | ++-------------+-----------+ +| 1 | 26 | ++-------------+-----------+ +| 1 | 27 | ++-------------+-----------+ +| 1 | 28 | ++-------------+-----------+ +| 1 | 29 | ++-------------+-----------+ +| 1 | 3 | ++-------------+-----------+ +| 1 | 30 | ++-------------+-----------+ +| 1 | 31 | ++-------------+-----------+ +| 1 | 32 | ++-------------+-----------+ +| 1 | 33 | ++-------------+-----------+ +| 1 | 34 | ++-------------+-----------+ +| 1 | 35 | ++-------------+-----------+ +| 1 | 36 | ++-------------+-----------+ +| 1 | 37 | ++-------------+-----------+ +| 1 | 38 | ++-------------+-----------+ +| 1 | 39 | ++-------------+-----------+ +| 1 | 4 | ++-------------+-----------+ +| 1 | 40 | ++-------------+-----------+ +| 1 | 41 | ++-------------+-----------+ +| 1 | 42 | ++-------------+-----------+ +| 1 | 43 | ++-------------+-----------+ +| 1 | 44 | ++-------------+-----------+ +| 1 | 45 | ++-------------+-----------+ +| 1 | 46 | ++-------------+-----------+ +| 1 | 47 | ++-------------+-----------+ +| 1 | 48 | ++-------------+-----------+ +| 1 | 49 | ++-------------+-----------+ +| 1 | 5 | ++-------------+-----------+ +| 1 | 50 | ++-------------+-----------+ +| 1 | 51 | ++-------------+-----------+ +| 1 | 52 | ++-------------+-----------+ +| 1 | 53 | ++-------------+-----------+ +| 1 | 54 | ++-------------+-----------+ +| 1 | 55 | ++-------------+-----------+ +| 1 | 56 | ++-------------+-----------+ +| 1 | 57 | ++-------------+-----------+ +| 1 | 58 | ++-------------+-----------+ +| 1 | 59 | ++-------------+-----------+ +| 1 | 6 | ++-------------+-----------+ +| 1 | 60 | ++-------------+-----------+ +| 1 | 61 | ++-------------+-----------+ +| 1 | 62 | ++-------------+-----------+ +| 1 | 63 | ++-------------+-----------+ +| 1 | 64 | ++-------------+-----------+ +| 1 | 65 | ++-------------+-----------+ +| 1 | 66 | ++-------------+-----------+ +| 1 | 67 | ++-------------+-----------+ +| 1 | 68 | ++-------------+-----------+ +| 1 | 69 | ++-------------+-----------+ +| 1 | 7 | ++-------------+-----------+ +| 1 | 70 | ++-------------+-----------+ +| 1 | 71 | ++-------------+-----------+ +| 1 | 72 | ++-------------+-----------+ +| 1 | 73 | 
++-------------+-----------+ +| 1 | 74 | ++-------------+-----------+ +| 1 | 75 | ++-------------+-----------+ +| 1 | 76 | ++-------------+-----------+ +| 1 | 77 | ++-------------+-----------+ +| 1 | 78 | ++-------------+-----------+ +| 1 | 79 | ++-------------+-----------+ +| 1 | 8 | ++-------------+-----------+ +| 1 | 80 | ++-------------+-----------+ +| 1 | 81 | ++-------------+-----------+ +| 1 | 82 | ++-------------+-----------+ +| 1 | 83 | ++-------------+-----------+ +| 1 | 84 | ++-------------+-----------+ +| 1 | 85 | ++-------------+-----------+ +| 1 | 86 | ++-------------+-----------+ +| 1 | 87 | ++-------------+-----------+ +| 1 | 88 | ++-------------+-----------+ +| 1 | 89 | ++-------------+-----------+ +| 1 | 9 | ++-------------+-----------+ +| 1 | 90 | ++-------------+-----------+ +| 1 | 91 | ++-------------+-----------+ +| 1 | 92 | ++-------------+-----------+ +| 1 | 93 | ++-------------+-----------+ +| 1 | 94 | ++-------------+-----------+ +| 1 | 95 | ++-------------+-----------+ +| 1 | 96 | ++-------------+-----------+ +| 1 | 97 | ++-------------+-----------+ +| 1 | 98 | ++-------------+-----------+ +| 1 | 99 | ++-------------+-----------+ +``` + +### clean-recycle + +clean the recycle bin + +Usage: + +```bash +curve bs clean-recycle --recycleprefix=/test --expiredtime=1h +``` + +Output: + +```bash ++---------+ +| RESULT | ++---------+ +| success | ++---------+ +``` + ### query ##### query file @@ -909,11 +1216,77 @@ Output: +------+------+----------------+-------+--------+---------+--------+-----+---------------------+--------------+---------+-----------------+----------+ ``` -### status +##### query chunk + +query the location of the chunk corresponding to the offset + +Usage: + +```bash +curve bs query chunk --path /test1 --offset 1008600000 +``` + +Output: + +```bash ++-------+-------------+---------+------------+----------------------+ +| CHUNK | LOGICALPOOL | COPYSET | GROUP | LOCATION | ++-------+-------------+---------+------------+----------------------+ +| 61 | 1 | 61 | 4294967357 | ***.***.***.***:**** | +| | | | | ***.***.***.***:**** | +| | | | | ***.***.***.***:**** | ++-------+-------------+---------+------------+----------------------+ +``` -#### staus etcd +##### query segment -get the etcd status of curvefs +query the segments info of the file + +Usage: + +```bash +curve bs query seginfo --path /test1 +``` + +Output: + +```bash ++-------------+-------------+-----------+------------+---------+-------+ +| LOGICALPOOL | SEGMENTSIZE | CHUNKSIZE | START | COPYSET | CHUNK | ++-------------+-------------+-----------+------------+---------+-------+ +| 1 | 1073741824 | 16777216 | 0 | 1 | 1 | ++ + + + +---------+-------+ +| ...... | ++ + + +------------+---------+-------+ +| | | | 9663676416 | 1 | 101 | ++ + + + +---------+-------+ +| ...... 
| ++ + + + +---------+-------+ +| | | | | 99 | 99 | ++-------------+-------------+-----------+------------+---------+-------+ +``` + +##### query scan-status +query the scan status of copysets in curvebs + +Usage: +```bash +curve bs query scan-status --copysetid 1 --logicalpoolid 1 +``` + +Output: +```bash ++-------------+-----------+-------+-------------+--------------------+ +| LOGICALPOOL | COPYSETID | SCAN | LASTSCANSEC | LASTSCANCONSISTENT | ++-------------+-----------+-------+-------------+--------------------+ +| 1 | 1 | false | 1684425801 | true | ++-------------+-----------+-------+-------------+--------------------+ +``` + +### status + +#### status etcd +get the etcd status of curvebs  Usage:  @@ -935,9 +1308,9 @@ Output: +---------------------+---------+----------+ ```  -#### staus mds +#### status mds  -get the mds status of curvefs +get the mds status of curvebs  Usage:  @@ -959,6 +1332,100 @@ Output: +-------------------+-------------------+-------------------+----------+ ```  +#### status client + +get the client status of curvebs + +Usage: + +```bash +curve bs status client +``` + +Output: + +```bash ++-------------+----------------+---------------------+-----+ +| TYPE | VERSION | ADDR | NUM | ++-------------+----------------+---------------------+-----+ +| nebd-server | 9.9.9+2c4861ca | ***.***.**.***:**** | 2 | ++ + +---------------------+ + +| | | ***.***.**.***:**** | | ++-------------+----------------+---------------------+-----+ +``` + +#### status snapshotserver + +get the snapshot clone server status of curvebs + +Usage: + +```bash +curve bs status snapshotserver +``` + +Output: + +```bash ++---------------------+---------------------+-------------------+----------+ +| ADDR | DUMMYADDR | VERSION | STATUS | ++---------------------+---------------------+-------------------+----------+ +| ***.***.**.***:**** | ***.***.**.***:**** | ci+562296c7+debug | follower | ++---------------------+---------------------+ + + +| ***.***.**.***:**** | ***.***.**.***:**** | | | ++---------------------+---------------------+ +----------+ +| ***.***.**.***:**** | ***.***.**.***:**** | | leader | ++---------------------+---------------------+-------------------+----------+ +``` + +#### status chunkserver + +get the chunkserver status of curvebs + +Usage: + +```bash +curve bs status chunkserver +``` + +Output: + +```bash ++------------------+------------------+----------------+--------+------------+ +| EXTERNALADDR | INTERNALADDR | VERSION | STATUS | RECOVERING | ++------------------+------------------+----------------+--------+------------+ +| **************** | **************** | d9b6bb98+debug | online | false | ++------------------+------------------+ + + + +| **************** | **************** | | | | ++------------------+------------------+ + + + +| **************** | **************** | | | | ++------------------+------------------+----------------+--------+------------+ +``` + +#### status copyset + +get the copyset status of curvebs + +Usage: + +```bash +curve bs status copyset +``` + +Output: + +```bash ++------------+-----------+--------+--------+--------+---------+ +| COPYSETKEY | COPYSETID | POOLID | STATUS | LOGGAP | EXPLAIN | ++------------+-----------+--------+--------+--------+---------+ +| 4294967297 | 1 | 1 | ok | 0 | | ++------------+-----------+ + +--------+---------+ +| ...... | ...... | ...... | ...... | ...... | ......
| ++------------+-----------+ + +--------+---------+ +| 4294967395 | 99 | | | 0 | | ++------------+-----------+--------+--------+--------+---------+ +``` + ### delete #### delete peer @@ -979,8 +1446,6 @@ Output: +------------------+------------------+---------+---------+--------+ | 127.0.0.1:8201:0 | 127.0.0.1:8202:0 | (1:29) | success | null | +------------------+------------------+---------+---------+--------+ - - ``` ### update @@ -1003,71 +1468,240 @@ Output: +----------------------+---------+---------+--------+ ``` +#### update leader + +transfer leader + +Usage: +```bash +curve bs update leader 127.0.0.1:8202:0 --logicalpoolid=1 --copysetid=1 --peers=127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0 +``` + +Output: +``` ++-----------------------+-----------------------+---------+---------+ +| LEADER | OLDLEADER | COPYSET | RESULT | ++-----------------------+-----------------------+---------+---------+ +| ***.***.**.***:****:* | ***.***.**.***:****:* | (1:1) | success | ++-----------------------+-----------------------+---------+---------+ +``` + +#### update file + +expand pagefile + +Usage: +```bash +curve bs update file --path /test2/test1 --size 10 +``` + +Output: +``` ++---------+ +| RESULT | ++---------+ +| success | ++---------+ +``` + +#### update throttle + +update file throttle params + +Usage: +```bash +curve bs update throttle --path /test1 --type=bps_total --limit 20000 +``` + +Output: +``` ++---------+ +| RESULT | ++---------+ +| success | ++---------+ +``` + +#### update scan-state + +enable/disable scan for logical pool + +Usage: +```bash +curve bs update scan-state --logicalpoolid 1 [--scan=true/false] +``` + +Output: +``` ++----+------+---------+--------+ +| ID | SCAN | RESULT | REASON | ++----+------+---------+--------+ +| 1 | true | success | null | ++----+------+---------+--------+ +``` + +#### update copyset availflag + +update copyset availflag + +Usage: +```bash +curve bs update copyset availflag --availflag=true [--dryrun=true/false] +``` + +Output: +``` ++--------+-----------+---------------+--------+ +| POOLID | COPYSETID | AVAILFLAG | DRYRUN | ++--------+-----------+---------------+--------+ +| 1 | 1 | false => true | true | ++--------+-----------+---------------+--------+ +``` + +### create + +#### create file + +create pagefile + +Usage: +```bash +curve bs create file --path /test2/test4 --size 10GiB +``` + +Output: +``` ++---------+ +| RESULT | ++---------+ +| success | ++---------+ +``` + +#### create dir + +create directory + +Usage: +```bash +curve bs create dir --path /test2/test5 +``` + +Output: +``` ++---------+ +| RESULT | ++---------+ +| success | ++---------+ +``` + +### check + +#### check copyset + +check copysets health in curvebs + +Usage: + +```shell +curve bs check copyset --copysetid 1 --logicalpoolid 1 +``` + +Output: + +```shell ++------------+-----------+--------+--------+--------+---------+ +| COPYSETKEY | COPYSETID | POOLID | STATUS | LOGGAP | EXPLAIN | ++------------+-----------+--------+--------+--------+---------+ +| 4294967297 | 1 | 1 | ok | 0 | | ++------------+-----------+--------+--------+--------+---------+ +``` + +### snapshot + +#### snapshot copyset + +take snapshot for copyset + +Usage: +```bash +curve bs snapshot copyset 127.0.0.0:8200:0 --logicalpoolid=1 --copysetid=1 +``` + +Output: +``` ++-----------------------+---------+---------+ +| PEER | COPYSET | RESULT | ++-----------------------+---------+---------+ +| ***.***.**.***:****:* | (**:**) | success | ++-----------------------+---------+---------+ +``` + ## 
Comparison of old and new commands ### curve fs -| old | new | -| ---- | ---- | -| curvefs_tool check-copyset | curve fs check copyset | -| curvefs_tool create-fs | curve fs create fs | -| curvefs_tool create-topology | curve fs create topology | -| curvefs_tool delete-fs | curve fs delete fs | -| curvefs_tool list-copyset | curve fs list copyset | -| curvefs_tool list-fs | curve fs list fs | -| curvefs_tool list-fs | curve fs list mountpoint | -| curvefs_tool list-partition | curve fs list partition | -| curvefs_tool query-copyset | curve fs query copyset | -| curvefs_tool query-fs | curve fs query fs | -| curvefs_tool query-inode | curve fs query inode | -| curvefs_tool query-metaserver | curve fs query metaserver | -| curvefs_tool query-partition | curve fs query partition | -| curvefs_tool status-mds | curve fs status mds | -| curvefs_tool status-metaserver | curve fs status metaserver | -| curvefs_tool status-etcd | curve fs status etcd | -| curvefs_tool status-copyset | curve fs status copyset | -| curvefs_tool status-cluster | curve fs status cluster | -| curvefs_tool umount-fs | curve fs umount fs | -| curvefs_tool usage-inode | curve fs usage inode | -| curvefs_tool usage-metadata | curve fs usage metadata | +| old | new | +| ------------------------------ | -------------------------- | +| curvefs_tool check-copyset | curve fs check copyset | +| curvefs_tool create-fs | curve fs create fs | +| curvefs_tool create-topology | curve fs create topology | +| curvefs_tool delete-fs | curve fs delete fs | +| curvefs_tool list-copyset | curve fs list copyset | +| curvefs_tool list-fs | curve fs list fs | +| curvefs_tool list-fs | curve fs list mountpoint | +| curvefs_tool list-partition | curve fs list partition | +| curvefs_tool query-copyset | curve fs query copyset | +| curvefs_tool query-fs | curve fs query fs | +| curvefs_tool query-inode | curve fs query inode | +| curvefs_tool query-metaserver | curve fs query metaserver | +| curvefs_tool query-partition | curve fs query partition | +| curvefs_tool status-mds | curve fs status mds | +| curvefs_tool status-metaserver | curve fs status metaserver | +| curvefs_tool status-etcd | curve fs status etcd | +| curvefs_tool status-copyset | curve fs status copyset | +| curvefs_tool status-cluster | curve fs status cluster | +| curvefs_tool umount-fs | curve fs umount fs | +| curvefs_tool usage-inode | curve fs usage inode | +| curvefs_tool usage-metadata | curve fs usage metadata | ### curve bs -| old | new | -| ---- | ---- | -| curve_ops_tool logical-pool-list | curve bs list logical-pool | -| curve_ops_tool get -fileName= | curve bs query file -path | -| curve_ops_tool etcd-status | curve bs status etcd | -| curve_ops_tool mds-status | curve bs status mds | -| curve_ops_tool server-list | curve bs list server | -| space | | -| status | | -| chunkserver-status | | -| client-status | | -| curve_ops_tool client-list | curve bs list client | -| snapshot-clone-status | | -| copysets-status | | -| chunkserver-list | | -| cluster-status | | -| curve_ops_tool list | curve bs list dir | -| seginfo | | -| curve_ops_tool delete | curve bs delete file | -| clean-recycle | | -| create | | -| chunk-location | | -| check-consistency | | -| remove-peer | curve bs delete peer | -| transfer-leader | | -| reset-peer | curve bs update peer | -| do-snapshot | | -| do-snapshot-all | | -| check-chunkserver | | -| check-copyset | | -| check-server | | -| check-operator | | -| list-may-broken-vol | | -| set-copyset-availflag | | -| update-throttle | | -| 
rapid-leader-schedule | | -| set-scan-state | | -| scan-status | | +| old | new | +| ------------------------------------ | --------------------------------- | +| curve_ops_tool logical-pool-list | curve bs list logical-pool | +| curve_ops_tool get -fileName= | curve bs query file -path | +| curve_ops_tool etcd-status | curve bs status etcd | +| curve_ops_tool mds-status | curve bs status mds | +| curve_ops_tool server-list | curve bs list server | +| curve_ops_tool client-list | curve bs list client | +| curve_ops_tool delete | curve bs delete file | +| curve_ops_tool list | curve bs list dir | +| curve_ops_tool create | curve bs create file/dir | +| curve_ops_tool seginfo | curve bs query seginfo | +| curve_ops_tool chunk-location | curve bs query chunk | +| curve_ops_tool remove-peer | curve bs delete peer | +| curve_ops_tool reset-peer | curve bs update peer | +| curve_ops_tool space | curve bs list space | +| curve_ops_tool update-throttle | curve bs update throttle | +| curve_ops_tool check-copyset | curve bs check copyset | +| curve_ops_tool client-status | curve bs status client | +| curve_ops_tool check-operator | curve bs check operator | +| curve_ops_tool snapshot-clone-status | curve bs status snapshotserver | +| curve_ops_tool transfer-leader | curve bs update leader | +| curve_ops_tool do-snapshot | curve bs snapshot copyset | +| curve_ops_tool set-scan-state | curve bs update scan-state | +| curve_ops_tool chunkserver-status | curve bs status chunkserver | +| curve_ops_tool chunkserver-list | curve bs list chunkserver | +| curve_ops_tool set-copyset-availflag | curve bs update copyset availflag | +| curve_ops_tool scan-status | curve bs list/query scan-status | +| curve_ops_tool clean-recycle | curve bs clean-recycle | +| curve_ops_tool copysets-status | curve bs status copyset | +| curve_ops_tool status | | +| curve_ops_tool check-consistency | | +| curve_ops_tool do-snapshot-all | | +| curve_ops_tool check-chunkserver | | +| curve_ops_tool check-server | | +| curve_ops_tool list-may-broken-vol | | +| curve_ops_tool rapid-leader-schedule | | diff --git a/tools-v2/cmd/curvecli/main.go b/tools-v2/cmd/curve/main.go similarity index 100% rename from tools-v2/cmd/curvecli/main.go rename to tools-v2/cmd/curve/main.go diff --git a/tools-v2/cmd/daemon/main.go b/tools-v2/cmd/daemon/main.go new file mode 100644 index 0000000000..42a575dfb5 --- /dev/null +++ b/tools-v2/cmd/daemon/main.go @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Project: CurveCli + * Created Date: 2023-03-16 + * Author: chengyi (Cyber-SiKu) + */ +package main + +import "github.com/opencurve/curve/tools-v2/pkg/daemon" + +func main() { + daemon.Execute() +} diff --git a/tools-v2/docs/zh/develop.md b/tools-v2/docs/zh/develop.md index a33d4a70bf..e2e42f7d2a 100644 --- a/tools-v2/docs/zh/develop.md +++ b/tools-v2/docs/zh/develop.md @@ -12,11 +12,18 @@ curve 工具是 Curve 团队为了提高系统的易用性,解决旧工具种 - [项目组织结构](#项目组织结构) - [Curve 命令的实现(添加)](#curve-命令的实现添加) - [Curve 命令开发调试](#curve-命令开发调试) - - [部署 Curve 集群](#部署-curve-集群) - - [编译和调试 tools-v2](#编译和调试-tools-v2) - - [环境准备](#环境准备) - - [编译](#编译) - - [调试](#调试) + - [利用提供的playground docker镜像部署](#利用提供的playground-docker镜像部署) + - [部署 Curve 集群](#部署-curve-集群) + - [环境准备](#环境准备) + - [编译](#编译) + - [调试](#调试) + - [调试流程](#调试流程) + - [在物理机上部署](#在物理机上部署) + - [部署curve集群](#部署curve集群) + - [环境准备](#环境准备-1) + - [编译](#编译-1) + - [调试](#调试-1) + - [调试流程](#调试流程-1) ## 整体设计 @@ -63,7 +70,7 @@ curve 的命令分为两大类: 根命令是一种特殊的中间命令,即为 curve。 以命令 curve bs list server 为例: -curve bs list 为中间命令,server 为最终命令。其中 bs list 对应的 go 文件分别为:[bs.go](pkg/cli/command/curvebs/bs.go) 和 [list.go](pkg/cli/command/curvebs/list/list.go);server 对应的 go 文件为:[server.go](pkg/cli/command/curvebs/list/server/server.go)。 +curve bs list 为中间命令,server 为最终命令。其中 bs list 对应的 go 文件分别为:[bs.go](../../pkg/cli/command/curvebs/bs.go) 和 [list.go](../../pkg/cli/command/curvebs/list/list.go);server 对应的 go 文件为:[server.go](../../pkg/cli/command/curvebs/list/server/server.go)。 该命令的输出为: ```shell @@ -107,7 +114,7 @@ func NewListCommand() *cobra.Command { } ``` -类 ListCommand 继承接口 `basecmd.MidCurveCmd`表示它是一个中间命令;`func (listCmd *ListCommand) AddSubCommands() {...}` 用来添加子命令,该条命令的子命令包括在包 `logicalpool` 和 `server` 包下各自 New 函数返回的 cobra.Command 命令,后面会以。 +类 ListCommand 继承接口 `basecmd.MidCurveCmd`,表示它是一个中间命令;`func (listCmd *ListCommand) AddSubCommands() {...}` 用来添加子命令,该条命令的子命令包括在 `logicalpool` 、 `server` 等包下各自 New 函数返回的 cobra.Command 命令。中间命令的子命令可以是中间命令或最终命令,但最终会以最终命令结束。 下面是最终命令 `server` (pkg/cli/command/curvebs/list/server/server.go) 中的 rpc 相关的部分代码: @@ -236,7 +243,25 @@ func (pCmd *ServerCommand) RunCommand(cmd *cobra.Command, args []string) error { ## Curve 命令开发调试 -### 部署 Curve 集群 +> 注意:linux内核版本最好是3.15以上,且具有nbd模块,若当前内核不提供 nbd 模块,用户需自行编译并导入。 +> +> 通过以下命令查看内核版本: +> +> ```shell +> uname -r +> ``` +> +> 推荐操作系统:debian10、11。 +> +> --- +> +> 目前snapshot 相关的命令不能使用 curveadm palygroud 部署的集群(没有s3,所以没有snapshot)。所以要实现/运行snapshot相关指令请参考[在物理机上部署](#在物理机上部署) + + + +### 利用提供的playground docker镜像部署 + +#### 部署 Curve 集群 首先你需要部署一个 Curve 集群,curve集群拉起方式如下: @@ -246,7 +271,14 @@ func (pCmd *ServerCommand) RunCommand(cmd *cobra.Command, args []string) error { CURVEADM_VERSION=v0.1.12-dev bash -c "$(curl -fsSL https://curveadm.nos-eastchina1.126.net/script/install.sh)" ``` -2. 执行 playground 命令时得确保当前用户有 root 权限,或者给 docker 的 socket 加上任意用户读写权限,或者将用户加入 docker 用户组: +2. 升级curveadm: + + ```shell + CURVEADM_VERSION=v0.2.0 curveadm -u + ``` + + +3. 执行 playground 命令时得确保当前用户有 root 权限,或者给 docker 的 socket 加上任意用户读写权限,或者将用户加入 docker 用户组: ```shell curveadm playground run --kind curvebs --container_image harbor.cloud.netease.com/curve/curvebs:playground @@ -263,9 +295,7 @@ sudo usermod -aG docker $USER > > 2. [Run the Docker daemon as a non-root user (Rootless mode)](https://docs.docker.com/engine/security/rootless/) -### 编译和调试 tools-v2 - -### 环境准备 +#### 环境准备 1. 安装 [golang 1.19](https://go.dev/doc/install) 版本及以上 2. 
安装 [protoc-v21.8](https://github.com/protocolbuffers/protobuf/releases/tag/v21.8),请保证命令 `protoc` 可执行 @@ -281,9 +311,7 @@ go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest > go env -w GOPROXY=https://goproxy.io,direct > ``` -1. 准备配置文件,将项目目录下的 `tools-v2/pkg/config/template.yaml` 复制到 `$(HOME)/.curve/curve.yaml`。 - -### 编译 +#### 编译 在 tools-v2 目录下执行 `make debug` 即可完成编译: @@ -293,7 +321,36 @@ make 生成的二进制文件保存为 `tools-v2/sbin/curve`。 -### 调试 +> 可能遇到的问题-1:找不到 `protoc-gen-go` 、`protoc-gen-go-grpc` 二进制文件,但在上面步骤已经install。 +> +> 解决办法-1:将 `~/go` 添加进环境变量 `PATH` : +> +> ```shell +> export $PATH=$HOME/go/bin:$PATH +> ``` +> +> 解决办法-2:拷贝二进制文件或创建软链接。 +> +> 查看 `GOROOT` 下(一般为 `~/go` )是否有二进制文件,若有,可以将其拷贝至go的安装路径后再重新编译。 +> +> ```shell +> cd ~/go/bin +> cp ./protoc-gen-go* /usr/local/go/bin +> ``` +> +> --- +> +> 可能遇到的问题-2:出现musl-gcc相关的报错。 +> +> 解决办法-1:[安装musl-gcc](https://command-not-found.com/musl-gcc)。 +> +> 解决办法-2:直接编译: +> +> ```shell +> go build -o sbin/curve ./cmd/curve/main.go +> ``` + +#### 调试 你可以通过一下两种方式来对生成的二进制文件进行调试: @@ -305,3 +362,210 @@ make ```shell dlv exec sbin/curve --${命令行参数} ``` + +#### 调试流程 + +1. 检查环境是否拉起成功,记录容器ID,后续有用: + +```shell +docker ps -a +``` + +2. 编写好代码后,在 `curve/tools-v2` 目录下编译成二进制文件: + +```shell +make +``` + +3. 将编译好的 Curve 文件拷贝进 playground 容器内: + +```shell +docker cp ./sbin/curve de7603f17cf9:/ +``` + +4. 准备配置文件,将之拷贝进 playground 容器内: + +```shell +docker cp ./pkg/config/curve.yaml de7603f17cf9:/etc/curve/curve.yaml +``` + +5. 进入对应的容器: + +```shell +docker exec -it de7603f17cf9 bash +``` + +6. 执行命令/调试: + + > 查看状态: + > + > ```shell + > ./curve bs status mds + > ``` + > + > 新建一个目录: + > + > ```shell + > ./curve bs create dir --path /yourname + > ``` + > + > 查看刚刚新建的目录,可以发现新建的目录已经添加: + > + > ```shell + > ./curve bs list dir --dir / + > ``` + +### 在物理机上部署 + +#### 部署curve集群 + +首先你需要部署一个 Curve 集群,curve集群拉起方式如下: + +1. 安装curveadm: + +```shell +CURVEADM_VERSION=v0.1.12-dev bash -c "$(curl -fsSL https://curveadm.nos-eastchina1.126.net/script/install.sh)" +``` + +2. 升级curveadm: + + ```shell +CURVEADM_VERSION=v0.2.0 curveadm -u + ``` + + +3. [部署一个MinIO](https://github.com/opencurve/curveadm/wiki/curvefs-cluster-deployment#第-3-步部署-minio可选)。 + + + 拉取镜像: + + ```shell + docker pull minio/minio + ``` + + + 挂载命令执行: + + ```shell + mkdir -p /data/minio/config + mkdir -p /data/minio/data + ``` + + + 运行MinIO: + + ```shell + docker run -p 9000:9000 -p 9090:9090 --name minio \ + -d --restart=always \ + -e "MINIO_ACCESS_KEY=minioadmin" \ + -e "MINIO_SECRET_KEY=minioadmin" \ + -v /data/minio/data:/data \ + -v /data/minio/config:/root/.minio \minio/minio \ + server /data --console-address ":9090" + ``` + + + 查看MinIO是否启动成功,并创建桶: + + + 图形界面方法:打开 http://ip:9090,密码为上面设置的admin admin123456,要记得**在web页面上创建桶**。 + + 命令行方法:[使用mc工具创建桶](https://min.io/docs/minio/linux/reference/minio-mc.html),主要是采用[mc mb命令](https://min.io/docs/minio/linux/reference/minio-mc/mc-mb.html)。 + + 后续配置文件(topology.yaml)中的ak(Access Key)和sk(Secret Key) 都为 minioadmin。 + + nos_address 为 ip:9000 为minio部署的ip和端口号(此处就是9000,MinIO有两个监听端口,一个是给接口调用的,另一个是浏览器访问用的)。 + + snapshot_bucket_name 为创建的桶名。 + +4. 
按照[部署文档](https://github.com/opencurve/curveadm/wiki/curvebs-cluster-deployment)部署一个单机集群,可以跳过格式化磁盘,然后填写topology.yaml的配置项(此处仅列出关键步骤,具体还请参考[部署文档](https://github.com/opencurve/curveadm/wiki/curvebs-cluster-deployment)): + + + 导入主机列表:`hosts.yaml`(将其中IP,user替换为自己的,再通过 `ssh-copy-id username@IP` 的形式上传密钥): + + ```yaml + # hosts.yaml + global: + user: curve + ssh_port: 22 + private_key_file: /home/curve/.ssh/id_rsa + + hosts: + - host: server-host + hostname: 10.0.1.1 + ``` + + + 准备集群拓扑文件([单机部署模板](https://github.com/opencurve/curveadm/blob/master/configs/bs/stand-alone/topology.yaml)):修改其中的s3相关属性为上述MinIO部署的值,同时禁用chunkfile pool。 + + ```yaml + # topology.yaml + kind: curvebs + global: + ... + s3.nos_address: <> // ip:9000 is the ip and port number deployed by minio + s3.snapshot_bucket_name: <> // created bucket name + s3.ak: <> // ak minioadmin + s3.sk: <> //sk minioadmin + ... + + chunkserver_services: + config: + ... + copiesets: 100 + chunkfilepool.enable_get_chunk_from_pool: false + deploy: + - host: ${target} + - host: ${target} + - host: ${target} + ... + ``` + + + 添加集群并切换集群: + + ```shell + # 添加 'my-cluster' 集群,并指定集群拓扑文件 + curveadm cluster add my-cluster -f topology.yaml + # 切换 'my-cluster' 集群为当前管理集群 + curveadm cluster checkout my-cluster + ``` + + + 部署集群: + + ```shell + curveadm deploy + ``` + + + 查看集群状态: + + ```shell + curveadm status + ``` + + +#### 环境准备 + +请参照[利用docker部署-环境准备](#环境准备)完成环境准备步骤。 + +#### 编译 + +请参照[利用docker部署-编译](#编译)完成编译步骤。 + +#### 调试 + +请参照[利用docker部署-调试](#调试)完成调试步骤。 + +#### 调试流程 + +1. 检查curve是否部署成功: + +```shell +curveadm status +``` + +2. 编写好代码后,在 `/curve/tools-v2` 目录下编译成二进制文件: + +```shell +make +``` + +3. 准备配置文件,将项目目录下的 `tools-v2/pkg/config/curve.yaml` 复制到 `$(HOME)/.curve/curve.yaml`: + +```shell +cp ./pkg/config/curve.yaml ~/.curve/curve.yaml +``` + +4. 
在项目目录(`curve/tools-v2`) 下执行命令/调试: + +```shell +./sbin/curve bs status mds +``` \ No newline at end of file diff --git a/tools-v2/go.mod b/tools-v2/go.mod index ae97543b28..82fd9a5e2a 100644 --- a/tools-v2/go.mod +++ b/tools-v2/go.mod @@ -5,80 +5,94 @@ go 1.19 replace github.com/optiopay/kafka => github.com/cilium/kafka v0.0.0-20180809090225-01ce283b732b require ( - github.com/cilium/cilium v1.12.8 - github.com/deckarep/golang-set/v2 v2.1.0 - github.com/docker/cli v20.10.18+incompatible - github.com/dustin/go-humanize v1.0.0 - github.com/gookit/color v1.5.2 - github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae + github.com/cilium/cilium v1.13.3 + github.com/deckarep/golang-set/v2 v2.3.0 + github.com/docker/cli v24.0.2+incompatible + github.com/dustin/go-humanize v1.0.1 + github.com/gookit/color v1.5.3 + github.com/moby/term v0.5.0 github.com/olekukonko/tablewriter v0.0.5 github.com/pkg/xattr v0.4.9 - github.com/schollz/progressbar/v3 v3.13.0 - github.com/smartystreets/goconvey v1.7.2 - github.com/spf13/cobra v1.5.0 + github.com/schollz/progressbar/v3 v3.13.1 + github.com/smartystreets/goconvey v1.8.0 + github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.13.0 - golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 - golang.org/x/sys v0.5.0 - google.golang.org/grpc v1.49.0 - google.golang.org/protobuf v1.28.1 + github.com/spf13/viper v1.16.0 + golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 + golang.org/x/sys v0.8.0 + google.golang.org/grpc v1.55.0 + google.golang.org/protobuf v1.30.0 ) require ( - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect - github.com/Microsoft/go-winio v0.5.2 // indirect - github.com/Microsoft/hcsshim v0.9.4 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Microsoft/hcsshim v0.9.9 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/containerd/continuity v0.3.0 // indirect - github.com/docker/distribution v2.8.1+incompatible // indirect - github.com/docker/docker v20.10.18+incompatible // indirect - github.com/docker/docker-credential-helpers v0.6.4 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/containerd/cgroups v1.1.0 // indirect + github.com/containerd/continuity v0.4.1 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/docker/docker v24.0.2+incompatible // indirect + github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect - github.com/fvbommel/sortorder v1.0.2 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fvbommel/sortorder v1.1.0 // indirect + github.com/godbus/dbus/v5 v5.0.4 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/gopherjs/gopherjs v1.17.2 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect + 
github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jtolds/gls v4.20.0+incompatible // indirect - github.com/magiconair/properties v1.8.6 // indirect + github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/sys/mount v0.3.3 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect github.com/morikuni/aec v1.0.0 // indirect + github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86 // indirect + github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect + github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.0.5 // indirect + github.com/pelletier/go-toml/v2 v2.0.8 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.13.0 // indirect - github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect - github.com/rivo/uniseg v0.4.3 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect - github.com/smartystreets/assertions v1.13.0 // indirect - github.com/spf13/afero v1.9.2 // indirect - github.com/spf13/cast v1.5.0 // indirect + github.com/prometheus/client_golang v1.15.1 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect + github.com/rivo/uniseg v0.4.4 // indirect + github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/smartystreets/assertions v1.13.1 // indirect + github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/subosito/gotenv v1.4.1 // indirect + github.com/subosito/gotenv v1.4.2 // indirect github.com/theupdateframework/notary v0.7.0 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect - golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/term v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect - google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5 // indirect + go.opencensus.io v0.24.0 // indirect + golang.org/x/crypto v0.9.0 // indirect + golang.org/x/mod v0.10.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sync v0.2.0 // indirect + golang.org/x/term v0.8.0 // indirect + golang.org/x/text v0.9.0 // indirect + golang.org/x/tools v0.9.3 // indirect + google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/tools-v2/go.sum b/tools-v2/go.sum index 
39fe444eb5..5e6e569bfe 100644 --- a/tools-v2/go.sum +++ b/tools-v2/go.sum @@ -41,6 +41,8 @@ github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9mo github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= @@ -63,6 +65,8 @@ github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JP github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= @@ -73,6 +77,8 @@ github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+V github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim v0.9.4 h1:mnUj0ivWy6UzbB1uLFqKR6F+ZyiDc7j4iGgHTpO+5+I= github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim v0.9.9 h1:FYrTiCNOc8ZddNBVkJBxWZYm22rgxHFmxMoGK66sDF0= +github.com/Microsoft/hcsshim v0.9.9/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -118,17 +124,22 @@ github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXe github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 
h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/cilium v1.12.8 h1:yxlmOmWbObcTg0SUQm0ov3fHyGBU9JpwfwqdU4uHuiQ= -github.com/cilium/cilium v1.12.8/go.mod h1:JsAUFtYyB9LymeD0t56u2sBhN5W729AjDHegzAw8JXc= +github.com/cilium/cilium v1.12.9 h1:tUVFqhI6lKamOn35/kLCr+vy6yxwIduV2Yn6bqfw6O0= +github.com/cilium/cilium v1.12.9/go.mod h1:ZpbQlJvfGBBPP4him5qTNwdy8wX/lhPVjFwTeWDkDOU= +github.com/cilium/cilium v1.13.3 h1:SDSFNOyz+k3XzMM2NWSO1rHfb3aJfHrRAWbaBIbjklU= +github.com/cilium/cilium v1.13.3/go.mod h1:6LRwUmF823W0wSfFuLbd7ZDL8Lq9lgTUZ4//1dohwcY= github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= @@ -155,7 +166,10 @@ github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1 github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= @@ -184,6 +198,8 @@ github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EX github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= +github.com/containerd/continuity v0.4.1 h1:wQnVrjIyQ8vhU2sgOiL5T07jo+ouqc2bnKsv5/EqGhU= +github.com/containerd/continuity v0.4.1/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo 
v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= @@ -237,10 +253,13 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -261,6 +280,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI= github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +github.com/deckarep/golang-set/v2 v2.3.0 h1:qs18EKUfHm2X9fA50Mr/M5hccg2tNnVqsiBImnyDs0g= +github.com/deckarep/golang-set/v2 v2.3.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= @@ -270,17 +291,23 @@ github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyG github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v20.10.18+incompatible h1:f/GQLsVpo10VvToRay2IraVA1wHz9KktZyjev3SIVDU= github.com/docker/cli v20.10.18+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v24.0.2+incompatible h1:QdqR7znue1mtkXIJ+ruQMGQhpw2JzMJLRXp6zpzF6tM= +github.com/docker/cli v24.0.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod 
h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.18+incompatible h1:SN84VYXTBNGn92T/QwIRPlum9zfemfitN7pbsp26WSc= -github.com/docker/docker v20.10.18+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE= +github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg= +github.com/docker/docker v24.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o= github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= +github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= +github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0= github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= @@ -301,6 +328,8 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -322,9 +351,13 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fullsailor/pkcs7 
v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/fvbommel/sortorder v1.0.2 h1:mV4o8B2hKboCdkJm+a7uX/SIpZob4JzUpc5GGnM45eo= github.com/fvbommel/sortorder v1.0.2/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -358,8 +391,10 @@ github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= @@ -378,6 +413,8 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -403,6 +440,8 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93 h1:jc2UWq7CbdszqeH6qu1ougXMIUBfSy8Pbh/anURYbGI= @@ -415,6 +454,7 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -446,6 +486,8 @@ github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3i github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gookit/color v1.5.2 h1:uLnfXcaFjlrDnQDT+NCBcfhrXqYTx/rcCa6xn01Y8yI= github.com/gookit/color v1.5.2/go.mod h1:w8h4bGiHeeBpvQVePTutdbERIUf3oJE5lZ8HM0UgXyg= +github.com/gookit/color v1.5.3 h1:twfIhZs4QLCtimkP7MOxlF3A0U/5cDPseRT9M/+2SCE= +github.com/gookit/color v1.5.3/go.mod h1:NUzwzeehUfl7GIb36pqId+UGmRfQcU/WiiyTTeNjHtE= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= @@ -486,6 +528,8 @@ github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8 h1:CZkYfurY6KGhVtlalI4QwQ6T0Cu6iuY3e0x5RLu96WE= github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo= @@ -536,6 +580,8 @@ github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -554,6 
+600,8 @@ github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOq github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= @@ -575,10 +623,14 @@ github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2J github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI= github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -593,6 +645,10 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86 h1:D6paGObi5Wud7xg83MaEFyjxQB1W5bz5d0IFppr+ymk= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c h1:bY6ktFuJkt+ZXkX0RChQch2FtHpWQLVS8Qo1YasiIVk= +github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod 
h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= @@ -636,6 +692,7 @@ github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/ github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= @@ -649,6 +706,8 @@ github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3v github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= +github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= +github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -672,6 +731,8 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= +github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -679,6 +740,8 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a h1:CmF68hwI0XsOQ5UwlBopMi2Ow4Pbg32akc4KIVCOm+Y= github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= 
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -689,6 +752,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -703,10 +768,14 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -717,8 +786,12 @@ github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiB github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/schollz/progressbar/v3 v3.13.0 h1:9TeeWRcjW2qd05I8Kf9knPkW4vLM/hYoa6z9ABvxje8= github.com/schollz/progressbar/v3 v3.13.0/go.mod h1:ZBYnSuLAX2LU8P8UiKN/KgF2DY58AJC8yfVYLPC8Ly4= +github.com/schollz/progressbar/v3 v3.13.1 h1:o8rySDYiQ59Mwzy2FELeHY5ZARXZTVJC7iHD6PEFUiE= +github.com/schollz/progressbar/v3 v3.13.1/go.mod h1:xvrbki8kfT1fzWzBT/UZd9L6GA+jdL7HAgq2RFnO6fQ= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod 
h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= @@ -730,29 +803,41 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/assertions v1.13.0 h1:Dx1kYM01xsSqKPno3aqLnrwac2LetPvN23diwyr69Qs= github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8= +github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU= +github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= +github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w= +github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod 
h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= @@ -768,12 +853,15 @@ github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod h1:A8kyI5cUJhb8 github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.13.0 h1:BWSJ/M+f+3nmdz9bxB+bWX28kkALN2ok11D0rSo8EJU= github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw= +github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= +github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -784,8 +872,12 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -835,6 +927,8 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io 
v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -858,8 +952,11 @@ golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 h1:Y/gsMcFOcR+6S6f3YeMKl5g+dZMEWqcz5Czj/GWYbkM= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -872,6 +969,8 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 h1:MDc5xs78ZrZr3HMQugiXOAkSZtfTpbJLDr/lwfgO53E= golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -896,6 +995,10 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -942,10 +1045,13 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -968,6 +1074,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1055,15 +1163,22 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1074,6 +1189,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1141,6 +1258,10 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= +golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1216,6 +1337,12 @@ google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5 h1:ou3VRVAif8UJqz3l1r4Isoz7rrUWHWDHBonShMNYoQs= google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= +google.golang.org/genproto 
v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e h1:NumxXLPfHSndr3wBBdeKiVHjGVFzi9RX2HwwQke94iY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1242,6 +1369,8 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= +google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1257,6 +1386,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/cenkalti/backoff.v2 v2.2.1 h1:eJ9UAg01/HIHG987TwxvnzK2MgxXq97YY6rYDpY9aII= diff --git a/tools-v2/internal/error/error.go b/tools-v2/internal/error/error.go index 6800931cfd..66d3e90833 100644 --- a/tools-v2/internal/error/error.go +++ b/tools-v2/internal/error/error.go @@ -30,6 +30,7 @@ import ( "github.com/opencurve/curve/tools-v2/proto/curvefs/proto/topology" "github.com/opencurve/curve/tools-v2/proto/proto/copyset" "github.com/opencurve/curve/tools-v2/proto/proto/nameserver2" + "github.com/opencurve/curve/tools-v2/proto/proto/topology/statuscode" bs_topo_statuscode "github.com/opencurve/curve/tools-v2/proto/proto/topology/statuscode" ) @@ -187,9 +188,9 @@ func MostImportantCmdError(err []*CmdError) *CmdError { // keep the most important wrong id, all wrong message will be kept // if all success return success -func MergeCmdErrorExceptSuccess(err []*CmdError) CmdError { +func MergeCmdErrorExceptSuccess(err []*CmdError) *CmdError { if len(err) == 0 { - return *NewSucessCmdError() + return NewSucessCmdError() } var ret CmdError ret.Code = 
CODE_UNKNOWN @@ -207,31 +208,31 @@ func MergeCmdErrorExceptSuccess(err []*CmdError) CmdError { } } if countSuccess == len(err) { - return *NewSucessCmdError() + return NewSucessCmdError() } ret.Message = ret.Message[:len(ret.Message)-1] - return ret + return &ret } // keep the most important wrong id, all wrong message will be kept // if have one success return success -func MergeCmdError(err []*CmdError) CmdError { +func MergeCmdError(err []*CmdError) *CmdError { if len(err) == 0 { - return *NewSucessCmdError() + return NewSucessCmdError() } var ret CmdError ret.Code = CODE_UNKNOWN ret.Message = "" for _, e := range err { if e.Code == CODE_SUCCESS { - return *e + return e } else if e.Code < ret.Code { ret.Code = e.Code } ret.Message = e.Message + "\n" + ret.Message } ret.Message = ret.Message[:len(ret.Message)-1] - return ret + return &ret } var ( @@ -389,6 +390,75 @@ var ( ErrQueryWarmup = func() *CmdError { return NewInternalCmdError(43, "query warmup progress fail, err: %s") } + ErrBsGetSegment = func() *CmdError { + return NewInternalCmdError(44, "get segments fail, err: %s") + } + ErrBsGetChunkCopyset = func() *CmdError { + return NewInternalCmdError(45, "get copyset of chunk fail, err: %s") + } + ErrBsChunkServerListInCopySets = func() *CmdError { + return NewInternalCmdError(46, "get chunkserver list in copysets fail, err: %s") + } + ErrBsUnknownFileType = func() *CmdError { + return NewInternalCmdError(47, "unknown file type[%s], only support: dir, file") + } + ErrBsCreateFileOrDirectoryType = func() *CmdError { + return NewInternalCmdError(48, "create file or directory fail, err: %s") + } + ErrBsListLogicalPoolInfo = func() *CmdError { + return NewInternalCmdError(49, "list logical pool info fail, the error is: %s") + } + ErrBsUnknownThrottleType = func() *CmdError { + return NewInternalCmdError(50, "unknown throttle type[%s], only support: iops_total|iops_read|iops_write|bps_total|bps_read|bps_write") + } + ErrBsListDir = func() *CmdError { + return NewInternalCmdError(51, "list directory fail, err: %s") + } + ErrBsGetCopysetStatus = func() *CmdError { + return NewInternalCmdError(52, "get copyset status fail, err: %s") + } + ErrBsOpNameNotSupport = func() *CmdError { + return NewInternalCmdError(53, "not support op[%s], only support: operator, change_peer, add_peer, remove_peer, transfer_leader") + } + ErrBsGetClientList = func() *CmdError { + return NewInternalCmdError(54, "get client list fail, err: %s") + } + ErrBsGetClientStatus = func() *CmdError { + return NewInternalCmdError(55, "get client status fail, err: %s") + } + ErrBsGetEtcdStatus = func() *CmdError { + return NewInternalCmdError(56, "get etcd status fail, err: %s") + } + ErrBsGetMdsStatus = func() *CmdError { + return NewInternalCmdError(57, "get mds status fail, err: %s") + } + ErrBsGetSnapshotServerStatus = func() *CmdError { + return NewInternalCmdError(58, "get snapshotserver status fail, err: %s") + } + ErrBsGetChunkServerInCluster = func() *CmdError { + return NewInternalCmdError(59, "get chunkserver in cluster fail, err: %s") + } + ErrBsQueryChunkServerRecoverStatus = func() *CmdError { + return NewInternalCmdError(60, "query chunkserver recover status fail, err: %s") + } + ErrBsListChunkServer = func() *CmdError { + return NewInternalCmdError(61, "list chunkserver fail, err: %s") + } + ErrBsGetCopysetInChunkServer = func() *CmdError { + return NewInternalCmdError(62, "get copyset in chunkserver fail, err: %s") + } + ErrBsGetChunkInfo = func() *CmdError { + return NewInternalCmdError(63, "get chunk info 
fail, err: %s") + } + ErrBsGetUnavailCopysets = func() *CmdError { + return NewInternalCmdError(64, "get unavail copysets fail, err: %s") + } + ErrBsGetScanStatus = func() *CmdError { + return NewInternalCmdError(65, "query scan-status fail, err: %s") + } + ErrBsListScanStatus = func() *CmdError { + return NewInternalCmdError(66, "list scan-status fail, err: %s") + } // http error ErrHttpUnreadableResult = func() *CmdError { @@ -602,6 +672,18 @@ var ( message := fmt.Sprintf("Rpc[ListPoolZone] faild status code: %s", statuscode.String()) return NewInternalCmdError(int(statuscode), message) } + ErrBsGetChunkserverInClusterRpc = func(statuscode bs_topo_statuscode.TopoStatusCode) *CmdError { + message := fmt.Sprintf("Rpc[GetChunkserverInCluster] failed, status code: %s", statuscode.String()) + return NewInternalCmdError(int(statuscode), message) + } + ErrBsSetCopysetAvailFlagRpc = func(statuscode bs_topo_statuscode.TopoStatusCode) *CmdError { + message := fmt.Sprintf("Rpc[SetCopysetAvailFlag] failed, status code: %s", statuscode.String()) + return NewInternalCmdError(int(statuscode), message) + } + ErrBsGetCopysetInChunkServerRpc = func(statuscode bs_topo_statuscode.TopoStatusCode) *CmdError { + message := fmt.Sprintf("Rpc[GetCopySetsInChunkServer] failed, status code: %s", statuscode.String()) + return NewInternalCmdError(int(statuscode), message) + } // bs ErrCreateBsTopology = func(statusCode bs_topo_statuscode.TopoStatusCode, topoType string, name string) *CmdError { @@ -685,4 +767,95 @@ var ( } return NewRpcReultCmdError(-code, message) } + + ErrGetOrAllocateSegment = func(statusCode nameserver2.StatusCode, file string, offset uint64) *CmdError { + var message string + code := int(statusCode) + switch statusCode { + case nameserver2.StatusCode_kOK: + message = "getOrAllocateSegment successfully" + default: + message = fmt.Sprintf("getOrAllocateSegment file[%s] offset[%d], err: %s", file, offset, statusCode.String()) + } + return NewRpcReultCmdError(code, message) + } + + ErrGetChunkServerListInCopySets = func(statusCode statuscode.TopoStatusCode, logicalPool uint32, copysetIds []uint32) *CmdError { + var message string + code := int(statusCode) + switch statusCode { + case statuscode.TopoStatusCode_Success: + message = "getChunkServerListInCopySets successfully" + default: + message = fmt.Sprintf("getChunkServerListInCopySets logicalPool[%d] copysets%v, err: %s", logicalPool, copysetIds, statusCode.String()) + } + return NewRpcReultCmdError(-code, message) + } + + ErrExtendFile = func(statusCode nameserver2.StatusCode, path, size string) *CmdError { + var message string + code := int(statusCode) + switch statusCode { + case nameserver2.StatusCode_kOK: + message = "successfully expanded the file" + default: + message = fmt.Sprintf("failed to expand file[%s] to %s, err: %s", path, size, statusCode.String()) + } + return NewRpcReultCmdError(code, message) + } + ErrCreateFile = func(statusCode nameserver2.StatusCode, path string) *CmdError { + var message string + code := int(statusCode) + switch statusCode { + case nameserver2.StatusCode_kOK: + message = "Created successfully" + default: + message = fmt.Sprintf("failed to create file[%s], err: %s", path, statusCode.String()) + } + return NewRpcReultCmdError(code, message) + } + ErrUpdateFileThrottle = func(statusCode nameserver2.StatusCode, path string) *CmdError { + var message string + code := int(statusCode) + switch statusCode { + case nameserver2.StatusCode_kOK: + message = "successfully updated the file throttle" + default: + message = 
fmt.Sprintf("failed to update file[%s] throttle, err: %s", path, statusCode.String()) + } + return NewRpcReultCmdError(code, message) + } + ErrBsGetCopyset = func(statusCode statuscode.TopoStatusCode, logicalpoolid, copysetid uint32) *CmdError { + var message string + code := int(statusCode) + switch statusCode { + case statuscode.TopoStatusCode_Success: + message = "success" + default: + message = fmt.Sprintf("rpc get copyset(id: %d,logicalPoolid: %d) info fail, err: %s", copysetid, logicalpoolid, statusCode.String()) + } + return NewRpcReultCmdError(code, message) + } + ErrBsGetChunkServerInClusterRpc = func(statusCode statuscode.TopoStatusCode) *CmdError { + var message string + code := int(statusCode) + switch statusCode { + case statuscode.TopoStatusCode_Success: + message = "success" + default: + message = fmt.Sprintf("Rpc[GetChunkServerInCluster] fail, err: %s", statusCode.String()) + } + return NewRpcReultCmdError(code, message) + } + ErrBsQueryChunkserverRecoverStatus = func(statusCode statuscode.TopoStatusCode) *CmdError { + var message string + code := int(statusCode) + switch statusCode { + case statuscode.TopoStatusCode_Success: + message = "success" + default: + message = fmt.Sprintf("Rpc[QueryChunkserverRecoverStatus] fail, err: %s", statusCode.String()) + } + return NewRpcReultCmdError(code, message) + } ) diff --git a/tools-v2/internal/utils/copyset.go b/tools-v2/internal/utils/copyset.go index 2a0528d8b5..d7762c88de 100644 --- a/tools-v2/internal/utils/copyset.go +++ b/tools-v2/internal/utils/copyset.go @@ -27,6 +27,8 @@ import ( cmderror "github.com/opencurve/curve/tools-v2/internal/error" "github.com/opencurve/curve/tools-v2/proto/curvefs/proto/copyset" "github.com/opencurve/curve/tools-v2/proto/curvefs/proto/heartbeat" + bscopyset "github.com/opencurve/curve/tools-v2/proto/proto/copyset" + bsheartbeat "github.com/opencurve/curve/tools-v2/proto/proto/heartbeat" ) type CopysetInfoStatus struct { @@ -34,6 +36,11 @@ type CopysetInfoStatus struct { Peer2Status map[string]*copyset.CopysetStatusResponse `json:"peer status,omitempty"` } +type BsCopysetInfoStatus struct { + Info *bsheartbeat.CopySetInfo `json:"info,omitempty"` + Peer2Status map[string]*bscopyset.CopysetStatusResponse `json:"peer status,omitempty"` +} + type COPYSET_HEALTH_STATUS int32 const ( @@ -153,6 +160,45 @@ func CheckCopySetHealth(copysetIS *CopysetInfoStatus) (COPYSET_HEALTH_STATUS, [] } } +func CheckBsCopySetHealth(copysetIS *BsCopysetInfoStatus) (COPYSET_HEALTH_STATUS, []*cmderror.CmdError) { + peers := copysetIS.Info.GetPeers() + peer2Status := copysetIS.Peer2Status + avalibalePeerNum := 0 + var errs []*cmderror.CmdError + for addr, status := range peer2Status { + if status == nil { + // peer is offline + err := cmderror.ErrOfflineCopysetPeer() + err.Format(addr) + errs = append(errs, err) + continue + } + opStatus := status.GetStatus() + state := status.GetState() + peer := status.GetPeer() + if opStatus == bscopyset.COPYSET_OP_STATUS_COPYSET_OP_STATUS_SUCCESS && CopysetState_Avaliable[state] { + avalibalePeerNum++ + } else if opStatus != bscopyset.COPYSET_OP_STATUS_COPYSET_OP_STATUS_SUCCESS { + err := cmderror.ErrBsCopysetOpStatus(opStatus, addr) + errs = append(errs, err) + } else { + err := cmderror.ErrStateCopysetPeer() + err.Format(peer.String(), CopysetState_name[state]) + errs = append(errs, err) + } + } + + n := len(peers) + switch { + case avalibalePeerNum == n: + return COPYSET_OK, errs + case avalibalePeerNum >= n/2+1: + return COPYSET_WARN, errs + default: + return COPYSET_ERROR, errs + } 
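+	// The switch above applies a majority rule over the copyset's peers:
+	// all n peers available -> COPYSET_OK, at least n/2+1 available -> COPYSET_WARN,
+	// otherwise (no quorum) -> COPYSET_ERROR. E.g. with 3 replicas: 3 -> OK, 2 -> WARN, 0 or 1 -> ERROR.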
+} + func GetCopysetKey(poolid uint64, copysetid uint64) uint64 { return (poolid << 32) | copysetid } diff --git a/tools-v2/internal/utils/align.go b/tools-v2/internal/utils/flag.go similarity index 100% rename from tools-v2/internal/utils/align.go rename to tools-v2/internal/utils/flag.go diff --git a/tools-v2/internal/utils/metric.go b/tools-v2/internal/utils/metric.go index b43de31179..19dec6a0dc 100644 --- a/tools-v2/internal/utils/metric.go +++ b/tools-v2/internal/utils/metric.go @@ -21,15 +21,75 @@ */ package cobrautil +import ( + "time" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" +) + const ( kVars = "/vars/" kLogicalPoolMetricPrefix = "topology_metric_logicalPool_" + kSechduleOpMetricpPrefix = "mds_scheduler_metric_" ) func GetPoolLogicalCapacitySubUri(poolName string) string { - return kVars + ToUnderscoredName(kLogicalPoolMetricPrefix + poolName + "_logicalCapacity") + return kVars + ToUnderscoredName(kLogicalPoolMetricPrefix+poolName+"_logicalCapacity") } func GetPoolLogicalAllocSubUri(poolName string) string { - return kVars+ToUnderscoredName(kLogicalPoolMetricPrefix + poolName + "_logicalAlloc") + return kVars + ToUnderscoredName(kLogicalPoolMetricPrefix+poolName+"_logicalAlloc") +} + +func GetPoolTotalChunkSizeName(poolName string) string { + return kVars + ToUnderscoredName(kLogicalPoolMetricPrefix+poolName+"_chunkSizeTotalBytes") +} + +func GetPoolUsedChunkSizeName(poolName string) string { + return kVars + ToUnderscoredName(kLogicalPoolMetricPrefix+poolName+"_chunkSizeUsedBytes") +} + +type CheckOperatorType int32 + +const ( + CheckOperatorTypeTotal CheckOperatorType = 1 + CheckOperatorTypeChange CheckOperatorType = 2 + CheckOperatorTypeAdd CheckOperatorType = 3 + CheckOperatorTypeRemove CheckOperatorType = 4 + CheckOperatorTypeTransfer CheckOperatorType = 5 +) + +var ( + CheckOperatorName_value = map[string]int32{ + "operator": 1, + "change_peer": 2, + "add_peer": 3, + "remove_peer": 4, + "transfer_leader": 5, + } +) + +func SupportOpName(opName string) (CheckOperatorType, *cmderror.CmdError) { + op, ok := CheckOperatorName_value[opName] + if !ok { + retErr := cmderror.ErrBsOpNameNotSupport() + retErr.Format(opName) + return 0, retErr + } + return CheckOperatorType(op), cmderror.Success() +} + +func GetDefaultCheckTime(opType CheckOperatorType) time.Duration { + var checkTime time.Duration + switch opType { + case CheckOperatorTypeTotal, CheckOperatorTypeTransfer: + checkTime = 30 * time.Second + case CheckOperatorTypeChange, CheckOperatorTypeAdd, CheckOperatorTypeRemove: + checkTime = 5 * time.Second + } + return checkTime +} + +func GetOpNumSubUri(opName string) string { + return kVars + ToUnderscoredName(kSechduleOpMetricpPrefix+opName+"_num") } diff --git a/tools-v2/internal/utils/proto.go b/tools-v2/internal/utils/proto.go index 1911f824b1..fc53053a9e 100644 --- a/tools-v2/internal/utils/proto.go +++ b/tools-v2/internal/utils/proto.go @@ -29,6 +29,7 @@ import ( cmderror "github.com/opencurve/curve/tools-v2/internal/error" "github.com/opencurve/curve/tools-v2/proto/curvefs/proto/common" "github.com/opencurve/curve/tools-v2/proto/curvefs/proto/topology" + "github.com/opencurve/curve/tools-v2/proto/proto/nameserver2" ) func TranslateFsType(fsType string) (common.FSType, *cmderror.CmdError) { @@ -191,5 +192,34 @@ func Topology2Map(topo *topology.ListTopologyResponse) (map[string]interface{}, ret[POOL_LIST] = poolList retErr := cmderror.MergeCmdErrorExceptSuccess(errs) - return ret, &retErr + return ret, retErr +} + +const ( + TYPE_DIR = "dir" + 
TYPE_FILE = "file" +) + +func TranslateFileType(fileType string) (nameserver2.FileType, *cmderror.CmdError) { + switch fileType { + case TYPE_DIR: + return nameserver2.FileType_INODE_DIRECTORY, cmderror.ErrSuccess() + case TYPE_FILE: + return nameserver2.FileType_INODE_PAGEFILE, cmderror.ErrSuccess() + } + retErr := cmderror.ErrBsUnknownFileType() + retErr.Format(fileType) + return nameserver2.FileType_INODE_DIRECTORY, retErr +} + +func ParseThrottleType(typeStr string) (nameserver2.ThrottleType, *cmderror.CmdError) { + throttleType := nameserver2.ThrottleType_value[strings.ToUpper(typeStr)] + var retErr *cmderror.CmdError + if throttleType == 0 { + retErr = cmderror.ErrBsUnknownThrottleType() + retErr.Format(typeStr) + } else { + retErr = cmderror.ErrSuccess() + } + return nameserver2.ThrottleType(throttleType), retErr } diff --git a/tools-v2/internal/utils/row.go b/tools-v2/internal/utils/row.go index 09147bfd4a..d10a3213ad 100644 --- a/tools-v2/internal/utils/row.go +++ b/tools-v2/internal/utils/row.go @@ -23,84 +23,106 @@ package cobrautil const ( - ROW_ADDR = "addr" - ROW_ALLOC = "alloc" - ROW_ALLOC_SIZE = "allocatedSize" - ROW_BLOCKSIZE = "blocksize" - ROW_CAPACITY = "capacity" - ROW_CHILD_LIST = "childList" - ROW_CHILD_TYPE = "childType" - ROW_CHUNK = "chunk" - ROW_COPYSET_ID = "copysetId" - ROW_COPYSET_KEY = "copysetKey" - ROW_CREATE_TIME = "createTime" - ROW_CTIME = "ctime" - ROW_DUMMY_ADDR = "dummyAddr" - ROW_END = "end" - ROW_EPOCH = "epoch" - ROW_EXPLAIN = "explain" - ROW_EXTERNAL_ADDR = "externalAddr" - ROW_FILE_SIZE = "fileSize" - ROW_FILE_TYPE = "fileType" - ROW_FILE_NAME = "fileName" - ROW_FS_ID = "fsId" - ROW_FS_NAME = "fsName" - ROW_FS_TYPE = "fsType" - ROW_HOSTNAME = "hostname" - ROW_ID = "id" - ROW_INODE_ID = "inodeId" - ROW_INTERNAL_ADDR = "internalAddr" - ROW_KEY = "key" - ROW_LEADER_PEER = "leaderPeer" - ROW_LEFT = "left" - ROW_LENGTH = "length" - ROW_LOG_GAP = "logGap" - ROW_METASERVER = "metaserver" - ROW_METASERVER_ADDR = "metaserverAddr" - ROW_MOUNT_NUM = "mountNum" - ROW_MOUNTPOINT = "mountpoint" - ROW_NAME = "name" - ROW_NLINK = "nlink" - ROW_NUM = "num" - ROW_ONLINE_STATE = "onlineState" - ROW_OPERATION = "operation" - ROW_ORIGINAL_PATH = "originalPath" - ROW_OWNER = "owner" - ROW_PARENT = "parent" - ROW_PARENT_ID = "parentId" - ROW_PARTITION_ID = "partitionId" - ROW_PEER_ADDR = "peerAddr" - ROW_PEER_ID = "peerId" - ROW_PEER_NUMBER = "peerNumber" - ROW_PHYPOOL = "phyPool" - ROW_POOL = "pool" - ROW_POOL_ID = "poolId" - ROW_READONLY = "readonly" - ROW_RECYCLE = "recycle" - ROW_RESULT = "result" - ROW_SCAN = "scan" - ROW_SEGMENT = "segment" - ROW_SEQ = "seq" - ROW_SERVER = "server" - ROW_SIZE = "size" - ROW_START = "start" - ROW_STATE = "state" - ROW_STATUS = "status" - ROW_STRIPE = "stripe" - ROW_SUM_IN_DIR = "sumInDir" - ROW_TERM = "term" - ROW_THROTTLE = "throttle" - ROW_TOTAL = "total" - ROW_TYPE = "type" - ROW_USED = "used" - ROW_VERSION = "version" - ROW_ZONE = "zone" - ROW_IP = "ip" - ROW_PORT = "port" - ROW_REASON = "reason" - ROW_PEER = "peer" - ROW_COPYSET = "copyset" - ROW_LEADER = "leader" + ROW_ADDR = "addr" + ROW_ALLOC = "alloc" + ROW_ALLOC_SIZE = "allocatedSize" + ROW_BLOCKSIZE = "blocksize" + ROW_CAPACITY = "capacity" + ROW_CHILD_LIST = "childList" + ROW_CHILD_TYPE = "childType" + ROW_CHUNK = "chunk" + ROW_CHUNK_SIZE = "chunkSize" + ROW_COPYSET = "copyset" + ROW_COPYSET_ID = "copysetId" + ROW_COPYSET_KEY = "copysetKey" + ROW_CREATE_TIME = "createTime" + ROW_CREATED = "created" + ROW_CTIME = "ctime" + ROW_DUMMY_ADDR = "dummyAddr" + ROW_END = "end" 
+ ROW_EPOCH = "epoch" + ROW_EXPLAIN = "explain" + ROW_EXTERNAL_ADDR = "externalAddr" + ROW_FILE_NAME = "fileName" + ROW_FILE_SIZE = "fileSize" + ROW_FILE_TYPE = "fileType" + ROW_FS_ID = "fsId" + ROW_FS_NAME = "fsName" + ROW_FS_TYPE = "fsType" + ROW_GROUP = "group" + ROW_HOSTNAME = "hostname" + ROW_ID = "id" + ROW_INODE_ID = "inodeId" + ROW_INTERNAL_ADDR = "internalAddr" + ROW_IP = "ip" + ROW_KEY = "key" + ROW_LEADER = "leader" + ROW_OLDLEADER = "oldLeader" + ROW_LEADER_PEER = "leaderPeer" + ROW_LEFT = "left" + ROW_LENGTH = "length" + ROW_LOCATION = "location" + ROW_LOG_GAP = "logGap" + ROW_LOGICALPOOL = "logicalpool" + ROW_METASERVER = "metaserver" + ROW_METASERVER_ADDR = "metaserverAddr" + ROW_MOUNT_NUM = "mountNum" + ROW_MOUNTPOINT = "mountpoint" + ROW_NAME = "name" + ROW_NLINK = "nlink" + ROW_NUM = "num" + ROW_ONLINE_STATE = "onlineState" + ROW_OPERATION = "operation" + ROW_OPNAME = "opname" + ROW_ORIGINAL_PATH = "originalPath" + ROW_OWNER = "owner" + ROW_PARENT = "parent" + ROW_PARENT_ID = "parentId" + ROW_PARTITION_ID = "partitionId" + ROW_PEER = "peer" + ROW_PEER_ADDR = "peerAddr" + ROW_PEER_ID = "peerId" + ROW_PEER_NUMBER = "peerNumber" + ROW_PHYPOOL = "phyPool" + ROW_POOL = "pool" + ROW_POOL_ID = "poolId" + ROW_PORT = "port" + ROW_READONLY = "readonly" + ROW_REASON = "reason" + ROW_RECOVERING = "recovering" + ROW_RECYCLABLE = "recyclable" + ROW_RECYCLE = "recycle" + ROW_RESULT = "result" + ROW_SCAN = "scan" + ROW_LASTSCAN = "last Scan" + ROW_LAST_SCAN_CONSISTENT = "last Scan Consistent" + ROW_SEGMENT = "segment" + ROW_SEGMENT_SIZE = "segmentSize" + ROW_SEQ = "seq" + ROW_SERVER = "server" + ROW_SIZE = "size" + ROW_START = "start" + ROW_STATE = "state" + ROW_STATUS = "status" + ROW_STRIPE = "stripe" + ROW_SUM_IN_DIR = "sumInDir" + ROW_TERM = "term" + ROW_THROTTLE = "throttle" + ROW_TOTAL = "total" + ROW_TYPE = "type" + ROW_USED = "used" + ROW_VERSION = "version" + ROW_ZONE = "zone" + ROW_AVAILFLAG = "availFlag" + ROW_DRYRUN = "dryrun" + + ROW_RW_STATUS = "rwStatus" + ROW_DISK_STATE = "diskState" + ROW_COPYSET_NUM = "copysetNum" + ROW_DISK_CAPACITY = "diskCapacity" + ROW_DISK_USED = "diskUsed" + ROW_UNHEALTHY_COPYSET = "unhealthyCopyset" + ROW_EXT_ADDR = "extAddr" // s3 ROW_S3CHUNKINFO_CHUNKID = "s3ChunkId" @@ -109,14 +131,22 @@ const ( ROW_S3CHUNKINFO_SIZE = "s3Size" // vale - ROW_VALUE_ADD = "add" - ROW_VALUE_DEL = "del" - ROW_VALUE_DNE = "DNE" - ROW_VALUE_OFFLINE = "offline" - ROW_VALUE_UNKNOWN = "unknown" - ROW_VALUE_SUCCESS = "success" - ROW_VALUE_FAILED = "failed" - ROW_VALUE_NULL = "null" + ROW_VALUE_ADD = "add" + ROW_VALUE_DEL = "del" + ROW_VALUE_DNE = "DNE" + ROW_VALUE_FAILED = "failed" + ROW_VALUE_LOGICAL = "logical" + ROW_VALUE_NO_RECOVERING = "" + ROW_VALUE_NO_VALUE = "-" + ROW_VALUE_NULL = "null" + ROW_VALUE_OFFLINE = "offline" + ROW_VALUE_ONLINE = "online" + ROW_VALUE_PHYSICAL = "physical" + ROW_VALUE_RECOVERING_OUT = "recovering from out" + ROW_VALUE_SUCCESS = "success" + ROW_VALUE_UNKNOWN = "unknown" + ROW_VALUE_TRUE = "true" + ROW_VALUE_FALSE = "false" ) // topology type diff --git a/tools-v2/internal/utils/string.go b/tools-v2/internal/utils/string.go index 151479ef79..92060c760a 100644 --- a/tools-v2/internal/utils/string.go +++ b/tools-v2/internal/utils/string.go @@ -38,18 +38,15 @@ import ( ) const ( - IP_PORT_REGEX = 
"((\\d|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5]):([0-9]|[1-9]\\d{1,3}|[1-5]\\d{4}|6[0-4]\\d{4}|65[0-4]\\d{2}|655[0-2]\\d|6553[0-5]))|(\\d|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5])" PATH_REGEX = `^(/[^/ ]*)+/?$` FS_NAME_REGEX = "^([a-z0-9]+\\-?)+$" -) + K_STRING_TRUE = "true" -func IsValidAddr(addr string) bool { - matched, err := regexp.MatchString(IP_PORT_REGEX, addr) - if err != nil || !matched { - return false - } - return true -} + ROOT_PATH = "/" + RECYCLEBIN_PATH = "/RecycleBin" + + FALSE_STRING = "false" +) func IsValidFsname(fsName string) bool { matched, err := regexp.MatchString(FS_NAME_REGEX, fsName) @@ -146,7 +143,7 @@ func ToUnderscoredName(src string) string { if i != 0 && !IsUpper(rune(src[i-1])) && ret[len(ret)-1] != '-' { ret += "_" } - ret += string(c-'A'+'a') + ret += string(c - 'A' + 'a') } else { ret += string(c) } @@ -174,3 +171,27 @@ func Addr2IpPort(addr string) (string, uint32, *cmderror.CmdError) { } return ipPort[0], uint32(u64Port), cmderror.Success() } + +func StringList2Uint64List(strList []string) ([]uint64, error) { + retList := make([]uint64, 0) + for _, str := range strList { + v, err := strconv.ParseUint(str, 10, 64) + if err != nil { + return nil, err + } + retList = append(retList, v) + } + return retList, nil +} + +func StringList2Uint32List(strList []string) ([]uint32, error) { + retList := make([]uint32, 0) + for _, str := range strList { + v, err := strconv.ParseUint(str, 10, 32) + if err != nil { + return nil, err + } + retList = append(retList, uint32(v)) + } + return retList, nil +} diff --git a/tools-v2/internal/utils/cobra.go b/tools-v2/internal/utils/template/cobra.go similarity index 56% rename from tools-v2/internal/utils/cobra.go rename to tools-v2/internal/utils/template/cobra.go index c8e211afbc..91241f5710 100644 --- a/tools-v2/internal/utils/cobra.go +++ b/tools-v2/internal/utils/template/cobra.go @@ -19,7 +19,7 @@ * Created Date: 2022-05-09 * Author: chengyi (Cyber-SiKu) */ -package cobrautil +package cobratemplate import ( "fmt" @@ -27,7 +27,9 @@ import ( "github.com/docker/cli/cli" "github.com/docker/cli/cli/command" "github.com/moby/term" + "github.com/opencurve/curve/tools-v2/pkg/config" "github.com/spf13/cobra" + "github.com/spf13/pflag" ) var ( @@ -62,18 +64,26 @@ Commands: {{- end}} {{- end}} -{{- if .HasAvailableFlags}} +{{- if .HasAvailableLocalFlags}} Flags: -{{ wrappedFlagUsages . | trimRightSpace}} - +{{ wrapLocalFlagUsages . | trimRightSpace}} {{- end}} +{{- if .HasAvailableInheritedFlags}} +Global Flags: +{{ wrapInheritedFlagUsages . 
| trimRightSpace}} +{{- end}} {{- if .HasExample}} Examples: {{ .Example }} +{{ else if not .HasSubCommands}} + +Examples: +{{ genExample .}} + {{- end}} {{- if .HasSubCommands }} @@ -97,12 +107,28 @@ func hasSubCommands(cmd *cobra.Command) bool { return len(subCommands(cmd)) > 0 } -func wrappedFlagUsages(cmd *cobra.Command) string { +// func wrappedFlagUsages(cmd *cobra.Command) string { +// width := 80 +// if ws, err := term.GetWinsize(0); err == nil { +// width = int(ws.Width) +// } +// return cmd.Flags().FlagUsagesWrapped(width - 1) +// } + +func wrapLocalFlagUsages(cmd *cobra.Command) string { width := 80 if ws, err := term.GetWinsize(0); err == nil { width = int(ws.Width) } - return cmd.Flags().FlagUsagesWrapped(width - 1) + return cmd.LocalFlags().FlagUsagesWrapped(width - 1) +} + +func wrapInheritedFlagUsages(cmd *cobra.Command) string { + width := 80 + if ws, err := term.GetWinsize(0); err == nil { + width = int(ws.Width) + } + return cmd.InheritedFlags().FlagUsagesWrapped(width - 1) } func SetFlagErrorFunc(cmd *cobra.Command) { @@ -119,9 +145,62 @@ func SetHelpTemplate(cmd *cobra.Command) { cmd.SetHelpTemplate(helpTemplate) } +type cmdType int + +const ( + BSNAME = "bs" + FSNAME = "fs" + Unknown cmdType = iota + RootCmd + BsCmd + FsCmd +) + +// return the type of command (bs or fs or root) +func GetCmdType(cmd *cobra.Command) cmdType { + if !cmd.HasParent() { + return RootCmd + } + if cmd.Parent().HasParent() { + return GetCmdType(cmd.Parent()) + } + switch cmd.Name() { + case BSNAME: + return BsCmd + case FSNAME: + return FsCmd + default: + return Unknown + } +} + +func genExample(cmd *cobra.Command) string { + ret := cmd.CommandPath() + if cmd.HasLocalFlags() { + lFlags := cmd.LocalFlags() + lFlags.VisitAll(func(flag *pflag.Flag) { + required := flag.Annotations[cobra.BashCompOneRequiredFlag] + if len(required) > 0 && required[0] == "true" { + ret += fmt.Sprintf(" --%s %v", flag.Name, AvailableValueStr(flag, GetCmdType(cmd))) + } + }) + } + return ret +} + func SetUsageTemplate(cmd *cobra.Command) { cobra.AddTemplateFunc("subCommands", subCommands) cobra.AddTemplateFunc("hasSubCommands", hasSubCommands) - cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages) + cobra.AddTemplateFunc("wrapLocalFlagUsages", wrapLocalFlagUsages) + cobra.AddTemplateFunc("wrapInheritedFlagUsages", wrapInheritedFlagUsages) + cobra.AddTemplateFunc("genExample", genExample) cmd.SetUsageTemplate(usageTemplate) } + +func AvailableValueStr(flag *pflag.Flag, cmdtype cmdType) string { + switch cmdtype { + case BsCmd: + return config.BsAvailableValueStr(flag.Name) + } + return "" +} diff --git a/tools-v2/pkg/cli/command/base.go b/tools-v2/pkg/cli/command/base.go index eaaf8a5ded..ccc6c6a8e0 100644 --- a/tools-v2/pkg/cli/command/base.go +++ b/tools-v2/pkg/cli/command/base.go @@ -36,6 +36,7 @@ import ( cmderror "github.com/opencurve/curve/tools-v2/internal/error" cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" process "github.com/opencurve/curve/tools-v2/internal/utils/process" + cobratemplate "github.com/opencurve/curve/tools-v2/internal/utils/template" config "github.com/opencurve/curve/tools-v2/pkg/config" "github.com/spf13/cobra" "google.golang.org/grpc" @@ -112,6 +113,9 @@ func NewFinalCurveCli(cli *FinalCurveCmd, funcs FinalCurveCmdFunc) *cobra.Comman RunE: func(cmd *cobra.Command, args []string) error { cmd.SilenceUsage = true err := funcs.Init(cmd, args) + if cli.Cmd.Flag(config.VERBOSE) == nil { + cli.Cmd.PersistentFlags().BoolP(config.VERBOSE, "v", false, "verbose output") + } 
show := config.GetFlagBool(cli.Cmd, config.VERBOSE) process.SetShow(show) if err != nil { @@ -127,7 +131,7 @@ func NewFinalCurveCli(cli *FinalCurveCmd, funcs FinalCurveCmdFunc) *cobra.Comman } config.AddFormatFlag(cli.Cmd) funcs.AddFlags() - cobrautil.SetFlagErrorFunc(cli.Cmd) + cobratemplate.SetFlagErrorFunc(cli.Cmd) // set table cli.TableNew = tablewriter.NewWriter(os.Stdout) @@ -143,7 +147,7 @@ func NewMidCurveCli(cli *MidCurveCmd, add MidCurveCmdFunc) *cobra.Command { cli.Cmd = &cobra.Command{ Use: cli.Use, Short: cli.Short, - Args: cobrautil.NoArgs, + Args: cobratemplate.NoArgs, } add.AddSubCommands() return cli.Cmd @@ -199,7 +203,7 @@ func QueryMetric(m *Metric) (string, *cmderror.CmdError) { } } retErr := cmderror.MergeCmdError(vecErrs) - return retStr, &retErr + return retStr, retErr } func GetMetricValue(metricRet string) (string, *cmderror.CmdError) { @@ -293,7 +297,7 @@ func GetRpcResponse(rpc *Rpc, rpcFunc RpcFunc) (interface{}, *cmderror.CmdError) results := make(chan Result, size) for _, addr := range rpc.Addrs { go func(address string) { - log.Printf("%s: start to dial", address) + log.Printf("%s: start to dial [%s]", address, rpc.RpcFuncName) ctx, cancel := context.WithTimeout(context.Background(), rpc.RpcTimeout) defer cancel() conn, err := grpc.DialContext(ctx, address, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) @@ -348,12 +352,13 @@ func GetRpcResponse(rpc *Rpc, rpcFunc RpcFunc) (interface{}, *cmderror.CmdError) } if len(vecErrs) >= len(rpc.Addrs) { retErr := cmderror.MergeCmdError(vecErrs) - return ret, &retErr + return ret, retErr } return ret, cmderror.ErrSuccess() } type RpcResult struct { + position int Response interface{} Error *cmderror.CmdError } @@ -367,21 +372,21 @@ func GetRpcListResponse(rpcList []*Rpc, rpcFunc []RpcFunc) ([]interface{}, []*cm size := 0 for i := range rpcList { size++ - go func(rpc *Rpc, rpcFunc RpcFunc) { + go func(position int, rpc *Rpc, rpcFunc RpcFunc) { res, err := GetRpcResponse(rpc, rpcFunc) - results <- RpcResult{res, err} - }(rpcList[i], rpcFunc[i]) + results <- RpcResult{position, res, err} + }(i, rpcList[i], rpcFunc[i]) } count := 0 - var retRes []interface{} + retRes := make([]interface{}, len(rpcList)) var vecErrs []*cmderror.CmdError for res := range results { if res.Error.TypeCode() != cmderror.CODE_SUCCESS { // get fail vecErrs = append(vecErrs, res.Error) } else { - retRes = append(retRes, res.Response) + retRes[res.position] = res.Response } count++ diff --git a/tools-v2/pkg/cli/command/curvebs/bs.go b/tools-v2/pkg/cli/command/curvebs/bs.go index deb52faa85..3fa7fa86c6 100644 --- a/tools-v2/pkg/cli/command/curvebs/bs.go +++ b/tools-v2/pkg/cli/command/curvebs/bs.go @@ -26,10 +26,13 @@ import ( "github.com/spf13/cobra" basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/check" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/clean_recycle" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/create" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/delete" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/list" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/query" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/snapshot" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/status" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/update" ) @@ -48,6 +51,9 @@ func (bsCmd *CurveBsCommand) AddSubCommands() { delete.NewDeleteCommand(), 
create.NewCreateCmd(), update.NewUpdateCommand(), + clean_recycle.NewCleanRecycleCommand(), + check.NewCheckCommand(), + snapshot.NewSnapshotCommand(), ) } diff --git a/tools-v2/pkg/cli/command/curvebs/check/check.go b/tools-v2/pkg/cli/command/curvebs/check/check.go new file mode 100644 index 0000000000..3b94148ed1 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/check/check.go @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2022 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: CurveCli + * Created Date: 2023-04-24 + * Author: baytan0720 + */ + +package check + +import ( + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/check/copyset" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/check/operator" + "github.com/spf13/cobra" +) + +type CheckCommand struct { + basecmd.MidCurveCmd +} + +var _ basecmd.MidCurveCmdFunc = (*CheckCommand)(nil) // check interface + +func (checkCmd *CheckCommand) AddSubCommands() { + checkCmd.Cmd.AddCommand( + copyset.NewCopysetCommand(), + operator.NewOperatorCommand(), + ) +} + +func NewCheckCommand() *cobra.Command { + checkCmd := &CheckCommand{ + basecmd.MidCurveCmd{ + Use: "check", + Short: "check the health of resources in curvebs", + }, + } + return basecmd.NewMidCurveCli(&checkCmd.MidCurveCmd, checkCmd) +} diff --git a/tools-v2/pkg/cli/command/curvebs/check/copyset/copyset.go b/tools-v2/pkg/cli/command/curvebs/check/copyset/copyset.go new file mode 100644 index 0000000000..2f5afc3fb4 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/check/copyset/copyset.go @@ -0,0 +1,371 @@ +/* + * Copyright (c) 2022 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +/* + * Project: CurveCli + * Created Date: 2023-04-24 + * Author: baytan0720 + */ + +package copyset + +import ( + "fmt" + "sort" + "strings" + "sync" + "time" + + mapset "github.com/deckarep/golang-set/v2" + "github.com/olekukonko/tablewriter" + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/query/chunkserver" + status "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/query/copyset" + fscopyset "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvefs/check/copyset" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/common" + "github.com/opencurve/curve/tools-v2/proto/proto/heartbeat" + "github.com/spf13/cobra" +) + +const ( + copysetExample = `$ curve bs check copyset --copysetid 1 --logicalpoolid 1` +) + +type CopysetCommand struct { + basecmd.FinalCurveCmd + key2Copyset *map[uint64]*cobrautil.BsCopysetInfoStatus + Key2LeaderInfo *map[uint64]*fscopyset.CopysetLeaderInfo + Key2Health *map[uint64]cobrautil.ClUSTER_HEALTH_STATUS + leaderAddr mapset.Set[string] +} + +var _ basecmd.FinalCurveCmdFunc = (*CopysetCommand)(nil) // check interface + +func NewCopysetCommand() *cobra.Command { + return NewCheckCopysetCommand().Cmd +} + +func NewCheckCopysetCommand() *CopysetCommand { + copysetCmd := &CopysetCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "copyset", + Short: "check copysets health in curvebs", + Example: copysetExample, + }, + } + basecmd.NewFinalCurveCli(©setCmd.FinalCurveCmd, copysetCmd) + return copysetCmd +} + +func (cCmd *CopysetCommand) AddFlags() { + config.AddBsMdsFlagOption(cCmd.Cmd) + config.AddRpcRetryTimesFlag(cCmd.Cmd) + config.AddRpcTimeoutFlag(cCmd.Cmd) + + config.AddBsCopysetIdSliceRequiredFlag(cCmd.Cmd) + config.AddBsLogicalPoolIdSliceRequiredFlag(cCmd.Cmd) + config.AddBsMarginOptionFlag(cCmd.Cmd) +} + +func (cCmd *CopysetCommand) Init(cmd *cobra.Command, args []string) error { + timeout := config.GetFlagDuration(cCmd.Cmd, config.RPCTIMEOUT) + + logicalpoolidList := config.GetBsFlagStringSlice(cCmd.Cmd, config.CURVEBS_LOGIC_POOL_ID) + copysetidList := config.GetBsFlagStringSlice(cCmd.Cmd, config.CURVEBS_COPYSET_ID) + if len(logicalpoolidList) != len(copysetidList) { + return fmt.Errorf("the number of logicpoolid and copysetid is not equal") + } + logicalpoolIds, errParse := cobrautil.StringList2Uint32List(logicalpoolidList) + if errParse != nil { + return fmt.Errorf("parse logicalpoolid%v fail", logicalpoolidList) + } + copysetIds, errParse := cobrautil.StringList2Uint32List(copysetidList) + if errParse != nil { + return fmt.Errorf("parse copysetid%v fail", copysetidList) + } + + key2Location, err := chunkserver.GetChunkServerListInCopySets(cCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + config.AddBsPeersConfFlag(cCmd.Cmd) + for i := 0; i < len(logicalpoolIds); i++ { + logicpoolid := logicalpoolIds[i] + copysetid := copysetIds[i] + key := cobrautil.GetCopysetKey(uint64(logicpoolid), uint64(copysetid)) + + var peerAddress []string + for _, cs := range (*key2Location)[key] { + address := fmt.Sprintf("%s:%d", *cs.HostIp, *cs.Port) + peerAddress = append(peerAddress, address) + } + config.ResetStringSliceFlag(cCmd.Cmd.Flag(config.CURVEBS_LOGIC_POOL_ID), logicalpoolidList[i]) + 
config.ResetStringSliceFlag(cCmd.Cmd.Flag(config.CURVEBS_COPYSET_ID), copysetidList[i]) + cCmd.Cmd.ParseFlags([]string{ + fmt.Sprintf("--%s", config.CURVEBS_PEERS_ADDRESS), + strings.Join(peerAddress, ","), + }) + peer2Status, err := status.GetCopysetStatus(cCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + peers := make([]*common.Peer, 0, len(*peer2Status)) + var leaderPeer *common.Peer + for _, result := range *peer2Status { + if result != nil && result.Leader != nil && result.Peer != nil && + result.Leader.Address != nil && result.Peer.Address != nil { + if *result.Leader.Address == *result.Peer.Address { + leaderPeer = result.Peer + } + peers = append(peers, result.Peer) + } else { + peers = append(peers, nil) + } + } + + copysetKey := cobrautil.GetCopysetKey(uint64(logicpoolid), uint64(copysetid)) + if cCmd.key2Copyset == nil { + key2copyset := make(map[uint64]*cobrautil.BsCopysetInfoStatus) + cCmd.key2Copyset = &key2copyset + } + + (*cCmd.key2Copyset)[copysetKey] = &cobrautil.BsCopysetInfoStatus{ + Info: &heartbeat.CopySetInfo{ + CopysetId: ©setid, + Peers: peers, + LeaderPeer: leaderPeer, + }, + Peer2Status: *peer2Status, + } + } + + header := []string{cobrautil.ROW_COPYSET_KEY, cobrautil.ROW_COPYSET_ID, + cobrautil.ROW_POOL_ID, cobrautil.ROW_STATUS, cobrautil.ROW_LOG_GAP, + cobrautil.ROW_EXPLAIN, + } + cCmd.SetHeader(header) + cCmd.TableNew.SetAutoMergeCellsByColumnIndex(cobrautil.GetIndexSlice( + cCmd.Header, []string{cobrautil.ROW_COPYSET_ID, cobrautil.ROW_POOL_ID, + cobrautil.ROW_STATUS, + }, + )) + + // update leaderAddr + cCmd.leaderAddr = mapset.NewSet[string]() + for _, cs := range *cCmd.key2Copyset { + addr, err := cobrautil.PeerAddressToAddr(cs.Info.LeaderPeer.GetAddress()) + if err.TypeCode() != cmderror.CODE_SUCCESS { + err := cmderror.ErrCopysetInfo() + err.Format(cs.Info.CopysetId) + } else { + cCmd.leaderAddr.Add(addr) + } + } + + key2LeaderInfo := make(map[uint64]*fscopyset.CopysetLeaderInfo) + cCmd.Key2LeaderInfo = &key2LeaderInfo + err = cCmd.UpdateCopysteGap(timeout) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + return nil +} + +func (cCmd *CopysetCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&cCmd.FinalCurveCmd, cCmd) +} + +func (cCmd *CopysetCommand) RunCommand(cmd *cobra.Command, args []string) error { + key2Health := make(map[uint64]cobrautil.ClUSTER_HEALTH_STATUS) + cCmd.Key2Health = &key2Health + rows := make([]map[string]string, 0) + var errs []*cmderror.CmdError + for k, v := range *cCmd.key2Copyset { + copysetHealthCount := make(map[cobrautil.COPYSET_HEALTH_STATUS]uint32) + row := make(map[string]string) + row[cobrautil.ROW_COPYSET_KEY] = fmt.Sprintf("%d", k) + poolid, copysetid := cobrautil.CopysetKey2PoolidCopysetid(k) + row[cobrautil.ROW_POOL_ID] = fmt.Sprintf("%d", poolid) + row[cobrautil.ROW_COPYSET_ID] = fmt.Sprintf("%d", copysetid) + if v == nil { + row[cobrautil.ROW_STATUS] = cobrautil.CopysetHealthStatus_Str[int32(cobrautil.COPYSET_NOTEXIST)] + } else { + status, errsCheck := cobrautil.CheckBsCopySetHealth(v) + copysetHealthCount[status]++ + row[cobrautil.ROW_STATUS] = cobrautil.CopysetHealthStatus_Str[int32(status)] + explain := "" + if status != cobrautil.COPYSET_OK { + for i, e := range errsCheck { + if i != len(errsCheck) { + explain += fmt.Sprintf("%s\n", e.Message) + } else { + explain += e.Message + } + } + } + margin := config.GetMarginOptionFlag(cCmd.Cmd) + leaderInfo := (*cCmd.Key2LeaderInfo)[k] + if leaderInfo == nil { + explain 
= "no leader peer" + copysetHealthCount[cobrautil.COPYSET_ERROR]++ + row[cobrautil.ROW_STATUS] = cobrautil.CopysetHealthStatus_Str[int32(cobrautil.COPYSET_ERROR)] + } else if leaderInfo.Snapshot { + installSnapshot := "installing snapshot" + if len(explain) > 0 { + explain += "\n" + } + explain += installSnapshot + if row[cobrautil.ROW_STATUS] == cobrautil.COPYSET_OK_STR { + row[cobrautil.ROW_STATUS] = cobrautil.COPYSET_WARN_STR + copysetHealthCount[cobrautil.COPYSET_WARN]++ + } + } else { + gap := leaderInfo.Gap + row[cobrautil.ROW_LOG_GAP] = fmt.Sprintf("%d", gap) + if gap >= margin { + behind := fmt.Sprintf("log behind %d", gap) + if len(explain) > 0 { + explain += "\n" + } + explain += behind + if row[cobrautil.ROW_STATUS] == cobrautil.COPYSET_OK_STR { + row[cobrautil.ROW_STATUS] = cobrautil.COPYSET_WARN_STR + copysetHealthCount[cobrautil.COPYSET_WARN]++ + } + } + + } + row[cobrautil.ROW_EXPLAIN] = explain + } + if copysetHealthCount[cobrautil.COPYSET_NOTEXIST] > 0 || copysetHealthCount[cobrautil.COPYSET_ERROR] > 0 { + (*cCmd.Key2Health)[k] = cobrautil.HEALTH_ERROR + } else if copysetHealthCount[cobrautil.COPYSET_WARN] > 0 { + (*cCmd.Key2Health)[k] = cobrautil.HEALTH_WARN + } else { + (*cCmd.Key2Health)[k] = cobrautil.HEALTH_OK + } + rows = append(rows, row) + } + retErr := cmderror.MergeCmdErrorExceptSuccess(errs) + cCmd.Error = retErr + sort.Slice(rows, func(i, j int) bool { + return rows[i][cobrautil.ROW_COPYSET_KEY] < rows[j][cobrautil.ROW_COPYSET_KEY] + }) + list := cobrautil.ListMap2ListSortByKeys(rows, cCmd.Header, []string{ + cobrautil.ROW_POOL_ID, cobrautil.ROW_STATUS, cobrautil.ROW_COPYSET_ID, + }) + cCmd.TableNew.AppendBulk(list) + cCmd.Result = rows + return nil +} + +func (cCmd *CopysetCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&cCmd.FinalCurveCmd) +} + +func (cCmd *CopysetCommand) UpdateCopysetGap(timeout time.Duration) *cmderror.CmdError { + var key2LeaderInfo sync.Map + size := config.MaxChannelSize() + errChan := make(chan *cmderror.CmdError, size) + count := 0 + for iter := range cCmd.leaderAddr.Iter() { + go func(addr string) { + err := fscopyset.GetLeaderCopysetGap(addr, &key2LeaderInfo, timeout) + errChan <- err + }(iter) + count++ + } + var errs []*cmderror.CmdError + for i := 0; i < count; i++ { + err := <-errChan + if err.TypeCode() != cmderror.CODE_SUCCESS { + errs = append(errs, err) + } + } + key2LeaderInfo.Range(func(key, value interface{}) bool { + (*cCmd.Key2LeaderInfo)[key.(uint64)] = value.(*fscopyset.CopysetLeaderInfo) + return true + }) + retErr := cmderror.MergeCmdErrorExceptSuccess(errs) + return retErr +} + +func (cCmd *CopysetCommand) UpdateCopysteGap(timeout time.Duration) *cmderror.CmdError { + var key2LeaderInfo sync.Map + size := config.MaxChannelSize() + errChan := make(chan *cmderror.CmdError, size) + count := 0 + for iter := range cCmd.leaderAddr.Iter() { + go func(addr string) { + err := fscopyset.GetLeaderCopysetGap(addr, &key2LeaderInfo, timeout) + errChan <- err + }(iter) + count++ + } + var errs []*cmderror.CmdError + for i := 0; i < count; i++ { + err := <-errChan + if err.TypeCode() != cmderror.CODE_SUCCESS { + errs = append(errs, err) + } + } + key2LeaderInfo.Range(func(key, value interface{}) bool { + (*cCmd.Key2LeaderInfo)[key.(uint64)] = value.(*fscopyset.CopysetLeaderInfo) + return true + }) + retErr := cmderror.MergeCmdErrorExceptSuccess(errs) + return retErr +} + +func CheckCopysets(caller *cobra.Command) (*map[uint64]cobrautil.ClUSTER_HEALTH_STATUS, *cmderror.CmdError) { + cCmd := 
NewCheckCopysetCommand() + cCmd.Cmd.SetArgs([]string{fmt.Sprintf("--%s", config.FORMAT), config.FORMAT_NOOUT}) + config.AlignFlagsValue(caller, cCmd.Cmd, []string{ + config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR, + config.CURVEBS_COPYSET_ID, config.CURVEBS_LOGIC_POOL_ID, config.CURVEBS_MARGIN, + }) + cCmd.Cmd.SilenceErrors = true + cCmd.Cmd.SilenceUsage = true + err := cCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrCheckCopyset() + retErr.Format(err.Error()) + return cCmd.Key2Health, retErr + } + return cCmd.Key2Health, cmderror.Success() +} + +func GetCopysetsStatus(caller *cobra.Command) (*tablewriter.Table, *cmderror.CmdError) { + cCmd := NewCheckCopysetCommand() + cCmd.Cmd.SetArgs([]string{fmt.Sprintf("--%s", config.FORMAT), config.FORMAT_NOOUT}) + config.AlignFlagsValue(caller, cCmd.Cmd, []string{ + config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR, + config.CURVEBS_COPYSET_ID, config.CURVEBS_LOGIC_POOL_ID, config.CURVEBS_MARGIN, + }) + cCmd.Cmd.SilenceErrors = true + cCmd.Cmd.SilenceUsage = true + err := cCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrCheckCopyset() + retErr.Format(err.Error()) + return nil, retErr + } + return cCmd.TableNew, cCmd.Error +} diff --git a/tools-v2/pkg/cli/command/curvebs/check/operator/operator.go b/tools-v2/pkg/cli/command/curvebs/check/operator/operator.go new file mode 100644 index 0000000000..61c9c89914 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/check/operator/operator.go @@ -0,0 +1,135 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */ +/* +* Project: curve +* Created Date: 2023-05-05 +* Author: chengyi01 + */ + +package operator + +import ( + "strconv" + "time" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/spf13/cobra" +) + +const ( + operatorExample = `$ curve bs check operator --op=operator|change_peer|add_peer|remove_peer|transfer_leader` +) + +const ( + CHECK_SLEEP_TIME = 1 * time.Second +) + +type OperatorCommand struct { + basecmd.FinalCurveCmd + metric *basecmd.Metric + checkTime time.Duration + row map[string]string +} + +var _ basecmd.FinalCurveCmdFunc = (*OperatorCommand)(nil) // check interface + +func NewOperatorCommand() *cobra.Command { + return NewCheckOperatorCommand().Cmd +} + +func (oCmd *OperatorCommand) AddFlags() { + config.AddHttpTimeoutFlag(oCmd.Cmd) + config.AddBsMdsFlagOption(oCmd.Cmd) + config.AddBsOpRequiredFlag(oCmd.Cmd) + config.AddBsCheckTimeOptionFlag(oCmd.Cmd) +} + +func (oCmd *OperatorCommand) Init(cmd *cobra.Command, args []string) error { + opName := config.GetBsFlagString(cmd, config.CURVEBS_OP) + opType, cmdErr := cobrautil.SupportOpName(opName) + if cmdErr.TypeCode() != cmderror.CODE_SUCCESS { + return cmdErr.ToError() + } + if oCmd.Cmd.Flags().Changed(config.CURVEBS_CHECK_TIME) { + oCmd.checkTime = config.GetBsFlagDuration(cmd, config.CURVEBS_CHECK_TIME) + } else { + oCmd.checkTime = cobrautil.GetDefaultCheckTime(opType) + } + addrs, cmdErr := config.GetBsMdsAddrSlice(oCmd.Cmd) + if cmdErr.TypeCode() != cmderror.CODE_SUCCESS { + return cmdErr.ToError() + } + timeout := config.GetBsFlagDuration(oCmd.Cmd, config.HTTPTIMEOUT) + subUri := cobrautil.GetOpNumSubUri(opName) + oCmd.metric = basecmd.NewMetric(addrs, subUri, timeout) + + oCmd.SetHeader([]string{cobrautil.ROW_OPNAME, cobrautil.ROW_NUM}) + oCmd.row = make(map[string]string) + oCmd.row[cobrautil.ROW_OPNAME] = opName + + return nil +} + +func (oCmd *OperatorCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&oCmd.FinalCurveCmd, oCmd) +} + +func (oCmd *OperatorCommand) RunCommand(cmd *cobra.Command, args []string) error { + start := time.Now() + for { + res, metricErr := basecmd.QueryMetric(oCmd.metric) + if metricErr.TypeCode() != cmderror.CODE_SUCCESS { + return metricErr.ToError() + } + resValueStr, metricErr := basecmd.GetMetricValue(res) + if metricErr.TypeCode() != cmderror.CODE_SUCCESS { + return metricErr.ToError() + } + resValue, err := strconv.ParseUint(resValueStr, 10, 64) + if err != nil { + metricErr := cmderror.ErrParseMetric() + metricErr.Format(resValueStr) + return metricErr.ToError() + } + oCmd.row[cobrautil.ROW_NUM] = resValueStr + if resValue != 0 || time.Since(start) >= oCmd.checkTime { + break + } + time.Sleep(CHECK_SLEEP_TIME) + } + list := cobrautil.Map2List(oCmd.row, oCmd.Header) + oCmd.TableNew.Append(list) + return nil +} + +func (oCmd *OperatorCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&oCmd.FinalCurveCmd) +} + +func NewCheckOperatorCommand() *OperatorCommand { + operatorCmd := &OperatorCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "operator", + Short: "check the operators", + // Example: operatorExample, + }} + basecmd.NewFinalCurveCli(&operatorCmd.FinalCurveCmd, operatorCmd) + return operatorCmd +} diff --git 
a/tools-v2/pkg/cli/command/curvebs/clean_recycle/clean_recycle.go b/tools-v2/pkg/cli/command/curvebs/clean_recycle/clean_recycle.go new file mode 100644 index 0000000000..dad824d9bf --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/clean_recycle/clean_recycle.go @@ -0,0 +1,134 @@ +/* + * Project: tools-v2 + * Created Date: 2023-4-6 + * Author: nanguanlin6@gmail.com + */ + +package clean_recycle + +import ( + "strings" + "time" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/delete/file" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/list/dir" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/nameserver2" + "github.com/spf13/cobra" +) + +const ( + cleanRecycleBinExample = `$ curve bs clean-recycle --expiredtime=1h --recycleprefix=/test` + RECYCLEBINDIR = "/RecycleBin" +) + +// CleanRecycleCommand +type CleanRecycleCommand struct { + basecmd.FinalCurveCmd + recyclePrefix string + expireTime time.Duration +} + +var _ basecmd.FinalCurveCmdFunc = (*CleanRecycleCommand)(nil) // check interface + +// new CleanRecycleCommand function +func NewCleanRecycleCommand() *cobra.Command { + crCmd := &CleanRecycleCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "clean-recycle", + Short: "clean recycle bin", + Example: cleanRecycleBinExample, + }, + } + return basecmd.NewFinalCurveCli(&crCmd.FinalCurveCmd, crCmd) +} + +// method of CleanRecycleCommand struct +func (crCmd *CleanRecycleCommand) Init(cmd *cobra.Command, args []string) error { + crCmd.recyclePrefix = config.GetBsRecyclePrefix(crCmd.Cmd) + crCmd.expireTime = config.GetBsExpireTime(crCmd.Cmd) + header := []string{cobrautil.ROW_RESULT} + crCmd.SetHeader(header) + crCmd.TableNew.SetAutoMergeCellsByColumnIndex(cobrautil.GetIndexSlice( + crCmd.Header, header, + )) + return nil +} + +func (crCmd *CleanRecycleCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&crCmd.FinalCurveCmd, crCmd) +} + +func (crCmd *CleanRecycleCommand) RunCommand(cmd *cobra.Command, args []string) error { + // Get the file infos in recycle bin + crCmd.Cmd.Flags().Set(config.CURVEBS_PATH, RECYCLEBINDIR) + resp, err := dir.ListDir(crCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + crCmd.Error = err + crCmd.Result = cobrautil.ROW_VALUE_FAILED + return err.ToError() + } + + // Define the needDelete function + needDelete := func(fileInfo *nameserver2.FileInfo, now time.Time, expireTime time.Duration) bool { + createTime := time.Unix(int64(fileInfo.GetCtime()/1000000), 0) + return createTime.Add(expireTime).Before(now) + } + + // Iterate through files and delete if necessary + now := time.Now() + + var errs []*cmderror.CmdError + infos := resp.GetFileInfo() + for _, fileInfo := range infos { + originPath := fileInfo.GetOriginalFullPathName() + if !strings.HasPrefix(originPath, crCmd.recyclePrefix) || !needDelete(fileInfo, now, crCmd.expireTime) { + continue + } + + filename := RECYCLEBINDIR + "/" + fileInfo.GetFileName() + crCmd.Cmd.Flags().Set(config.CURVEBS_PATH, filename) + crCmd.Cmd.Flags().Set(config.CURVEBS_FORCE, cobrautil.K_STRING_TRUE) + deleteResult, err := file.DeleteFile(crCmd.Cmd) + if deleteResult.GetStatusCode() != nameserver2.StatusCode_kOK { + errs = append(errs, err) + continue + } + } + 
+ if len(errs) != 0 { + crCmd.Result = cobrautil.ROW_VALUE_FAILED + crCmd.Error = cmderror.MergeCmdError(errs) + return crCmd.Error.ToError() + } + + out := make(map[string]string) + out[cobrautil.ROW_RESULT] = cobrautil.ROW_VALUE_SUCCESS + list := cobrautil.Map2List(out, []string{cobrautil.ROW_RESULT}) + crCmd.TableNew.Append(list) + + crCmd.Result = out + crCmd.Error = cmderror.ErrSuccess() + return nil +} + +func (crCmd *CleanRecycleCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&crCmd.FinalCurveCmd) +} + +func (crCmd *CleanRecycleCommand) AddFlags() { + config.AddBsMdsFlagOption(crCmd.Cmd) + config.AddRpcRetryTimesFlag(crCmd.Cmd) + config.AddRpcTimeoutFlag(crCmd.Cmd) + config.AddBsUserOptionFlag(crCmd.Cmd) + config.AddBsPasswordOptionFlag(crCmd.Cmd) + + config.AddBsForceDeleteOptionFlag(crCmd.Cmd) + config.AddBsPathOptionFlag(crCmd.Cmd) + config.AddBsRecyclePrefixOptionFlag(crCmd.Cmd) + config.AddBsExpireTimeOptionFlag(crCmd.Cmd) +} diff --git a/tools-v2/pkg/cli/command/curvebs/create/create.go b/tools-v2/pkg/cli/command/curvebs/create/create.go index 35e22b9679..947385d113 100644 --- a/tools-v2/pkg/cli/command/curvebs/create/create.go +++ b/tools-v2/pkg/cli/command/curvebs/create/create.go @@ -25,6 +25,8 @@ package create import ( basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/create/cluster" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/create/dir" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/create/file" "github.com/spf13/cobra" ) @@ -37,6 +39,8 @@ var _ basecmd.MidCurveCmdFunc = (*CreateCmd)(nil) // check interface func (createCmd *CreateCmd) AddSubCommands() { createCmd.Cmd.AddCommand( cluster.NewClusterTopoCmd(), + dir.NewDirectoryCommand(), + file.NewFileCommand(), ) } diff --git a/tools-v2/pkg/cli/command/curvebs/create/dir/dir.go b/tools-v2/pkg/cli/command/curvebs/create/dir/dir.go new file mode 100644 index 0000000000..f91e04d286 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/create/dir/dir.go @@ -0,0 +1,96 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */ +/* +* Project: curve +* Created Date: 2023-04-14 +* Author: chengyi01 + */ +package dir + +import ( + "fmt" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/create/file" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/spf13/cobra" +) + +const ( + dirExample = `$ curve bs create dir --path /test` +) + +type DirectoryCommand struct { + basecmd.FinalCurveCmd +} + +var _ basecmd.FinalCurveCmdFunc = (*DirectoryCommand)(nil) + + +func (dCmd *DirectoryCommand) Init(cmd *cobra.Command, args []string) error { + config.AddBsFileTypeRequiredFlag(dCmd.Cmd) + dCmd.Cmd.ParseFlags([]string{ + fmt.Sprintf("--%s", config.CURVEBS_TYPE), + fmt.Sprint(cobrautil.TYPE_DIR), + }) + dCmd.SetHeader([]string{cobrautil.ROW_RESULT}) + return nil +} + +func (dCmd *DirectoryCommand) RunCommand(cmd *cobra.Command, args []string) error { + dCmd.Result, dCmd.Error = file.CreateFileOrDirectory(dCmd.Cmd) + if dCmd.Error.TypeCode() != cmderror.CODE_SUCCESS { + return dCmd.Error.ToError() + } + dCmd.TableNew.Append([]string{dCmd.Error.Message}) + return nil +} + +func (dCmd *DirectoryCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&dCmd.FinalCurveCmd, dCmd) +} + +func (dCmd *DirectoryCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&dCmd.FinalCurveCmd) +} + +func (dCmd *DirectoryCommand) AddFlags() { + config.AddBsMdsFlagOption(dCmd.Cmd) + config.AddRpcTimeoutFlag(dCmd.Cmd) + config.AddRpcRetryTimesFlag(dCmd.Cmd) + config.AddBsUserOptionFlag(dCmd.Cmd) + config.AddBsPasswordOptionFlag(dCmd.Cmd) + config.AddBsPathRequiredFlag(dCmd.Cmd) +} + +func NewCreateDirectoryCommand() *DirectoryCommand { + dCmd := &DirectoryCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "dir", + Short: "create directory in curvebs cluster", + Example: dirExample, + }, + } + basecmd.NewFinalCurveCli(&dCmd.FinalCurveCmd, dCmd) + return dCmd +} + +func NewDirectoryCommand() *cobra.Command { + return NewCreateDirectoryCommand().Cmd +} diff --git a/tools-v2/pkg/cli/command/curvebs/create/file/create.go b/tools-v2/pkg/cli/command/curvebs/create/file/create.go new file mode 100644 index 0000000000..2b7338042d --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/create/file/create.go @@ -0,0 +1,180 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */ +/* +* Project: curve +* Created Date: 2023-04-14 +* Author: chengyi01 + */ + +package file + +import ( + "context" + "fmt" + + "github.com/dustin/go-humanize" + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/nameserver2" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "google.golang.org/grpc" +) + +type CreateFileRpc struct { + Info *basecmd.Rpc + Request *nameserver2.CreateFileRequest + mdsClient nameserver2.CurveFSServiceClient +} + +var _ basecmd.RpcFunc = (*CreateFileRpc)(nil) + +// CreateCommand definition +type CreateCommand struct { + basecmd.FinalCurveCmd + Rpc *CreateFileRpc + Response *nameserver2.CreateFileResponse +} + +var _ basecmd.FinalCurveCmdFunc = (*CreateCommand)(nil) + +func (gRpc *CreateFileRpc) NewRpcClient(cc grpc.ClientConnInterface) { + gRpc.mdsClient = nameserver2.NewCurveFSServiceClient(cc) +} + +func (gRpc *CreateFileRpc) Stub_Func(ctx context.Context) (interface{}, error) { + return gRpc.mdsClient.CreateFile(ctx, gRpc.Request) +} + +func (cCmd *CreateCommand) Init(cmd *cobra.Command, args []string) error { + mdsAddrs, err := config.GetBsMdsAddrSlice(cCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + //get the default timeout and retrytimes + timeout := config.GetFlagDuration(cCmd.Cmd, config.RPCTIMEOUT) + retrytimes := config.GetFlagInt32(cCmd.Cmd, config.RPCRETRYTIMES) + filename := config.GetBsFlagString(cCmd.Cmd, config.CURVEBS_PATH) + username := config.GetBsFlagString(cCmd.Cmd, config.CURVEBS_USER) + password := config.GetBsFlagString(cCmd.Cmd, config.CURVEBS_PASSWORD) + date, errDat := cobrautil.GetTimeofDayUs() + if errDat.TypeCode() != cmderror.CODE_SUCCESS { + return errDat.ToError() + } + + fileTypeStr := config.GetBsFlagString(cCmd.Cmd, config.CURVEBS_TYPE) + //fileTypeStr := "dir or file" + fileType, errType := cobrautil.TranslateFileType(fileTypeStr) + if errType.TypeCode() != cmderror.CODE_SUCCESS { + return errType.ToError() + } + createRequest := nameserver2.CreateFileRequest{ + FileName: &filename, + Owner: &username, + Date: &date, + FileType: &fileType, + } + if fileType == nameserver2.FileType_INODE_PAGEFILE { + size := config.GetBsFlagUint64(cCmd.Cmd, config.CURVEBS_SIZE) + size = size * humanize.GiByte + createRequest.FileLength = &size + + stripeCount := config.GetBsFlagUint64(cCmd.Cmd, config.CURVEBS_STRIPE_COUNT) + createRequest.StripeCount = &stripeCount + stripeUnitStr := config.GetBsFlagString(cCmd.Cmd, config.CURVEBS_STRIPE_UNIT) + stripeUnit, errUnit := humanize.ParseBytes(stripeUnitStr) + if errUnit != nil { + return fmt.Errorf("parse stripe unit[%s] failed, err: %v", stripeUnitStr, errUnit) + } + createRequest.StripeUnit = &stripeUnit + } + if username == viper.GetString(config.VIPER_CURVEBS_USER) && len(password) != 0 { + strSig := cobrautil.GetString2Signature(date, username) + sig := cobrautil.CalcString2Signature(strSig, password) + createRequest.Signature = &sig + } + cCmd.Rpc = &CreateFileRpc{ + Info: basecmd.NewRpc(mdsAddrs, timeout, retrytimes, "CreateFile"), + Request: &createRequest, + } + return nil +} + +func (cCmd *CreateCommand) RunCommand(cmd *cobra.Command, args []string) error { + result, err := basecmd.GetRpcResponse(cCmd.Rpc.Info, cCmd.Rpc) + if err.TypeCode() != 
cmderror.CODE_SUCCESS { + return err.ToError() + } + cCmd.Response = result.(*nameserver2.CreateFileResponse) + if cCmd.Response.GetStatusCode() != nameserver2.StatusCode_kOK { + err = cmderror.ErrCreateFile(cCmd.Response.GetStatusCode(), cCmd.Rpc.Request.GetFileName()) + return err.ToError() + } + return nil +} + +func (cCmd *CreateCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&cCmd.FinalCurveCmd, cCmd) +} + +func (cCmd *CreateCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&cCmd.FinalCurveCmd) +} + +func (cCmd *CreateCommand) AddFlags() { + config.AddBsMdsFlagOption(cCmd.Cmd) + config.AddRpcTimeoutFlag(cCmd.Cmd) + config.AddRpcRetryTimesFlag(cCmd.Cmd) + config.AddBsPathRequiredFlag(cCmd.Cmd) + config.AddBsUserOptionFlag(cCmd.Cmd) + config.AddBsPasswordOptionFlag(cCmd.Cmd) + config.AddBsSizeOptionFlag(cCmd.Cmd) + config.AddBsFileTypeRequiredFlag(cCmd.Cmd) + config.AddBsStripeUnitOptionFlag(cCmd.Cmd) + config.AddBsStripeCountOptionFlag(cCmd.Cmd) +} + +// NewCreateCommand return the mid cli +func NewCreateCommand() *CreateCommand { + cCmd := &CreateCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{}, + } + basecmd.NewFinalCurveCli(&cCmd.FinalCurveCmd, cCmd) + return cCmd +} + +func CreateFileOrDirectory(caller *cobra.Command) (*nameserver2.CreateFileResponse, *cmderror.CmdError) { + createCmd := NewCreateCommand() + config.AlignFlagsValue(caller, createCmd.Cmd, []string{ + config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR, + config.CURVEBS_PATH, config.CURVEBS_USER, config.CURVEBS_PASSWORD, + config.CURVEBS_SIZE, config.CURVEBS_TYPE, config.CURVEBS_STRIPE_UNIT, + config.CURVEBS_STRIPE_COUNT, + }) + createCmd.Cmd.SilenceErrors = true + createCmd.Cmd.SilenceUsage = true + createCmd.Cmd.SetArgs([]string{"--format", config.FORMAT_NOOUT}) + err := createCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsCreateFileOrDirectoryType() + retErr.Format(err.Error()) + return createCmd.Response, retErr + } + return createCmd.Response, cmderror.Success() +} diff --git a/tools-v2/pkg/cli/command/curvebs/create/file/file.go b/tools-v2/pkg/cli/command/curvebs/create/file/file.go new file mode 100644 index 0000000000..71b6b206fc --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/create/file/file.go @@ -0,0 +1,98 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */ +/* +* Project: curve +* Created Date: 2023-04-14 +* Author: chengyi01 + */ +package file + +import ( + "fmt" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/spf13/cobra" +) + +const ( + dirExample = `$ curve bs create file --path /test` + TYOE_DIR +) + +type fileCommand struct { + basecmd.FinalCurveCmd +} + +var _ basecmd.FinalCurveCmdFunc = (*fileCommand)(nil) + +func (fCmd *fileCommand) Init(cmd *cobra.Command, args []string) error { + config.AddBsFileTypeRequiredFlag(fCmd.Cmd) + fCmd.Cmd.ParseFlags([]string{ + fmt.Sprintf("--%s", config.CURVEBS_TYPE), + fmt.Sprint(cobrautil.TYPE_FILE), + }) + fCmd.SetHeader([]string{cobrautil.ROW_RESULT}) + return nil +} + +func (fCmd *fileCommand) RunCommand(cmd *cobra.Command, args []string) error { + fCmd.Result, fCmd.Error = CreateFileOrDirectory(fCmd.Cmd) + if fCmd.Error.TypeCode() != cmderror.CODE_SUCCESS { + return fCmd.Error.ToError() + } + fCmd.TableNew.Append([]string{fCmd.Error.Message}) + return nil +} + +func (fCmd *fileCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&fCmd.FinalCurveCmd, fCmd) +} + +func (fCmd *fileCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&fCmd.FinalCurveCmd) +} + +func (fCmd *fileCommand) AddFlags() { + config.AddBsMdsFlagOption(fCmd.Cmd) + config.AddRpcTimeoutFlag(fCmd.Cmd) + config.AddRpcRetryTimesFlag(fCmd.Cmd) + config.AddBsPathRequiredFlag(fCmd.Cmd) + config.AddBsUserOptionFlag(fCmd.Cmd) + config.AddBsPasswordOptionFlag(fCmd.Cmd) + config.AddBsSizeOptionFlag(fCmd.Cmd) + config.AddBsStripeUnitOptionFlag(fCmd.Cmd) + config.AddBsStripeCountOptionFlag(fCmd.Cmd) +} + +func NewCreateFileCommand() *fileCommand { + fCmd := &fileCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "file", + Short: "create page file in curvebs cluster", + Example: dirExample, + }, + } + basecmd.NewFinalCurveCli(&fCmd.FinalCurveCmd, fCmd) + return fCmd +} + +func NewFileCommand() *cobra.Command { + return NewCreateFileCommand().Cmd +} diff --git a/tools-v2/pkg/cli/command/curvebs/delete/delete.go b/tools-v2/pkg/cli/command/curvebs/delete/delete.go index 265aa378ef..e65312fb41 100644 --- a/tools-v2/pkg/cli/command/curvebs/delete/delete.go +++ b/tools-v2/pkg/cli/command/curvebs/delete/delete.go @@ -22,7 +22,7 @@ var _ basecmd.MidCurveCmdFunc = (*DeleteCommand)(nil) // check interface func (dCmd *DeleteCommand) AddSubCommands() { dCmd.Cmd.AddCommand( - file.NewCommand(), + file.NewFileCommand(), peer.NewCommand(), ) } diff --git a/tools-v2/pkg/cli/command/curvebs/delete/file/file.go b/tools-v2/pkg/cli/command/curvebs/delete/file/file.go index ed69811544..ec95bacaf0 100644 --- a/tools-v2/pkg/cli/command/curvebs/delete/file/file.go +++ b/tools-v2/pkg/cli/command/curvebs/delete/file/file.go @@ -16,7 +16,7 @@ import ( ) const ( - deleteCliExample = `curve bs delete file --filename /curvebs-file-name --username username [--password password] [--forcedelete true]` + deleteCliExample = `curve bs delete file --path /curvebs-file-path --user username [--password password] [--force true]` ) type DeleteCertainFileRpc struct { @@ -51,16 +51,16 @@ func (deleteCommand *DeleteCommand) Init(cmd *cobra.Command, args []string) erro //get the default timeout and retrytimes timeout := config.GetFlagDuration(deleteCommand.Cmd, 
config.RPCTIMEOUT) retrytimes := config.GetFlagInt32(deleteCommand.Cmd, config.RPCRETRYTIMES) - filename := config.GetBsFlagString(deleteCommand.Cmd, config.CURVEBS_FILENAME) + path := config.GetBsFlagString(deleteCommand.Cmd, config.CURVEBS_PATH) username := config.GetBsFlagString(deleteCommand.Cmd, config.CURVEBS_USER) password := config.GetBsFlagString(deleteCommand.Cmd, config.CURVEBS_PASSWORD) - forcedelete := config.GetFlagBool(deleteCommand.Cmd, config.CURVEBS_FORCEDELETE) + forcedelete := config.GetBsFlagBool(deleteCommand.Cmd, config.CURVEBS_FORCE) date, errDat := cobrautil.GetTimeofDayUs() if errDat.TypeCode() != cmderror.CODE_SUCCESS { return errDat.ToError() } deleteRequest := nameserver2.DeleteFileRequest{ - FileName: &filename, + FileName: &path, Owner: &username, Date: &date, ForceDelete: &forcedelete, @@ -74,7 +74,7 @@ func (deleteCommand *DeleteCommand) Init(cmd *cobra.Command, args []string) erro Info: basecmd.NewRpc(mdsAddrs, timeout, retrytimes, "DeleteFile"), Request: &deleteRequest, } - header := []string{cobrautil.ROW_RESULT, cobrautil.ROW_REASON} + header := []string{cobrautil.ROW_RESULT} deleteCommand.SetHeader(header) deleteCommand.TableNew.SetAutoMergeCellsByColumnIndex(cobrautil.GetIndexSlice( deleteCommand.Header, header, @@ -83,24 +83,24 @@ func (deleteCommand *DeleteCommand) Init(cmd *cobra.Command, args []string) erro } func (deleteCommand *DeleteCommand) RunCommand(cmd *cobra.Command, args []string) error { - out := make(map[string]string) result, err := basecmd.GetRpcResponse(deleteCommand.Rpc.Info, deleteCommand.Rpc) if err.TypeCode() != cmderror.CODE_SUCCESS { - out[cobrautil.ROW_RESULT] = "failed" - out[cobrautil.ROW_REASON] = err.Message - return nil + deleteCommand.Error = err + deleteCommand.Result = result + return err.ToError() } deleteCommand.Response = result.(*nameserver2.DeleteFileResponse) if deleteCommand.Response.GetStatusCode() != nameserver2.StatusCode_kOK { - err = cmderror.ErrBsDeleteFile() - out[cobrautil.ROW_RESULT] = "failed" - out[cobrautil.ROW_REASON] = err.Message - return nil + deleteCommand.Error = cmderror.ErrBsDeleteFile() + deleteCommand.Result = result + return deleteCommand.Error.ToError() } - out[cobrautil.ROW_RESULT] = "success" - out[cobrautil.ROW_REASON] = "" - list := cobrautil.Map2List(out, []string{cobrautil.ROW_RESULT, cobrautil.ROW_REASON}) + out := make(map[string]string) + out[cobrautil.ROW_RESULT] = cobrautil.ROW_VALUE_SUCCESS + list := cobrautil.Map2List(out, []string{cobrautil.ROW_RESULT}) deleteCommand.TableNew.Append(list) + + deleteCommand.Result, deleteCommand.Error = result, cmderror.Success() return nil } @@ -113,14 +113,18 @@ func (deleteCommand *DeleteCommand) ResultPlainOutput() error { } func (deleteCommand *DeleteCommand) AddFlags() { - config.AddBsFilenameRequiredFlag(deleteCommand.Cmd) - config.AddBsUsernameRequiredFlag(deleteCommand.Cmd) + config.AddBsMdsFlagOption(deleteCommand.Cmd) + config.AddRpcTimeoutFlag(deleteCommand.Cmd) + config.AddRpcRetryTimesFlag(deleteCommand.Cmd) + + config.AddBsPathRequiredFlag(deleteCommand.Cmd) + config.AddBsUserOptionFlag(deleteCommand.Cmd) config.AddBsPasswordOptionFlag(deleteCommand.Cmd) config.AddBsForceDeleteOptionFlag(deleteCommand.Cmd) } // NewCommand return the mid cli -func NewCommand() *cobra.Command { +func NewDeleteFileCommand() *DeleteCommand { deleteCommand := &DeleteCommand{ FinalCurveCmd: basecmd.FinalCurveCmd{ Use: "file", @@ -129,5 +133,29 @@ func NewCommand() *cobra.Command { }, } basecmd.NewFinalCurveCli(&deleteCommand.FinalCurveCmd, 
deleteCommand) - return basecmd.NewFinalCurveCli(&deleteCommand.FinalCurveCmd, deleteCommand) + return deleteCommand +} + +func NewFileCommand() *cobra.Command { + return NewDeleteFileCommand().Cmd +} + +// DeleteFile function wraps the DeleteCertainFile rpc +func DeleteFile(caller *cobra.Command) (*nameserver2.DeleteFileResponse, *cmderror.CmdError) { + delCmd := NewDeleteFileCommand() + config.AlignFlagsValue(caller, delCmd.Cmd, []string{ + config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR, + config.CURVEBS_PATH, config.CURVEBS_USER, config.CURVEBS_PASSWORD, + config.CURVEBS_FORCE, + }) + delCmd.Cmd.SilenceErrors = true + delCmd.Cmd.SilenceUsage = true + delCmd.Cmd.SetArgs([]string{"--format", config.FORMAT_NOOUT}) + err := delCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsDeleteFile() + retErr.Format(err.Error()) + return delCmd.Response, retErr + } + return delCmd.Response, cmderror.Success() } diff --git a/tools-v2/pkg/cli/command/curvebs/delete/peer/leader.go b/tools-v2/pkg/cli/command/curvebs/delete/peer/leader.go index 85e199d594..34bc8f9004 100644 --- a/tools-v2/pkg/cli/command/curvebs/delete/peer/leader.go +++ b/tools-v2/pkg/cli/command/curvebs/delete/peer/leader.go @@ -141,5 +141,5 @@ func GetLeader(logicalPoolID, copysetID uint32, conf Configuration, opts Options } } ret := cmderror.MergeCmdError(errors) - return nil, &ret + return nil, ret } diff --git a/tools-v2/pkg/cli/command/curvebs/delete/peer/peer.go b/tools-v2/pkg/cli/command/curvebs/delete/peer/peer.go index 9cf8020b80..57eaac9534 100644 --- a/tools-v2/pkg/cli/command/curvebs/delete/peer/peer.go +++ b/tools-v2/pkg/cli/command/curvebs/delete/peer/peer.go @@ -90,10 +90,10 @@ func (cCmd *Command) AddFlags() { config.AddRpcRetryTimesFlag(cCmd.Cmd) config.AddRpcTimeoutFlag(cCmd.Cmd) - config.AddBSLogicalPoolIdFlag(cCmd.Cmd) - config.AddBSCopysetIdFlag(cCmd.Cmd) + config.AddBsLogicalPoolIdRequiredFlag(cCmd.Cmd) + config.AddBsCopysetIdRequiredFlag(cCmd.Cmd) - config.AddBSPeersConfFlag(cCmd.Cmd) + config.AddBsPeersConfFlag(cCmd.Cmd) } func (cCmd *Command) Init(cmd *cobra.Command, args []string) error { @@ -104,18 +104,11 @@ func (cCmd *Command) Init(cmd *cobra.Command, args []string) error { cCmd.Header, []string{}, )) - var err error cCmd.opts.Timeout = config.GetFlagDuration(cCmd.Cmd, config.RPCTIMEOUT) cCmd.opts.RetryTimes = config.GetFlagInt32(cCmd.Cmd, config.RPCRETRYTIMES) - cCmd.copysetID, err = config.GetBsFlagUint32(cCmd.Cmd, config.CURVEBS_COPYSET_ID) - if err != nil { - return err - } - cCmd.logicalPoolID, err = config.GetBsFlagUint32(cCmd.Cmd, config.CURVEBS_LOGIC_POOL_ID) - if err != nil { - return err - } + cCmd.copysetID = config.GetBsFlagUint32(cCmd.Cmd, config.CURVEBS_COPYSET_ID) + cCmd.logicalPoolID = config.GetBsFlagUint32(cCmd.Cmd, config.CURVEBS_LOGIC_POOL_ID) // parse peers config peers := config.GetBsFlagStringSlice(cCmd.Cmd, config.CURVEBS_PEERS_ADDRESS) diff --git a/tools-v2/pkg/cli/command/curvebs/list/chunkserver/chunkserver.go b/tools-v2/pkg/cli/command/curvebs/list/chunkserver/chunkserver.go new file mode 100644 index 0000000000..8f14920040 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/list/chunkserver/chunkserver.go @@ -0,0 +1,270 @@ +/* + * Copyright (c) 2022 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: CurveCli + * Created Date: 2023-03-27 + * Author: Sindweller + */ + +package chunkserver + +import ( + "context" + "fmt" + "strconv" + + "github.com/dustin/go-humanize" + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/list/server" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/topology" + "github.com/spf13/cobra" + "google.golang.org/grpc" +) + +const ( + chunkServerExample = `$ curve bs list chunkserver` +) + +type ListChunkServerRpc struct { + Info *basecmd.Rpc + Request *topology.ListChunkServerRequest + topologyServiceClient topology.TopologyServiceClient +} + +var _ basecmd.RpcFunc = (*ListChunkServerRpc)(nil) // check interface + +type ChunkServerCommand struct { + basecmd.FinalCurveCmd + Rpc []*ListChunkServerRpc + Response []*topology.ListChunkServerResponse + ChunkServerInfos []*topology.ChunkServerInfo +} + +var _ basecmd.FinalCurveCmdFunc = (*ChunkServerCommand)(nil) // check interface + +func (lRpc *ListChunkServerRpc) NewRpcClient(cc grpc.ClientConnInterface) { + lRpc.topologyServiceClient = topology.NewTopologyServiceClient(cc) +} + +func (lRpc *ListChunkServerRpc) Stub_Func(ctx context.Context) (interface{}, error) { + return lRpc.topologyServiceClient.ListChunkServer(ctx, lRpc.Request) +} + +func NewChunkServerCommand() *cobra.Command { + return NewListChunkServerCommand().Cmd +} + +func NewListChunkServerCommand() *ChunkServerCommand { + lsCmd := &ChunkServerCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "chunkserver", + Short: "list chunk server in curvebs", + Example: chunkServerExample, + }, + } + + basecmd.NewFinalCurveCli(&lsCmd.FinalCurveCmd, lsCmd) + return lsCmd +} + +// AddFlags implements basecmd.FinalCurveCmdFunc +func (pCmd *ChunkServerCommand) AddFlags() { + config.AddBsMdsFlagOption(pCmd.Cmd) + config.AddRpcRetryTimesFlag(pCmd.Cmd) + config.AddRpcTimeoutFlag(pCmd.Cmd) + config.AddBsUserOptionFlag(pCmd.Cmd) + config.AddBsPasswordOptionFlag(pCmd.Cmd) + config.AddBsCheckCSAliveOptionFlag(pCmd.Cmd) + config.AddBsCheckHealthOptionFlag(pCmd.Cmd) + config.AddBsCSOfflineOptionFlag(pCmd.Cmd) + config.AddBsCSUnhealthyOptionFlag(pCmd.Cmd) +} + +// Init implements basecmd.FinalCurveCmdFunc +func (pCmd *ChunkServerCommand) Init(cmd *cobra.Command, args []string) error { + mdsAddrs, err := config.GetBsMdsAddrSlice(pCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + + timeout := config.GetFlagDuration(pCmd.Cmd, config.RPCTIMEOUT) + retrytimes := config.GetFlagInt32(pCmd.Cmd, config.RPCRETRYTIMES) + servers, err := server.ListServer(pCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + for _, server := range servers { + id := server.GetServerID() + rpc := &ListChunkServerRpc{ + Request: &topology.ListChunkServerRequest{ + ServerID: &id, + }, + Info: 
basecmd.NewRpc(mdsAddrs, timeout, retrytimes, "ListChunkServer"), + } + pCmd.Rpc = append(pCmd.Rpc, rpc) + } + + header := []string{ + cobrautil.ROW_ID, + cobrautil.ROW_TYPE, + cobrautil.ROW_IP, + cobrautil.ROW_PORT, + cobrautil.ROW_RW_STATUS, + cobrautil.ROW_DISK_STATE, + cobrautil.ROW_COPYSET_NUM, + cobrautil.ROW_MOUNTPOINT, + cobrautil.ROW_DISK_CAPACITY, + cobrautil.ROW_DISK_USED, + cobrautil.ROW_UNHEALTHY_COPYSET, + cobrautil.ROW_EXT_ADDR, + } + pCmd.SetHeader(header) + pCmd.TableNew.SetAutoMergeCellsByColumnIndex(cobrautil.GetIndexSlice( + pCmd.Header, []string{cobrautil.ROW_IP, cobrautil.ROW_ID, cobrautil.ROW_RW_STATUS, cobrautil.ROW_TYPE, cobrautil.ROW_DISK_STATE, cobrautil.ROW_EXT_ADDR}, + )) + return nil +} + +// Print implements basecmd.FinalCurveCmdFunc +func (pCmd *ChunkServerCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&pCmd.FinalCurveCmd, pCmd) +} + +// RunCommand implements basecmd.FinalCurveCmdFunc +func (pCmd *ChunkServerCommand) RunCommand(cmd *cobra.Command, args []string) error { + var rpcs []*basecmd.Rpc + var funcs []basecmd.RpcFunc + for _, rpc := range pCmd.Rpc { + rpcs = append(rpcs, rpc.Info) + funcs = append(funcs, rpc) + } + results, errs := basecmd.GetRpcListResponse(rpcs, funcs) + if len(errs) == len(rpcs) { + mergeErr := cmderror.MergeCmdErrorExceptSuccess(errs) + return mergeErr.ToError() + } + var errors []*cmderror.CmdError + rows := make([]map[string]string, 0) + + // counts + total := 0 + online := 0 + offline := 0 + unstable := 0 + pendding := 0 + retired := 0 + penddingCopyset := 0 + + for _, res := range results { + if res == nil { + continue + } + infos := res.(*topology.ListChunkServerResponse).GetChunkServerInfos() + for _, info := range infos { + csID := info.GetChunkServerID() + // get copyset info + copysetCmd := NewGetCopySetsInCopySetCommand() + config.AlignFlagsValue(pCmd.Cmd, copysetCmd.Cmd, []string{ + config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR, + }) + copysetCmd.Cmd.Flags().Set(config.CURVEBS_CHUNKSERVER_ID, strconv.FormatUint(uint64(csID), 10)) + copysets, err := GetCopySetsInChunkServer(copysetCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + errors = append(errs, err) + continue + } + unhealthyRatio := 0.0 + + if info.GetOnlineState() != topology.OnlineState_ONLINE { + if info.GetOnlineState() == topology.OnlineState_OFFLINE { + offline++ + } + if info.GetOnlineState() == topology.OnlineState_UNSTABLE { + unstable++ + } + unhealthyRatio = 1.0 + } else { + if config.GetFlagBool(pCmd.Cmd, config.CURVEBS_CS_OFFLINE) { + continue + } + online++ + } + + if info.GetStatus() == topology.ChunkServerStatus_PENDDING { + pendding++ + penddingCopyset += len(copysets) + } + if info.GetStatus() == topology.ChunkServerStatus_RETIRED { + retired++ + } + total++ + unhealthyRatio *= 100.00 + // generate row message + row := make(map[string]string) + row[cobrautil.ROW_ID] = fmt.Sprintf("%d", info.GetChunkServerID()) + row[cobrautil.ROW_TYPE] = info.GetDiskType() + row[cobrautil.ROW_IP] = info.GetHostIp() + row[cobrautil.ROW_PORT] = fmt.Sprintf("%d", info.GetPort()) + row[cobrautil.ROW_RW_STATUS] = fmt.Sprintf("%s", info.GetStatus()) + row[cobrautil.ROW_DISK_STATE] = fmt.Sprintf("%s", info.GetDiskStatus()) + row[cobrautil.ROW_COPYSET_NUM] = fmt.Sprintf("%d", len(copysets)) + row[cobrautil.ROW_MOUNTPOINT] = info.GetMountPoint() + row[cobrautil.ROW_DISK_CAPACITY] = humanize.IBytes(info.GetDiskCapacity()) + row[cobrautil.ROW_DISK_USED] = humanize.IBytes(info.GetDiskCapacity()) + 
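// ---------------------------------------------------------------------------
// Illustrative sketch only, not part of this patch: how another `curve bs`
// command could reuse the ListChunkServerInfos wrapper defined at the end of
// this file. countOnlineChunkServers is a hypothetical helper; it assumes the
// caller's cobra command has the mds-addr / rpc-timeout / rpc-retrytimes flags
// registered (the wrapper aligns exactly those flags) and that the caller
// imports this chunkserver package, the topology proto, cmderror and cobra as
// the surrounding files do.
func countOnlineChunkServers(caller *cobra.Command) (int, *cmderror.CmdError) {
	infos, err := chunkserver.ListChunkServerInfos(caller)
	if err.TypeCode() != cmderror.CODE_SUCCESS {
		return 0, err
	}
	online := 0
	for _, info := range infos {
		if info.GetOnlineState() == topology.OnlineState_ONLINE {
			online++
		}
	}
	return online, cmderror.Success()
}
// ---------------------------------------------------------------------------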
row[cobrautil.ROW_UNHEALTHY_COPYSET] = fmt.Sprintf("%.0f %%", unhealthyRatio) + row[cobrautil.ROW_EXT_ADDR] = info.GetExternalIp() + rows = append(rows, row) + pCmd.ChunkServerInfos = append(pCmd.ChunkServerInfos, info) + } + } + + list := cobrautil.ListMap2ListSortByKeys(rows, pCmd.Header, []string{cobrautil.ROW_IP, cobrautil.ROW_ID, cobrautil.ROW_RW_STATUS, cobrautil.ROW_TYPE, cobrautil.ROW_DISK_STATE, cobrautil.ROW_EXT_ADDR}) + pCmd.TableNew.AppendBulk(list) + if len(errors) != 0 { + mergeErr := cmderror.MergeCmdError(errors) + pCmd.Result, pCmd.Error = list, mergeErr + return mergeErr.ToError() + } + pCmd.Result, pCmd.Error = list, cmderror.Success() + return nil +} + +// ResultPlainOutput implements basecmd.FinalCurveCmdFunc +func (pCmd *ChunkServerCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&pCmd.FinalCurveCmd) +} + +func ListChunkServerInfos(caller *cobra.Command) ([]*topology.ChunkServerInfo, *cmderror.CmdError) { + lCmd := NewListChunkServerCommand() + config.AlignFlagsValue(caller, lCmd.Cmd, []string{ + config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR, + }) + lCmd.Cmd.SilenceErrors = true + lCmd.Cmd.SilenceUsage = true + lCmd.Cmd.SetArgs([]string{"--format", config.FORMAT_NOOUT}) + err := lCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsListChunkServer() + retErr.Format(err.Error()) + return nil, retErr + } + return lCmd.ChunkServerInfos, cmderror.ErrSuccess() +} diff --git a/tools-v2/pkg/cli/command/curvebs/list/chunkserver/chunkserver_cluster.go b/tools-v2/pkg/cli/command/curvebs/list/chunkserver/chunkserver_cluster.go new file mode 100644 index 0000000000..9654e7866c --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/list/chunkserver/chunkserver_cluster.go @@ -0,0 +1,129 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */ +/* +* Project: CurveCli +* Created Date: 2023-05-11 +* Author: chengyi01 + */ +package chunkserver + +import ( + "context" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/topology" + "github.com/opencurve/curve/tools-v2/proto/proto/topology/statuscode" + "github.com/spf13/cobra" + "google.golang.org/grpc" +) + +type GetChunkServerInClusterRpc struct { + Info *basecmd.Rpc + Request *topology.GetChunkServerInClusterRequest + topologyClient topology.TopologyServiceClient +} + +var _ basecmd.RpcFunc = (*GetChunkServerInClusterRpc)(nil) // check interface + +type ChunkServerInClusterCommand struct { + basecmd.FinalCurveCmd + Rpc *GetChunkServerInClusterRpc + ChunkServerInfos []*topology.ChunkServerInfo +} + +var _ basecmd.FinalCurveCmdFunc = (*ChunkServerInClusterCommand)(nil) // check interface + +func (gRpc *GetChunkServerInClusterRpc) NewRpcClient(cc grpc.ClientConnInterface) { + gRpc.topologyClient = topology.NewTopologyServiceClient(cc) +} + +func (gRpc *GetChunkServerInClusterRpc) Stub_Func(ctx context.Context) (interface{}, error) { + return gRpc.topologyClient.GetChunkServerInCluster(ctx, gRpc.Request) +} + +func NewChunkServerInClusterCommand() *cobra.Command { + return NewListChunkServerInClusterCommand().Cmd +} + +func NewListChunkServerInClusterCommand() *ChunkServerInClusterCommand { + lsCmd := &ChunkServerInClusterCommand{FinalCurveCmd: basecmd.FinalCurveCmd{}} + basecmd.NewFinalCurveCli(&lsCmd.FinalCurveCmd, lsCmd) + return lsCmd +} + +func (csicCmd *ChunkServerInClusterCommand) AddFlags() { + config.AddRpcRetryTimesFlag(csicCmd.Cmd) + config.AddRpcTimeoutFlag(csicCmd.Cmd) + + config.AddBsMdsFlagOption(csicCmd.Cmd) +} + +func (csicCmd *ChunkServerInClusterCommand) Init(cmd *cobra.Command, args []string) error { + mdsAddrs, err := config.GetBsMdsAddrSlice(csicCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + + timeout := config.GetFlagDuration(csicCmd.Cmd, config.RPCTIMEOUT) + retrytimes := config.GetFlagInt32(csicCmd.Cmd, config.RPCRETRYTIMES) + + csicCmd.Rpc = &GetChunkServerInClusterRpc{ + Request: &topology.GetChunkServerInClusterRequest{}, + Info: basecmd.NewRpc(mdsAddrs, timeout, retrytimes, "GetChunkServerInCluster"), + } + return nil +} + +func (csicCmd *ChunkServerInClusterCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&csicCmd.FinalCurveCmd, csicCmd) +} + +func (csicCmd *ChunkServerInClusterCommand) RunCommand(cmd *cobra.Command, args []string) error { + result, err := basecmd.GetRpcResponse(csicCmd.Rpc.Info, csicCmd.Rpc) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + res := result.(*topology.GetChunkServerInClusterResponse) + if res.GetStatusCode() != int32(statuscode.TopoStatusCode_Success) { + err = cmderror.ErrBsGetChunkServerInClusterRpc(statuscode.TopoStatusCode(res.GetStatusCode())) + return err.ToError() + } + csicCmd.ChunkServerInfos = res.GetChunkServerInfos() + + return nil +} + +func (csicCmd *ChunkServerInClusterCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&csicCmd.FinalCurveCmd) +} + +func GetChunkServerInCluster(caller *cobra.Command) ([]*topology.ChunkServerInfo, *cmderror.CmdError) { + getCmd := NewListChunkServerInClusterCommand() + config.AlignFlagsValue(caller, getCmd.Cmd, 
[]string{config.CURVEBS_MDSADDR}) + getCmd.Cmd.SilenceErrors = true + getCmd.Cmd.SilenceUsage = true + getCmd.Cmd.SetArgs([]string{"--format", config.FORMAT_NOOUT}) + err := getCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsGetChunkServerInCluster() + retErr.Format(err.Error()) + return nil, retErr + } + return getCmd.ChunkServerInfos, cmderror.Success() +} diff --git a/tools-v2/pkg/cli/command/curvebs/list/chunkserver/copyset.go b/tools-v2/pkg/cli/command/curvebs/list/chunkserver/copyset.go new file mode 100644 index 0000000000..b76c6f0f72 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/list/chunkserver/copyset.go @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2022 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: CurveCli + * Created Date: 2023-03-27 + * Author: Sindweller + */ + +package chunkserver + +import ( + "context" + "strconv" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + common "github.com/opencurve/curve/tools-v2/proto/proto/common" + "github.com/opencurve/curve/tools-v2/proto/proto/topology" + "github.com/spf13/cobra" + "google.golang.org/grpc" +) + +type GetCopySetsInChunkServerRpc struct { + Info *basecmd.Rpc + Request *topology.GetCopySetsInChunkServerRequest + topologyServiceClient topology.TopologyServiceClient +} + +var _ basecmd.RpcFunc = (*GetCopySetsInChunkServerRpc)(nil) // check interface + +type CopySetCommand struct { + basecmd.FinalCurveCmd + Rpc *GetCopySetsInChunkServerRpc + Response *topology.GetCopySetsInChunkServerResponse + CopySets []*common.CopysetInfo +} + +var _ basecmd.FinalCurveCmdFunc = (*CopySetCommand)(nil) // check interface + +func (lRpc *GetCopySetsInChunkServerRpc) NewRpcClient(cc grpc.ClientConnInterface) { + lRpc.topologyServiceClient = topology.NewTopologyServiceClient(cc) +} + +func (lRpc *GetCopySetsInChunkServerRpc) Stub_Func(ctx context.Context) (interface{}, error) { + return lRpc.topologyServiceClient.GetCopySetsInChunkServer(ctx, lRpc.Request) +} + +func NewCopySetsInChunkServerCommand() *cobra.Command { + return NewGetCopySetsInCopySetCommand().Cmd +} + +func NewGetCopySetsInCopySetCommand() *CopySetCommand { + lsCmd := &CopySetCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{}, + } + + basecmd.NewFinalCurveCli(&lsCmd.FinalCurveCmd, lsCmd) + return lsCmd +} + +// AddFlags implements basecmd.FinalCurveCmdFunc +func (pCmd *CopySetCommand) AddFlags() { + config.AddBsMdsFlagOption(pCmd.Cmd) + config.AddRpcRetryTimesFlag(pCmd.Cmd) + config.AddRpcTimeoutFlag(pCmd.Cmd) + config.AddBsUserOptionFlag(pCmd.Cmd) + config.AddBsPasswordOptionFlag(pCmd.Cmd) + config.AddBsChunkServerIdFlag(pCmd.Cmd) +} + +// Init implements basecmd.FinalCurveCmdFunc +func (pCmd *CopySetCommand) Init(cmd *cobra.Command, args []string) error { + mdsAddrs, err := 
config.GetBsMdsAddrSlice(pCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + + timeout := config.GetFlagDuration(pCmd.Cmd, config.RPCTIMEOUT) + retrytimes := config.GetFlagInt32(pCmd.Cmd, config.RPCRETRYTIMES) + strid, e := strconv.Atoi(config.GetBsFlagString(pCmd.Cmd, config.CURVEBS_CHUNKSERVER_ID)) + if e != nil { + return e + } + id := uint32(strid) + pCmd.Rpc = &GetCopySetsInChunkServerRpc{ + Request: &topology.GetCopySetsInChunkServerRequest{ + ChunkServerID: &id, + }, + Info: basecmd.NewRpc(mdsAddrs, timeout, retrytimes, "GetCopySetsInChunkServer"), + } + + header := []string{ + cobrautil.ROW_ID, + cobrautil.ROW_TYPE, + cobrautil.ROW_IP, + cobrautil.ROW_PORT, + cobrautil.ROW_RW_STATUS, + cobrautil.ROW_DISK_STATE, + cobrautil.ROW_COPYSET_NUM, + cobrautil.ROW_MOUNTPOINT, + cobrautil.ROW_DISK_CAPACITY, + cobrautil.ROW_DISK_USED, + cobrautil.ROW_UNHEALTHY_COPYSET, + cobrautil.ROW_EXT_ADDR, + } + pCmd.SetHeader(header) + pCmd.TableNew.SetAutoMergeCellsByColumnIndex(cobrautil.GetIndexSlice( + pCmd.Header, []string{cobrautil.ROW_TYPE, cobrautil.ROW_IP, cobrautil.ROW_DISK_STATE}, + )) + return nil +} + +// Print implements basecmd.FinalCurveCmdFunc +func (pCmd *CopySetCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&pCmd.FinalCurveCmd, pCmd) +} + +// RunCommand implements basecmd.FinalCurveCmdFunc +func (pCmd *CopySetCommand) RunCommand(cmd *cobra.Command, args []string) error { + + result, err := basecmd.GetRpcResponse(pCmd.Rpc.Info, pCmd.Rpc) + if err.TypeCode() != cmderror.CODE_SUCCESS { + pCmd.Error = err + pCmd.Result = result + return err.ToError() + } + + pCmd.Response = result.(*topology.GetCopySetsInChunkServerResponse) + infos := pCmd.Response.GetCopysetInfos() + rows := make([]map[string]string, 0) + var errors []*cmderror.CmdError + pCmd.CopySets = append(pCmd.CopySets, infos...) + + list := cobrautil.ListMap2ListSortByKeys(rows, pCmd.Header, []string{cobrautil.ROW_TYPE, cobrautil.ROW_RW_STATUS, cobrautil.ROW_MOUNTPOINT}) + pCmd.TableNew.AppendBulk(list) + errRet := cmderror.MergeCmdError(errors) + pCmd.Error = errRet + pCmd.Result = result + return nil +} + +// ResultPlainOutput implements basecmd.FinalCurveCmdFunc +func (pCmd *CopySetCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&pCmd.FinalCurveCmd) +} + +func GetCopySetsInChunkServer(caller *cobra.Command) ([]*common.CopysetInfo, *cmderror.CmdError) { + getCopySetsCmd := NewGetCopySetsInCopySetCommand() + config.AlignFlagsValue(caller, getCopySetsCmd.Cmd, []string{ + config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR, + config.CURVEBS_CHUNKSERVER_ID, + }) + getCopySetsCmd.Cmd.SilenceErrors = true + getCopySetsCmd.Cmd.SilenceUsage = true + getCopySetsCmd.Cmd.SetArgs([]string{"--format", config.FORMAT_NOOUT}) + err := getCopySetsCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsGetChunkCopyset() + retErr.Format(err.Error()) + return nil, retErr + } + return getCopySetsCmd.CopySets, cmderror.ErrSuccess() +} diff --git a/tools-v2/pkg/cli/command/curvebs/list/chunkserver/copyset_by_host.go b/tools-v2/pkg/cli/command/curvebs/list/chunkserver/copyset_by_host.go new file mode 100644 index 0000000000..9995ea1f65 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/list/chunkserver/copyset_by_host.go @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: tools-v2 + * Created Date: 2023-04-28 + * Author: baytan0720 + */ + +package chunkserver + +import ( + "context" + "fmt" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/common" + "github.com/opencurve/curve/tools-v2/proto/proto/topology" + "github.com/opencurve/curve/tools-v2/proto/proto/topology/statuscode" + "github.com/spf13/cobra" + "google.golang.org/grpc" +) + +type GetCopySetsInChunkServerByHostRpc struct { + Info *basecmd.Rpc + Request *topology.GetCopySetsInChunkServerRequest + mdsClient topology.TopologyServiceClient +} + +var _ basecmd.RpcFunc = (*GetCopySetsInChunkServerByHostRpc)(nil) // check interface + +func (gRpc *GetCopySetsInChunkServerByHostRpc) NewRpcClient(cc grpc.ClientConnInterface) { + gRpc.mdsClient = topology.NewTopologyServiceClient(cc) +} + +func (gRpc *GetCopySetsInChunkServerByHostRpc) Stub_Func(ctx context.Context) (interface{}, error) { + return gRpc.mdsClient.GetCopySetsInChunkServer(ctx, gRpc.Request) +} + +type CopySetsInChunkServerCommand struct { + basecmd.FinalCurveCmd + Rpc []*GetCopySetsInChunkServerByHostRpc + addr2Copysets *map[string][]*common.CopysetInfo +} + +var _ basecmd.FinalCurveCmdFunc = (*CopySetsInChunkServerCommand)(nil) // check interface + +func NewCopySetsInChunkServerByHostCommand() *cobra.Command { + return NewGetCopySetsInChunkServerCommand().Cmd +} + +func NewGetCopySetsInChunkServerCommand() *CopySetsInChunkServerCommand { + copysetCmd := &CopySetsInChunkServerCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{}, + } + basecmd.NewFinalCurveCli(©setCmd.FinalCurveCmd, copysetCmd) + return copysetCmd +} + +func (cCmd *CopySetsInChunkServerCommand) Init(cmd *cobra.Command, args []string) error { + mdsAddrs, err := config.GetBsMdsAddrSlice(cCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + timeout := config.GetFlagDuration(cCmd.Cmd, config.RPCTIMEOUT) + retrytimes := config.GetFlagInt32(cCmd.Cmd, config.RPCRETRYTIMES) + chunkserverAddrs := config.GetBsFlagStringSlice(cCmd.Cmd, config.CURVEBS_CHUNKSERVER_ADDRESS) + for i := 0; i < len(chunkserverAddrs); i++ { + hostIp, port, addrErr := cobrautil.Addr2IpPort(chunkserverAddrs[i]) + if addrErr.TypeCode() != cmderror.CODE_SUCCESS { + return addrErr.ToError() + } + rpc := &GetCopySetsInChunkServerByHostRpc{ + Request: &topology.GetCopySetsInChunkServerRequest{ + HostIp: &hostIp, + Port: &port, + }, + Info: basecmd.NewRpc(mdsAddrs, timeout, retrytimes, "GetCopySetsInChunkServer"), + } + cCmd.Rpc = append(cCmd.Rpc, rpc) + } + return nil +} + +func (cCmd *CopySetsInChunkServerCommand) RunCommand(cmd *cobra.Command, args []string) error { + var infos []*basecmd.Rpc + var funcs []basecmd.RpcFunc + for _, rpc := range cCmd.Rpc { + infos = append(infos, rpc.Info) + funcs = append(funcs, rpc) + } + results, errs := 
basecmd.GetRpcListResponse(infos, funcs) + if len(errs) == len(infos) { + mergeErr := cmderror.MergeCmdErrorExceptSuccess(errs) + return mergeErr.ToError() + } + cCmd.addr2Copysets = &map[string][]*common.CopysetInfo{} + for i, result := range results { + resp := result.(*topology.GetCopySetsInChunkServerResponse) + if resp.GetStatusCode() != int32(statuscode.TopoStatusCode_Success) { + err := cmderror.ErrBsGetCopysetInChunkServerRpc( + statuscode.TopoStatusCode(resp.GetStatusCode()), + ) + errs = append(errs, err) + continue + } + addr := fmt.Sprintf("%s:%d", *cCmd.Rpc[i].Request.HostIp, *cCmd.Rpc[i].Request.Port) + (*cCmd.addr2Copysets)[addr] = resp.CopysetInfos + } + errRet := cmderror.MergeCmdError(errs) + cCmd.Error = errRet + return nil +} + +func (cCmd *CopySetsInChunkServerCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&cCmd.FinalCurveCmd, cCmd) +} + +func (cCmd *CopySetsInChunkServerCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&cCmd.FinalCurveCmd) +} + +func (cCmd *CopySetsInChunkServerCommand) AddFlags() { + config.AddBsMdsFlagOption(cCmd.Cmd) + config.AddRpcRetryTimesFlag(cCmd.Cmd) + config.AddRpcTimeoutFlag(cCmd.Cmd) + + config.AddBsChunkServerAddressSliceRequiredFlag(cCmd.Cmd) +} + +func GetCopySetsInChunkServerByHost(caller *cobra.Command) (*map[string][]*common.CopysetInfo, *cmderror.CmdError) { + gCmd := NewGetCopySetsInChunkServerCommand() + config.AlignFlagsValue(caller, gCmd.Cmd, []string{ + config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR, + }) + gCmd.Cmd.SilenceErrors = true + gCmd.Cmd.SilenceUsage = true + gCmd.Cmd.SetArgs([]string{"--format", config.FORMAT_NOOUT}) + err := gCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsGetCopysetInChunkServer() + retErr.Format(err.Error()) + return nil, retErr + } + return gCmd.addr2Copysets, cmderror.Success() +} diff --git a/tools-v2/pkg/cli/command/curvebs/list/client/client.go b/tools-v2/pkg/cli/command/curvebs/list/client/client.go index 1fb6a9da68..b8b84faee1 100644 --- a/tools-v2/pkg/cli/command/curvebs/list/client/client.go +++ b/tools-v2/pkg/cli/command/curvebs/list/client/client.go @@ -25,6 +25,7 @@ package client import ( "context" "fmt" + cmderror "github.com/opencurve/curve/tools-v2/internal/error" cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" @@ -131,6 +132,9 @@ func (pCmd *ClientCommand) RunCommand(cmd *cobra.Command, args []string) error { var errors []*cmderror.CmdError rows := make([]map[string]string, 0) for _, res := range results { + if res == nil { + continue + } infos := res.(*nameserver2.ListClientResponse).GetClientInfos() for _, info := range infos { row := make(map[string]string) @@ -144,8 +148,8 @@ func (pCmd *ClientCommand) RunCommand(cmd *cobra.Command, args []string) error { }) pCmd.TableNew.AppendBulk(list) errRet := cmderror.MergeCmdError(errors) - pCmd.Error = &errRet - pCmd.Result = results + pCmd.Error = errRet + pCmd.Result = rows return nil } @@ -153,3 +157,19 @@ func (pCmd *ClientCommand) RunCommand(cmd *cobra.Command, args []string) error { func (pCmd *ClientCommand) ResultPlainOutput() error { return output.FinalCmdOutputPlain(&pCmd.FinalCurveCmd) } + +func GetClientList(caller *cobra.Command) (*interface{}, *cmderror.CmdError) { + listClientCmd := NewListCLientCommand() + listClientCmd.Cmd.SetArgs([]string{ + fmt.Sprintf("--%s", config.FORMAT), config.FORMAT_NOOUT, + }) + config.AlignFlagsValue(caller, 
listClientCmd.Cmd, []string{config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR}) + listClientCmd.Cmd.SilenceErrors = true + err := listClientCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsGetClientList() + retErr.Format(err.Error()) + return nil, retErr + } + return &listClientCmd.Result, cmderror.Success() +} diff --git a/tools-v2/pkg/cli/command/curvebs/list/copyset/copyset.go b/tools-v2/pkg/cli/command/curvebs/list/copyset/copyset.go new file mode 100644 index 0000000000..c867e674f6 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/list/copyset/copyset.go @@ -0,0 +1,136 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. + */ +/* +* Project: curve +* Created Date: 2023-05-04 +* Author: lianzhanbiao + */ + +package copyset + +import ( + "context" + "fmt" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/common" + "github.com/opencurve/curve/tools-v2/proto/proto/topology" + "github.com/spf13/cobra" + "google.golang.org/grpc" +) + +type GetCopySetsInClusterRpc struct { + Info *basecmd.Rpc + Request *topology.GetCopySetsInClusterRequest + mdsClient topology.TopologyServiceClient +} + +var _ basecmd.RpcFunc = (*GetCopySetsInClusterRpc)(nil) // check interface + +func (gRpc *GetCopySetsInClusterRpc) NewRpcClient(cc grpc.ClientConnInterface) { + gRpc.mdsClient = topology.NewTopologyServiceClient(cc) +} + +func (gRpc *GetCopySetsInClusterRpc) Stub_Func(ctx context.Context) (interface{}, error) { + return gRpc.mdsClient.GetCopySetsInCluster(ctx, gRpc.Request) +} + +type CopysetCommand struct { + basecmd.FinalCurveCmd + CopysetInfoList []*common.CopysetInfo + Rpc *GetCopySetsInClusterRpc +} + +var _ basecmd.FinalCurveCmdFunc = (*CopysetCommand)(nil) // check interface + +func NewListCopysetCommand() *CopysetCommand { + copysetCmd := &CopysetCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{}, + } + + basecmd.NewFinalCurveCli(©setCmd.FinalCurveCmd, copysetCmd) + return copysetCmd +} + +func NewCopysetCommand() *cobra.Command { + return NewListCopysetCommand().Cmd +} + +func (cCmd *CopysetCommand) AddFlags() { + config.AddRpcTimeoutFlag(cCmd.Cmd) + config.AddRpcRetryTimesFlag(cCmd.Cmd) + config.AddBsMdsFlagOption(cCmd.Cmd) + config.AddBsFilterOptionFlag(cCmd.Cmd) +} + +func (cCmd *CopysetCommand) Init(cmd *cobra.Command, args []string) error { + mdsAddrs, err := config.GetBsMdsAddrSlice(cCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + timeout := config.GetFlagDuration(cCmd.Cmd, config.RPCTIMEOUT) + retrytimes := config.GetFlagInt32(cCmd.Cmd, config.RPCRETRYTIMES) + filterscanning := config.GetBsFlagBool(cCmd.Cmd, config.CURVEBS_FIlTER) + + cCmd.Rpc = &GetCopySetsInClusterRpc{ + Info: basecmd.NewRpc(mdsAddrs, timeout, retrytimes, "GetCopySetsInCluster"), + Request: 
&topology.GetCopySetsInClusterRequest{ + FilterScaning: &filterscanning, + }, + } + return nil +} + +func (cCmd *CopysetCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&cCmd.FinalCurveCmd, cCmd) +} + +func (cCmd *CopysetCommand) RunCommand(cmd *cobra.Command, args []string) error { + result, err := basecmd.GetRpcResponse(cCmd.Rpc.Info, cCmd.Rpc) + if err.TypeCode() != cmderror.CODE_SUCCESS { + cCmd.Error = err + cCmd.Result = result + return err.ToError() + } + res := result.(*topology.GetCopySetsInClusterResponse) + cCmd.CopysetInfoList = res.GetCopysetInfos() + cCmd.Result, cCmd.Error = cCmd.CopysetInfoList, cmderror.Success() + return nil +} + +func (cCmd *CopysetCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&cCmd.FinalCurveCmd) +} + +func GetCopySetsInCluster(caller *cobra.Command) ([]*common.CopysetInfo, *cmderror.CmdError) { + getCmd := NewListCopysetCommand() + config.AlignFlagsValue(caller, getCmd.Cmd, []string{ + config.CURVEBS_MDSADDR, config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_FIlTER, + }) + getCmd.Cmd.SilenceErrors = true + getCmd.Cmd.SilenceUsage = true + getCmd.Cmd.SetArgs([]string{fmt.Sprintf("--%s", config.FORMAT), config.FORMAT_NOOUT}) + err := getCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsListScanStatus() + retErr.Format(err.Error()) + return getCmd.CopysetInfoList, retErr + } + return getCmd.CopysetInfoList, cmderror.Success() +} diff --git a/tools-v2/pkg/cli/command/curvebs/list/dir/dir.go b/tools-v2/pkg/cli/command/curvebs/list/dir/dir.go index 04ed3a4cd3..35c54fcafd 100644 --- a/tools-v2/pkg/cli/command/curvebs/list/dir/dir.go +++ b/tools-v2/pkg/cli/command/curvebs/list/dir/dir.go @@ -25,6 +25,8 @@ package dir import ( "context" "fmt" + "time" + "github.com/dustin/go-humanize" cmderror "github.com/opencurve/curve/tools-v2/internal/error" cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" @@ -36,12 +38,10 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "google.golang.org/grpc" - "log" - "time" ) const ( - dirExample = `$ curve bs list dir --dir /` + dirExample = `$ curve bs list dir --path /` ) type ListDirRpc struct { @@ -54,7 +54,8 @@ var _ basecmd.RpcFunc = (*ListDirRpc)(nil) // check interface type DirCommand struct { basecmd.FinalCurveCmd - Rpc []*ListDirRpc + Rpc *ListDirRpc + Response *nameserver2.ListDirResponse } var _ basecmd.FinalCurveCmdFunc = (*DirCommand)(nil) // check interface @@ -89,7 +90,7 @@ func (pCmd *DirCommand) AddFlags() { config.AddBsMdsFlagOption(pCmd.Cmd) config.AddRpcRetryTimesFlag(pCmd.Cmd) config.AddRpcTimeoutFlag(pCmd.Cmd) - config.AddBsDirOptionFlag(pCmd.Cmd) + config.AddBsPathOptionFlag(pCmd.Cmd) config.AddBsUserOptionFlag(pCmd.Cmd) config.AddBsPasswordOptionFlag(pCmd.Cmd) } @@ -103,16 +104,16 @@ func (pCmd *DirCommand) Init(cmd *cobra.Command, args []string) error { timeout := config.GetFlagDuration(pCmd.Cmd, config.RPCTIMEOUT) retrytimes := config.GetFlagInt32(pCmd.Cmd, config.RPCRETRYTIMES) - fileName := config.GetBsFlagString(pCmd.Cmd, config.CURVEBS_DIR) + path := config.GetBsFlagString(pCmd.Cmd, config.CURVEBS_PATH) owner := config.GetBsFlagString(pCmd.Cmd, config.CURVEBS_USER) date, errDat := cobrautil.GetTimeofDayUs() if errDat.TypeCode() != cmderror.CODE_SUCCESS { return errDat.ToError() } - rpc := &ListDirRpc{ + pCmd.Rpc = &ListDirRpc{ Request: &nameserver2.ListDirRequest{ - FileName: &fileName, + FileName: &path, Owner: &owner, Date: &date, }, @@ -123,9 +124,9 @@ func (pCmd *DirCommand) Init(cmd 
*cobra.Command, args []string) error { if owner == viper.GetString(config.VIPER_CURVEBS_USER) && len(password) != 0 { strSig := cobrautil.GetString2Signature(date, owner) sig := cobrautil.CalcString2Signature(strSig, password) - rpc.Request.Signature = &sig + pCmd.Rpc.Request.Signature = &sig } - pCmd.Rpc = append(pCmd.Rpc, rpc) + header := []string{ cobrautil.ROW_ID, cobrautil.ROW_FILE_NAME, @@ -150,69 +151,63 @@ func (pCmd *DirCommand) Print(cmd *cobra.Command, args []string) error { // RunCommand implements basecmd.FinalCurveCmdFunc func (pCmd *DirCommand) RunCommand(cmd *cobra.Command, args []string) error { - var infos []*basecmd.Rpc - var funcs []basecmd.RpcFunc - for _, rpc := range pCmd.Rpc { - infos = append(infos, rpc.Info) - funcs = append(funcs, rpc) - } - results, errs := basecmd.GetRpcListResponse(infos, funcs) - if len(errs) == len(infos) { - mergeErr := cmderror.MergeCmdErrorExceptSuccess(errs) - return mergeErr.ToError() + result, err := basecmd.GetRpcResponse(pCmd.Rpc.Info, pCmd.Rpc) + if err.TypeCode() != cmderror.CODE_SUCCESS { + pCmd.Error = err + pCmd.Result = result + return err.ToError() } - var errors []*cmderror.CmdError + pCmd.Response = result.(*nameserver2.ListDirResponse) + infos := pCmd.Response.GetFileInfo() rows := make([]map[string]string, 0) - for _, res := range results { - infos := res.(*nameserver2.ListDirResponse).GetFileInfo() - for _, info := range infos { - row := make(map[string]string) - dirName := config.GetBsFlagString(pCmd.Cmd, config.CURVEBS_DIR) - var fileName string - if dirName == "/" { - fileName = dirName + info.GetFileName() - } else { - fileName = dirName + "/" + info.GetFileName() - } - row[cobrautil.ROW_ID] = fmt.Sprintf("%d", info.GetId()) - row[cobrautil.ROW_FILE_NAME] = fileName - row[cobrautil.ROW_PARENT_ID] = fmt.Sprintf("%d", info.GetParentId()) - row[cobrautil.ROW_FILE_TYPE] = fmt.Sprintf("%v", info.GetFileType()) - row[cobrautil.ROW_OWNER] = info.GetOwner() - row[cobrautil.ROW_CTIME] = time.Unix(int64(info.GetCtime()/1000000), 0).Format("2006-01-02 15:04:05") - - // generate a query file command - fInfoCmd := file.NewQueryFileCommand() - config.AlignFlagsValue(pCmd.Cmd, fInfoCmd.Cmd, []string{ - config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR, - config.CURVEBS_PATH, - }) - fInfoCmd.Cmd.Flags().Set("path", fileName) - - // Get file size - sizeRes, err := file.GetFileSize(fInfoCmd.Cmd) - if err.TypeCode() != cmderror.CODE_SUCCESS { - // TODO handle err - log.Printf("%s failed to get file size: %v", info.GetFileName(), err) - //return err.ToError() - } - row[cobrautil.ROW_FILE_SIZE] = fmt.Sprintf("%s", humanize.IBytes(sizeRes.GetFileSize())) - // Get allocated size - allocRes, err := file.GetAllocatedSize(fInfoCmd.Cmd) - if err.TypeCode() != cmderror.CODE_SUCCESS { - // TODO handle err - log.Printf("%s failed to get allocated size: %v", info.GetFileName(), err) - //return err.ToError() - } - row[cobrautil.ROW_ALLOC_SIZE] = fmt.Sprintf("%s", humanize.IBytes(allocRes.GetAllocatedSize())) - rows = append(rows, row) + var errs []*cmderror.CmdError + for _, info := range infos { + row := make(map[string]string) + var fileName string + path := config.GetBsFlagString(pCmd.Cmd, config.CURVEBS_PATH) + if path[len(path)-1] == '/' { + fileName = path + info.GetFileName() + } else { + fileName = path + "/" + info.GetFileName() } + row[cobrautil.ROW_ID] = fmt.Sprintf("%d", info.GetId()) + row[cobrautil.ROW_FILE_NAME] = fileName + row[cobrautil.ROW_PARENT_ID] = fmt.Sprintf("%d", info.GetParentId()) + 
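// ---------------------------------------------------------------------------
// Illustrative sketch only, not part of this patch: driving the ListDir
// wrapper added at the end of this file from another command.
// printDirEntries is a hypothetical helper; the caller is assumed to have the
// mds-addr, path and user/password flags registered so AlignFlagsValue can
// copy their values, and to import this dir package, cmderror, fmt and cobra.
func printDirEntries(caller *cobra.Command) *cmderror.CmdError {
	resp, err := dir.ListDir(caller)
	if err.TypeCode() != cmderror.CODE_SUCCESS {
		return err
	}
	for _, info := range resp.GetFileInfo() {
		fmt.Printf("%d\t%s\n", info.GetId(), info.GetFileName())
	}
	return cmderror.Success()
}
// ---------------------------------------------------------------------------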
row[cobrautil.ROW_FILE_TYPE] = fmt.Sprintf("%v", info.GetFileType()) + row[cobrautil.ROW_OWNER] = info.GetOwner() + row[cobrautil.ROW_CTIME] = time.Unix(int64(info.GetCtime()/1000000), 0).Format("2006-01-02 15:04:05") + + // generate a query file command + fInfoCmd := file.NewQueryFileCommand() + config.AlignFlagsValue(pCmd.Cmd, fInfoCmd.Cmd, []string{ + config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR, + }) + fInfoCmd.Cmd.Flags().Set("path", fileName) + + // Get file size + sizeRes, err := file.GetFileSize(fInfoCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + errs = append(errs, err) + continue + } + row[cobrautil.ROW_FILE_SIZE] = fmt.Sprintf("%s", humanize.IBytes(sizeRes.GetFileSize())) + // Get allocated size + allocRes, err := file.GetAllocatedSize(fInfoCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + errs = append(errs, err) + continue + } + row[cobrautil.ROW_ALLOC_SIZE] = fmt.Sprintf("%s", humanize.IBytes(allocRes.GetAllocatedSize())) + rows = append(rows, row) } list := cobrautil.ListMap2ListSortByKeys(rows, pCmd.Header, []string{cobrautil.ROW_OWNER, cobrautil.ROW_FILE_TYPE, cobrautil.ROW_PARENT_ID}) pCmd.TableNew.AppendBulk(list) - errRet := cmderror.MergeCmdError(errors) - pCmd.Error = &errRet - pCmd.Result = results + if len(errs) != 0 { + mergeErr := cmderror.MergeCmdError(errs) + pCmd.Result, pCmd.Error = result, mergeErr + return mergeErr.ToError() + } + pCmd.Result, pCmd.Error = result, cmderror.Success() return nil } @@ -220,3 +215,21 @@ func (pCmd *DirCommand) RunCommand(cmd *cobra.Command, args []string) error { func (pCmd *DirCommand) ResultPlainOutput() error { return output.FinalCmdOutputPlain(&pCmd.FinalCurveCmd) } + +func ListDir(caller *cobra.Command) (*nameserver2.ListDirResponse, *cmderror.CmdError) { + lsCmd := NewListDirCommand() + config.AlignFlagsValue(caller, lsCmd.Cmd, []string{ + config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR, + config.CURVEBS_PATH, config.CURVEBS_USER, config.CURVEBS_PASSWORD, + }) + lsCmd.Cmd.SilenceErrors = true + lsCmd.Cmd.SilenceUsage = true + lsCmd.Cmd.SetArgs([]string{"--format", config.FORMAT_NOOUT}) + err := lsCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsListDir() + retErr.Format(err.Error()) + return lsCmd.Response, retErr + } + return lsCmd.Response, cmderror.Success() +} diff --git a/tools-v2/pkg/cli/command/curvebs/list/list.go b/tools-v2/pkg/cli/command/curvebs/list/list.go index a7c7f81e61..e85bffbe73 100644 --- a/tools-v2/pkg/cli/command/curvebs/list/list.go +++ b/tools-v2/pkg/cli/command/curvebs/list/list.go @@ -24,10 +24,13 @@ package list import ( basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/list/chunkserver" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/list/client" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/list/dir" logicalpool "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/list/logicalPool" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/list/scanstatus" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/list/server" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/list/space" "github.com/spf13/cobra" ) @@ -43,6 +46,9 @@ func (listCmd *ListCommand) AddSubCommands() { server.NewServerCommand(), client.NewClientCommand(), dir.NewDirCommand(), + space.NewSpaceCommand(), + chunkserver.NewChunkServerCommand(), + scanstatus.NewScanStatusCommand(), ) } diff --git 
a/tools-v2/pkg/cli/command/curvebs/list/logicalPool/logicalPool.go b/tools-v2/pkg/cli/command/curvebs/list/logicalPool/logicalPool.go index 69e010e749..699ed9a811 100644 --- a/tools-v2/pkg/cli/command/curvebs/list/logicalPool/logicalPool.go +++ b/tools-v2/pkg/cli/command/curvebs/list/logicalPool/logicalPool.go @@ -43,8 +43,6 @@ import ( const ( logicalPoolExample = `$ curve bs list logical-pool` - RECYCLEBINDIRNAME = "RecycleBin" - RECYCLEBINDIR = "/" + RECYCLEBINDIRNAME ) type ListLogicalPoolRpc struct { @@ -57,9 +55,13 @@ var _ basecmd.RpcFunc = (*ListLogicalPoolRpc)(nil) // check interface type LogicalPoolCommand struct { basecmd.FinalCurveCmd - Rpc []*ListLogicalPoolRpc - Metric *basecmd.Metric - RecycleAlloc *nameserver2.GetAllocatedSizeResponse + Rpc []*ListLogicalPoolRpc + Metric *basecmd.Metric + recycleAllocRes *nameserver2.GetAllocatedSizeResponse + logicalPoolInfo []*topology.ListLogicalPoolResponse + totalCapacity uint64 + allocatedSize uint64 + recycleAllocSize uint64 } var _ basecmd.FinalCurveCmdFunc = (*LogicalPoolCommand)(nil) // check interface @@ -124,14 +126,15 @@ func (lCmd *LogicalPoolCommand) Init(cmd *cobra.Command, args []string) error { lCmd.TableNew.SetAutoMergeCellsByColumnIndex(cobrautil.GetIndexSlice( lCmd.Header, []string{cobrautil.ROW_PHYPOOL}, )) - lCmd.Cmd.Flags().String(config.CURVEBS_PATH, RECYCLEBINDIR, "file path") + lCmd.Cmd.Flags().String(config.CURVEBS_PATH, cobrautil.RECYCLEBIN_PATH, "file path") lCmd.Cmd.Flag(config.CURVEBS_PATH).Changed = true lCmd.Metric = basecmd.NewMetric(mdsAddrs, "", timeout) res, err := file.GetAllocatedSize(lCmd.Cmd) if err.TypeCode() != cmderror.CODE_SUCCESS { return err.ToError() } - lCmd.RecycleAlloc = res + lCmd.recycleAllocRes = res + lCmd.recycleAllocSize = res.GetAllocatedSize() return nil } @@ -154,7 +157,11 @@ func (lCmd *LogicalPoolCommand) RunCommand(cmd *cobra.Command, args []string) er rows := make([]map[string]string, 0) var errors []*cmderror.CmdError for _, res := range results { + if res == nil { + continue + } infos := res.(*topology.ListLogicalPoolResponse) + lCmd.logicalPoolInfo = append(lCmd.logicalPoolInfo, infos) for _, loPoolInfo := range infos.GetLogicalPoolInfos() { row := make(map[string]string) row[cobrautil.ROW_ID] = fmt.Sprintf("%d", loPoolInfo.GetLogicalPoolID()) @@ -163,52 +170,30 @@ func (lCmd *LogicalPoolCommand) RunCommand(cmd *cobra.Command, args []string) er row[cobrautil.ROW_TYPE] = loPoolInfo.GetType().String() row[cobrautil.ROW_ALLOC] = loPoolInfo.GetAllocateStatus().String() row[cobrautil.ROW_SCAN] = fmt.Sprintf("%t", loPoolInfo.GetScanEnable()) - + total := uint64(0) // capacity metricName := cobrautil.GetPoolLogicalCapacitySubUri(loPoolInfo.GetLogicalPoolName()) - lCmd.Metric.SubUri = metricName - metric, err := basecmd.QueryMetric(lCmd.Metric) + value, err := lCmd.queryMetric(metricName) if err.TypeCode() != cmderror.CODE_SUCCESS { errors = append(errors, err) - } else { - valueStr, err := basecmd.GetMetricValue(metric) - if err.TypeCode() != cmderror.CODE_SUCCESS { - errors = append(errors, err) - } - value, errP := strconv.ParseUint(valueStr, 10, 64) - if errP != nil { - pErr := cmderror.ErrParse() - pErr.Format(metricName, pErr) - errors = append(errors, pErr) - } - row[cobrautil.ROW_TOTAL] = humanize.IBytes(value) - total = value } + row[cobrautil.ROW_TOTAL] = humanize.IBytes(value) + total = value + lCmd.totalCapacity += value // alloc size metricName = cobrautil.GetPoolLogicalAllocSubUri(loPoolInfo.GetLogicalPoolName()) - lCmd.Metric.SubUri = metricName - metric, err = 
basecmd.QueryMetric(lCmd.Metric) + value, err = lCmd.queryMetric(metricName) if err.TypeCode() != cmderror.CODE_SUCCESS { errors = append(errors, err) - } else { - valueStr, err := basecmd.GetMetricValue(metric) - if err.TypeCode() != cmderror.CODE_SUCCESS { - errors = append(errors, err) - } - value, errP := strconv.ParseUint(valueStr, 10, 64) - if errP != nil { - pErr := cmderror.ErrParse() - pErr.Format(metricName, pErr) - errors = append(errors, pErr) - } - row[cobrautil.ROW_USED] = humanize.IBytes(value) - row[cobrautil.ROW_LEFT] = humanize.IBytes(total - value) } + row[cobrautil.ROW_USED] = humanize.IBytes(value) + row[cobrautil.ROW_LEFT] = humanize.IBytes(total - value) + lCmd.allocatedSize += value // recycle - recycle := lCmd.RecycleAlloc.AllocSizeMap[loPoolInfo.GetLogicalPoolID()] + recycle := lCmd.recycleAllocRes.AllocSizeMap[loPoolInfo.GetLogicalPoolID()] row[cobrautil.ROW_RECYCLE] = humanize.IBytes(recycle) rows = append(rows, row) } @@ -218,10 +203,48 @@ func (lCmd *LogicalPoolCommand) RunCommand(cmd *cobra.Command, args []string) er }) lCmd.TableNew.AppendBulk(list) errRet := cmderror.MergeCmdError(errors) - lCmd.Error = &errRet + lCmd.Error = errRet + lCmd.Result = rows return nil } func (lCmd *LogicalPoolCommand) ResultPlainOutput() error { return output.FinalCmdOutputPlain(&lCmd.FinalCurveCmd) } + +func (lCmd *LogicalPoolCommand) queryMetric(metricName string) (uint64, *cmderror.CmdError) { + lCmd.Metric.SubUri = metricName + metric, err := basecmd.QueryMetric(lCmd.Metric) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return 0, err + } else { + valueStr, err := basecmd.GetMetricValue(metric) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return 0, err + } + value, errP := strconv.ParseUint(valueStr, 10, 64) + if errP != nil { + pErr := cmderror.ErrParse() + pErr.Format(metricName, pErr) + return 0, err + } + return value, cmderror.Success() + } +} + +func ListLogicalPoolInfoAndAllocSize(caller *cobra.Command) ([]*topology.ListLogicalPoolResponse, uint64, uint64, uint64, *cmderror.CmdError) { + listCmd := NewListLogicalPoolCommand() + config.AlignFlagsValue(caller, listCmd.Cmd, []string{ + config.CURVEBS_MDSADDR, config.RPCRETRYTIMES, config.RPCTIMEOUT, + }) + listCmd.Cmd.SilenceErrors = true + listCmd.Cmd.SilenceUsage = true + listCmd.Cmd.SetArgs([]string{fmt.Sprintf("--%s", config.FORMAT), config.FORMAT_NOOUT}) + err := listCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsListLogicalPoolInfo() + retErr.Format(err.Error()) + return nil, 0, 0, 0, retErr + } + return listCmd.logicalPoolInfo, listCmd.totalCapacity, listCmd.allocatedSize, listCmd.recycleAllocSize, cmderror.Success() +} diff --git a/tools-v2/pkg/cli/command/curvebs/list/scanstatus/scanstatus.go b/tools-v2/pkg/cli/command/curvebs/list/scanstatus/scanstatus.go new file mode 100644 index 0000000000..ef47434b7d --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/list/scanstatus/scanstatus.go @@ -0,0 +1,120 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */ +/* +* Project: curve +* Created Date: 2023-05-18 +* Author: lianzhanbiao + */ + +package scanstatus + +import ( + "fmt" + "time" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/list/copyset" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/common" + "github.com/spf13/cobra" +) + +const ( + copysetExample = `$ curve bs list scan-status` +) + +type ScanStatusCommand struct { + CopysetInfoList []*common.CopysetInfo + basecmd.FinalCurveCmd +} + +var _ basecmd.FinalCurveCmdFunc = (*ScanStatusCommand)(nil) // check interface + +func NewListScanStatusCommand() *ScanStatusCommand { + scanStatusCommand := &ScanStatusCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "scan-status", + Short: "list curvebs all copyset that scanning is false", + Example: copysetExample, + }, + } + + basecmd.NewFinalCurveCli(&scanStatusCommand.FinalCurveCmd, scanStatusCommand) + return scanStatusCommand +} + +func NewScanStatusCommand() *cobra.Command { + return NewListScanStatusCommand().Cmd +} + +func (sCmd *ScanStatusCommand) AddFlags() { + config.AddRpcTimeoutFlag(sCmd.Cmd) + config.AddRpcRetryTimesFlag(sCmd.Cmd) + config.AddBsMdsFlagOption(sCmd.Cmd) +} + +func (sCmd *ScanStatusCommand) Init(cmd *cobra.Command, args []string) error { + header := []string{ + cobrautil.ROW_LOGICALPOOL, + cobrautil.ROW_COPYSET_ID, + cobrautil.ROW_SCAN, + cobrautil.ROW_LASTSCAN, + cobrautil.ROW_LAST_SCAN_CONSISTENT, + } + sCmd.SetHeader(header) + sCmd.TableNew.SetAutoMergeCellsByColumnIndex(cobrautil.GetIndexSlice( + sCmd.Header, []string{cobrautil.ROW_COPYSET_ID, cobrautil.ROW_POOL_ID}, + )) + return nil +} + +func (sCmd *ScanStatusCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&sCmd.FinalCurveCmd, sCmd) +} + +func (sCmd *ScanStatusCommand) RunCommand(cmd *cobra.Command, args []string) error { + config.AddBsFilterOptionFlag(sCmd.Cmd) + sCmd.Cmd.ParseFlags([]string{fmt.Sprintf("--%s=%s", config.CURVEBS_FIlTER, cobrautil.FALSE_STRING)}) + copysetInfoList, err := copyset.GetCopySetsInCluster(cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + sCmd.CopysetInfoList = copysetInfoList + rows := make([]map[string]string, 0) + for _, info := range sCmd.CopysetInfoList { + row := make(map[string]string) + row[cobrautil.ROW_LOGICALPOOL] = fmt.Sprintf("%d", info.GetLogicalPoolId()) + row[cobrautil.ROW_COPYSET_ID] = fmt.Sprintf("%d", info.GetCopysetId()) + row[cobrautil.ROW_SCAN] = fmt.Sprintf("%v", info.GetScaning()) + row[cobrautil.ROW_LASTSCAN] = fmt.Sprintf("%v", time.Unix(int64(info.GetLastScanSec()), 0)) + row[cobrautil.ROW_LAST_SCAN_CONSISTENT] = fmt.Sprintf("%v", info.GetLastScanConsistent()) + rows = append(rows, row) + } + list := cobrautil.ListMap2ListSortByKeys(rows, sCmd.Header, []string{ + cobrautil.ROW_LOGICALPOOL, + cobrautil.ROW_COPYSET_ID, + }) + sCmd.TableNew.AppendBulk(list) + sCmd.Error = cmderror.ErrSuccess() + return nil +} + +func (sCmd *ScanStatusCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&sCmd.FinalCurveCmd) +} diff --git a/tools-v2/pkg/cli/command/curvebs/list/server/server.go b/tools-v2/pkg/cli/command/curvebs/list/server/server.go index 2c0796b1ef..a659edf452 100644 --- 
a/tools-v2/pkg/cli/command/curvebs/list/server/server.go +++ b/tools-v2/pkg/cli/command/curvebs/list/server/server.go @@ -51,7 +51,8 @@ var _ basecmd.RpcFunc = (*ListServerRpc)(nil) // check interface type ServerCommand struct { basecmd.FinalCurveCmd - Rpc []*ListServerRpc + Rpc []*ListServerRpc + Servers []*topology.ServerInfo } var _ basecmd.FinalCurveCmdFunc = (*ServerCommand)(nil) // check interface @@ -108,7 +109,7 @@ func (pCmd *ServerCommand) Init(cmd *cobra.Command, args []string) error { } pCmd.Rpc = append(pCmd.Rpc, rpc) } - header := []string{cobrautil.ROW_ID, cobrautil.ROW_HOSTNAME, + header := []string{cobrautil.ROW_ID, cobrautil.ROW_HOSTNAME, cobrautil.ROW_ZONE, cobrautil.ROW_PHYPOOL, cobrautil.ROW_INTERNAL_ADDR, cobrautil.ROW_EXTERNAL_ADDR, } @@ -124,20 +125,23 @@ func (pCmd *ServerCommand) Print(cmd *cobra.Command, args []string) error { } func (pCmd *ServerCommand) RunCommand(cmd *cobra.Command, args []string) error { - var infos []*basecmd.Rpc + var rpcs []*basecmd.Rpc var funcs []basecmd.RpcFunc for _, rpc := range pCmd.Rpc { - infos = append(infos, rpc.Info) + rpcs = append(rpcs, rpc.Info) funcs = append(funcs, rpc) } - results, errs := basecmd.GetRpcListResponse(infos, funcs) - if len(errs) == len(infos) { + results, errs := basecmd.GetRpcListResponse(rpcs, funcs) + if len(errs) == len(rpcs) { mergeErr := cmderror.MergeCmdErrorExceptSuccess(errs) return mergeErr.ToError() } var errors []*cmderror.CmdError rows := make([]map[string]string, 0) for _, res := range results { + if res == nil { + continue + } infos := res.(*topology.ListZoneServerResponse).GetServerInfo() for _, info := range infos { row := make(map[string]string) @@ -145,19 +149,20 @@ func (pCmd *ServerCommand) RunCommand(cmd *cobra.Command, args []string) error { row[cobrautil.ROW_HOSTNAME] = info.GetHostName() row[cobrautil.ROW_ZONE] = fmt.Sprintf("%d", info.GetZoneID()) row[cobrautil.ROW_PHYPOOL] = fmt.Sprintf("%d", info.GetPhysicalPoolID()) - row[cobrautil.ROW_INTERNAL_ADDR] = fmt.Sprintf("%s:%d", + row[cobrautil.ROW_INTERNAL_ADDR] = fmt.Sprintf("%s:%d", info.GetInternalIp(), info.GetInternalPort()) row[cobrautil.ROW_EXTERNAL_ADDR] = fmt.Sprintf("%s:%d", info.GetExternalIp(), info.GetExternalPort()) rows = append(rows, row) } + pCmd.Servers = append(pCmd.Servers, infos...) 
} - list := cobrautil.ListMap2ListSortByKeys(rows, pCmd.Header, []string { + list := cobrautil.ListMap2ListSortByKeys(rows, pCmd.Header, []string{ cobrautil.ROW_PHYPOOL, cobrautil.ROW_ZONE, }) pCmd.TableNew.AppendBulk(list) errRet := cmderror.MergeCmdError(errors) - pCmd.Error = &errRet + pCmd.Error = errRet pCmd.Result = results return nil } @@ -165,3 +170,19 @@ func (pCmd *ServerCommand) RunCommand(cmd *cobra.Command, args []string) error { func (pCmd *ServerCommand) ResultPlainOutput() error { return output.FinalCmdOutputPlain(&pCmd.FinalCurveCmd) } + +func ListServer(caller *cobra.Command) ([]*topology.ServerInfo, *cmderror.CmdError) { + listServer := NewListServerCommand() + listServer.Cmd.SetArgs([]string{"--format", config.FORMAT_NOOUT}) + listServer.Cmd.SilenceErrors = true + config.AlignFlagsValue(caller, listServer.Cmd, []string{ + config.CURVEBS_MDSADDR, + }) + err := listServer.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsListZone() + retErr.Format(err.Error()) + return listServer.Servers, retErr + } + return listServer.Servers, cmderror.ErrSuccess() +} diff --git a/tools-v2/pkg/cli/command/curvebs/list/space/space.go b/tools-v2/pkg/cli/command/curvebs/list/space/space.go new file mode 100644 index 0000000000..9e7f03a1bb --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/list/space/space.go @@ -0,0 +1,189 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */ +/* +* Project: curve +* Created Date: 2023-04-17 +* Author: chengyi01 + */ +package space + +import ( + "fmt" + "strconv" + + "github.com/dustin/go-humanize" + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + logicalpool "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/list/logicalPool" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/query/file" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/topology" + "github.com/spf13/cobra" +) + +const ( + serverExample = `$ curve bs list space` +) + +type SpaceCommand struct { + basecmd.FinalCurveCmd + Metric *basecmd.Metric + TotalChunkSize uint64 // total chunk size + UsedChunkSize uint64 // used chunk size + TotalCapacity uint64 // total capacity + AllocatedSize uint64 // total allocated size + RecycleAllocSize uint64 // recycle allocated size + CurrentFileSize uint64 // root dir size +} + +var _ basecmd.FinalCurveCmdFunc = (*SpaceCommand)(nil) // check interface + +func NewSpaceCommand() *cobra.Command { + return NewListSpaceCommand().Cmd +} + +func NewListSpaceCommand() *SpaceCommand { + lsCmd := &SpaceCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "space", + Short: "show curvebs all disk type space, include total space and used space", + Example: serverExample, + }, + } + + basecmd.NewFinalCurveCli(&lsCmd.FinalCurveCmd, lsCmd) + return lsCmd +} + +func (sCmd *SpaceCommand) AddFlags() { + config.AddHttpTimeoutFlag(sCmd.Cmd) + config.AddBsMdsFlagOption(sCmd.Cmd) + config.AddRpcRetryTimesFlag(sCmd.Cmd) + config.AddRpcTimeoutFlag(sCmd.Cmd) +} + +func (sCmd *SpaceCommand) Init(cmd *cobra.Command, args []string) error { + logicalRes, totalCapacity, allocatedSize, recycleAllocSize, err := logicalpool.ListLogicalPoolInfoAndAllocSize(sCmd.Cmd) + sCmd.TotalCapacity = totalCapacity + sCmd.AllocatedSize = allocatedSize + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + logicalPoolInfos := make([]*topology.LogicalPoolInfo, 0) + for _, info := range logicalRes { + logicalPoolInfos = append(logicalPoolInfos, info.GetLogicalPoolInfos()...) 
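// ---------------------------------------------------------------------------
// Illustrative sketch only, not part of this patch: the bvar metric read
// pattern that the queryMetric helpers in logicalPool.go and space.go wrap
// (QueryMetric -> GetMetricValue -> strconv.ParseUint). readPoolMetric is a
// hypothetical standalone helper; it assumes the same basecmd/cmderror imports
// as the surrounding files plus "strconv", and wraps a failed integer parse
// into a CmdError before returning it.
func readPoolMetric(m *basecmd.Metric, subUri string) (uint64, *cmderror.CmdError) {
	m.SubUri = subUri
	metric, err := basecmd.QueryMetric(m)
	if err.TypeCode() != cmderror.CODE_SUCCESS {
		return 0, err
	}
	valueStr, err := basecmd.GetMetricValue(metric)
	if err.TypeCode() != cmderror.CODE_SUCCESS {
		return 0, err
	}
	value, parseErr := strconv.ParseUint(valueStr, 10, 64)
	if parseErr != nil {
		pErr := cmderror.ErrParse()
		pErr.Format(subUri, parseErr.Error())
		return 0, pErr
	}
	return value, cmderror.Success()
}
// ---------------------------------------------------------------------------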
+    }
+    sCmd.RecycleAllocSize = recycleAllocSize
+
+    sCmd.TotalChunkSize = 0
+    timeout := config.GetFlagDuration(sCmd.Cmd, config.HTTPTIMEOUT)
+    mainAddrs, addrErr := config.GetBsMdsAddrSlice(sCmd.Cmd)
+    if addrErr.TypeCode() != cmderror.CODE_SUCCESS {
+        return addrErr.ToError()
+    }
+    sCmd.Metric = basecmd.NewMetric(mainAddrs, "", timeout)
+    for _, lgPool := range logicalPoolInfos {
+        // total chunk size
+        metricName := cobrautil.GetPoolTotalChunkSizeName(lgPool.GetLogicalPoolName())
+        value, err := sCmd.queryMetric(metricName)
+        if err.TypeCode() != cmderror.CODE_SUCCESS {
+            return err.ToError()
+        }
+        sCmd.TotalChunkSize += value
+
+        // used chunk size
+        metricName = cobrautil.GetPoolUsedChunkSizeName(lgPool.GetLogicalPoolName())
+        value, err = sCmd.queryMetric(metricName)
+        if err.TypeCode() != cmderror.CODE_SUCCESS {
+            return err.ToError()
+        }
+        sCmd.UsedChunkSize += value
+    }
+
+    // query the root directory size; it fills the "created" column of the logical row
+    config.AddBsPathRequiredFlag(sCmd.Cmd)
+    sCmd.Cmd.ParseFlags([]string{
+        fmt.Sprintf("--%s", config.CURVEBS_PATH),
+        cobrautil.ROOT_PATH,
+    })
+
+    rootSizeRes, err := file.GetFileSize(sCmd.Cmd)
+    if err.TypeCode() != cmderror.CODE_SUCCESS {
+        return err.ToError()
+    }
+    sCmd.CurrentFileSize = rootSizeRes.GetFileSize()
+
+    sCmd.SetHeader([]string{cobrautil.ROW_TYPE, cobrautil.ROW_TOTAL, cobrautil.ROW_USED,
+        cobrautil.ROW_LEFT, cobrautil.ROW_RECYCLABLE, cobrautil.ROW_CREATED,
+    })
+
+    return nil
+}
+
+func (sCmd *SpaceCommand) queryMetric(metricName string) (uint64, *cmderror.CmdError) {
+    sCmd.Metric.SubUri = metricName
+    metric, err := basecmd.QueryMetric(sCmd.Metric)
+    if err.TypeCode() != cmderror.CODE_SUCCESS {
+        return 0, err
+    }
+    valueStr, err := basecmd.GetMetricValue(metric)
+    if err.TypeCode() != cmderror.CODE_SUCCESS {
+        return 0, err
+    }
+    value, errP := strconv.ParseUint(valueStr, 10, 64)
+    if errP != nil {
+        pErr := cmderror.ErrParse()
+        pErr.Format(metricName, errP)
+        return 0, pErr
+    }
+    return value, cmderror.Success()
+}
+
+func (sCmd *SpaceCommand) Print(cmd *cobra.Command, args []string) error {
+    return output.FinalCmdOutput(&sCmd.FinalCurveCmd, sCmd)
+}
+
+func (sCmd *SpaceCommand) RunCommand(cmd *cobra.Command, args []string) error {
+    rows := make([]map[string]string, 0)
+    // physical space: summed from the per-pool chunk-size metrics
+    row := make(map[string]string)
+    row[cobrautil.ROW_TYPE] = cobrautil.ROW_VALUE_PHYSICAL
+    row[cobrautil.ROW_TOTAL] = humanize.IBytes(sCmd.TotalChunkSize)
+    row[cobrautil.ROW_USED] = humanize.IBytes(sCmd.UsedChunkSize)
+    row[cobrautil.ROW_LEFT] = humanize.IBytes(sCmd.TotalChunkSize - sCmd.UsedChunkSize)
+    row[cobrautil.ROW_RECYCLABLE] = cobrautil.ROW_VALUE_NO_VALUE
+    row[cobrautil.ROW_CREATED] = cobrautil.ROW_VALUE_NO_VALUE
+    rows = append(rows, row)
+
+    // logical space: taken from the logical pool capacity and allocation
+    row = make(map[string]string)
+    row[cobrautil.ROW_TYPE] = cobrautil.ROW_VALUE_LOGICAL
+    row[cobrautil.ROW_TOTAL] = humanize.IBytes(sCmd.TotalCapacity)
+    row[cobrautil.ROW_USED] = humanize.IBytes(sCmd.AllocatedSize)
+    row[cobrautil.ROW_LEFT] = humanize.IBytes(sCmd.TotalCapacity - sCmd.AllocatedSize)
+    row[cobrautil.ROW_RECYCLABLE] = humanize.IBytes(sCmd.RecycleAllocSize)
+    row[cobrautil.ROW_CREATED] = humanize.IBytes(sCmd.CurrentFileSize)
+    rows = append(rows, row)
+    list := cobrautil.ListMap2ListSortByKeys(rows, sCmd.Header, []string{})
+    sCmd.TableNew.AppendBulk(list)
+    sCmd.Error = cmderror.Success()
+    sCmd.Result = rows
+    return nil
+}
+
+func (sCmd *SpaceCommand) ResultPlainOutput() error {
+    return output.FinalCmdOutputPlain(&sCmd.FinalCurveCmd)
+}
diff --git a/tools-v2/pkg/cli/command/curvebs/list/unavailcopysets/unavailcopysets.go
b/tools-v2/pkg/cli/command/curvebs/list/unavailcopysets/unavailcopysets.go new file mode 100644 index 0000000000..f9e9f5a103 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/list/unavailcopysets/unavailcopysets.go @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: CurveCli + * Created Date: 2023-04-24 + * Author: baytan + */ + +package unavailcopysets + +import ( + "context" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + common "github.com/opencurve/curve/tools-v2/proto/proto/common" + "github.com/opencurve/curve/tools-v2/proto/proto/topology" + "github.com/opencurve/curve/tools-v2/proto/proto/topology/statuscode" + "github.com/spf13/cobra" + "google.golang.org/grpc" +) + +type ListUnAvailCopySets struct { + Info *basecmd.Rpc + Request *topology.ListUnAvailCopySetsRequest + topologyClient topology.TopologyServiceClient +} + +var _ basecmd.RpcFunc = (*ListUnAvailCopySets)(nil) // check interface + +func (lRpc *ListUnAvailCopySets) NewRpcClient(cc grpc.ClientConnInterface) { + lRpc.topologyClient = topology.NewTopologyServiceClient(cc) +} + +func (lRpc *ListUnAvailCopySets) Stub_Func(ctx context.Context) (interface{}, error) { + return lRpc.topologyClient.ListUnAvailCopySets(ctx, lRpc.Request) +} + +type UnAvailCopySetsCommand struct { + basecmd.FinalCurveCmd + Rpc *ListUnAvailCopySets + response []*common.CopysetInfo +} + +var _ basecmd.FinalCurveCmdFunc = (*UnAvailCopySetsCommand)(nil) // check interface + +func NewUnAvailCopySetsCommand() *cobra.Command { + return NewListUnAvailCopySetsCommand().Cmd +} + +func NewListUnAvailCopySetsCommand() *UnAvailCopySetsCommand { + uCmd := &UnAvailCopySetsCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{}, + } + + basecmd.NewFinalCurveCli(&uCmd.FinalCurveCmd, uCmd) + return uCmd +} + +func (uCmd *UnAvailCopySetsCommand) AddFlags() { + config.AddBsMdsFlagOption(uCmd.Cmd) + config.AddRpcRetryTimesFlag(uCmd.Cmd) + config.AddRpcTimeoutFlag(uCmd.Cmd) +} + +func (uCmd *UnAvailCopySetsCommand) Init(cmd *cobra.Command, args []string) error { + mdsAddrs, err := config.GetBsMdsAddrSlice(uCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + timeout := config.GetFlagDuration(uCmd.Cmd, config.RPCTIMEOUT) + retrytimes := config.GetFlagInt32(uCmd.Cmd, config.RPCRETRYTIMES) + uCmd.Rpc = &ListUnAvailCopySets{ + Request: &topology.ListUnAvailCopySetsRequest{}, + Info: basecmd.NewRpc(mdsAddrs, timeout, retrytimes, "ListUnAvailCopySets"), + } + return nil +} + +func (uCmd *UnAvailCopySetsCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&uCmd.FinalCurveCmd, uCmd) +} + +func (uCmd *UnAvailCopySetsCommand) RunCommand(cmd *cobra.Command, args []string) error { + result, errCmd := basecmd.GetRpcResponse(uCmd.Rpc.Info, uCmd.Rpc) + if 
errCmd.TypeCode() != cmderror.CODE_SUCCESS { + return errCmd.ToError() + } + response := result.(*topology.ListUnAvailCopySetsResponse) + if response.StatusCode == nil || + response.GetStatusCode() != int32(statuscode.TopoStatusCode_Success) { + code := statuscode.TopoStatusCode(response.GetStatusCode()) + return cmderror.ErrBsListPhysicalPoolRpc(code).ToError() + } + uCmd.response = response.Copysets + return nil +} + +func (uCmd *UnAvailCopySetsCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&uCmd.FinalCurveCmd) +} + +func GetUnAvailCopySets(caller *cobra.Command) ([]*common.CopysetInfo, *cmderror.CmdError) { + listUnAvailCopySets := NewListUnAvailCopySetsCommand() + listUnAvailCopySets.Cmd.SetArgs([]string{"--format", config.FORMAT_NOOUT}) + listUnAvailCopySets.Cmd.SilenceErrors = true + config.AlignFlagsValue(caller, listUnAvailCopySets.Cmd, []string{ + config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR, + }) + err := listUnAvailCopySets.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsGetUnavailCopysets() + retErr.Format(err.Error()) + return nil, retErr + } + return listUnAvailCopySets.response, cmderror.ErrSuccess() +} diff --git a/tools-v2/pkg/cli/command/curvebs/list/zone/zone.go b/tools-v2/pkg/cli/command/curvebs/list/zone/zone.go index 5fcfe4f741..ca7e31e0c1 100644 --- a/tools-v2/pkg/cli/command/curvebs/list/zone/zone.go +++ b/tools-v2/pkg/cli/command/curvebs/list/zone/zone.go @@ -120,8 +120,10 @@ func (pCmd *PoolZoneCommand) RunCommand(cmd *cobra.Command, args []string) error mergeErr := cmderror.MergeCmdErrorExceptSuccess(errs) return mergeErr.ToError() } - var errors []*cmderror.CmdError for _, res := range results { + if res == nil { + continue + } info := res.(*topology.ListPoolZoneResponse) if info.GetStatusCode() != int32(statuscode.TopoStatusCode_Success) { err := cmderror.ErrBsListPoolZoneRpc( @@ -133,8 +135,8 @@ func (pCmd *PoolZoneCommand) RunCommand(cmd *cobra.Command, args []string) error zones := info.GetZones() pCmd.Zones = append(pCmd.Zones, zones...) } - errRet := cmderror.MergeCmdError(errors) - pCmd.Error = &errRet + errRet := cmderror.MergeCmdError(errs) + pCmd.Error = errRet return nil } diff --git a/tools-v2/pkg/cli/command/curvebs/query/chunk/chunk.go b/tools-v2/pkg/cli/command/curvebs/query/chunk/chunk.go new file mode 100644 index 0000000000..2a48ccdee9 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/query/chunk/chunk.go @@ -0,0 +1,139 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */ +/* +* Project: curve +* Created Date: 2023-04-12 +* Author: chengyi01 + */ + +package chunk + +import ( + "fmt" + "strings" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/query/chunkserver" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/common" + "github.com/opencurve/curve/tools-v2/proto/proto/nameserver2" + "github.com/spf13/cobra" +) + +type ChunkCommand struct { + basecmd.FinalCurveCmd + FileInfo *nameserver2.FileInfo + ChunkId uint64 + LogicalpoolId uint32 + CopysetId uint32 + GroupId uint64 + ChunkServerList []*common.ChunkServerLocation +} + +var _ basecmd.FinalCurveCmdFunc = (*ChunkCommand)(nil) // check interface + +const ( + chunkExample = `$ curve bs query chunk --path /pagefile --offset 1024` +) + +func NewQueryChunkCommand() *ChunkCommand { + chunkCmd := &ChunkCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "chunk", + Short: "query the location of the chunk corresponding to the offset", + Example: chunkExample, + }, + } + + basecmd.NewFinalCurveCli(&chunkCmd.FinalCurveCmd, chunkCmd) + return chunkCmd +} + +func NewChunkCommand() *cobra.Command { + return NewQueryChunkCommand().Cmd +} + +func (cCmd *ChunkCommand) AddFlags() { + config.AddBsMdsFlagOption(cCmd.Cmd) + config.AddRpcRetryTimesFlag(cCmd.Cmd) + config.AddRpcTimeoutFlag(cCmd.Cmd) + config.AddBsPathRequiredFlag(cCmd.Cmd) + config.AddBsOffsetRequiredFlag(cCmd.Cmd) + config.AddBsUserOptionFlag(cCmd.Cmd) + config.AddBsPasswordOptionFlag(cCmd.Cmd) +} + +func (cCmd *ChunkCommand) Init(cmd *cobra.Command, args []string) error { + var err *cmderror.CmdError + cCmd.ChunkId, cCmd.LogicalpoolId, cCmd.CopysetId, err = QueryChunkCopyset(cCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + config.AddBsLogicalPoolIdRequiredFlag(cCmd.Cmd) + config.AddBsCopysetIdSliceRequiredFlag(cCmd.Cmd) + cCmd.Cmd.ParseFlags([]string{ + fmt.Sprintf("--%s", config.CURVEBS_LOGIC_POOL_ID), + fmt.Sprintf("%d", cCmd.LogicalpoolId), + fmt.Sprintf("--%s", config.CURVEBS_COPYSET_ID), + fmt.Sprintf("%d", cCmd.CopysetId), + }) + key2Location, err := chunkserver.GetChunkServerListInCopySets(cCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + key := cobrautil.GetCopysetKey(uint64(cCmd.LogicalpoolId), uint64(cCmd.CopysetId)) + cCmd.ChunkServerList = (*key2Location)[key] + header := []string{cobrautil.ROW_CHUNK, cobrautil.ROW_LOGICALPOOL, + cobrautil.ROW_COPYSET, cobrautil.ROW_GROUP, cobrautil.ROW_LOCATION, + } + cCmd.SetHeader(header) + cCmd.TableNew.SetAutoMergeCellsByColumnIndex( + cobrautil.GetIndexSlice(header, []string{cobrautil.ROW_CHUNK, + cobrautil.ROW_LOGICALPOOL, cobrautil.ROW_COPYSET, cobrautil.ROW_GROUP, + }), + ) + + return nil +} + +func (cCmd *ChunkCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&cCmd.FinalCurveCmd, cCmd) +} + +func (cCmd *ChunkCommand) RunCommand(cmd *cobra.Command, args []string) error { + cCmd.GroupId = (uint64(cCmd.LogicalpoolId) << 32) | uint64(cCmd.CopysetId) + locations := []string{} + for _, chunkServer := range cCmd.ChunkServerList { + locations = append(locations, fmt.Sprintf("%s:%d", chunkServer.GetHostIp(), chunkServer.GetPort())) + } + location := strings.Join(locations, "\n") + row := 
make(map[string]string) + row[cobrautil.ROW_CHUNK] = fmt.Sprintf("%d", cCmd.ChunkId) + row[cobrautil.ROW_LOGICALPOOL] = fmt.Sprintf("%d", cCmd.LogicalpoolId) + row[cobrautil.ROW_COPYSET] = fmt.Sprintf("%d", cCmd.CopysetId) + row[cobrautil.ROW_GROUP] = fmt.Sprintf("%d", cCmd.GroupId) + row[cobrautil.ROW_LOCATION] = location + list := cobrautil.Map2List(row, cCmd.Header) + cCmd.TableNew.Append(list) + return nil +} + +func (cCmd *ChunkCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&cCmd.FinalCurveCmd) +} diff --git a/tools-v2/pkg/cli/command/curvebs/query/chunk/chunkinfo.go b/tools-v2/pkg/cli/command/curvebs/query/chunk/chunkinfo.go new file mode 100644 index 0000000000..dfe3d451c7 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/query/chunk/chunkinfo.go @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: tools-v2 + * Created Date: 2023-04-24 + * Author: baytan0720 + */ + +package chunk + +import ( + "context" + "fmt" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/chunk" + "github.com/spf13/cobra" + "google.golang.org/grpc" +) + +type GetChunkInfoRpc struct { + Info *basecmd.Rpc + Request *chunk.GetChunkInfoRequest + mdsClient chunk.ChunkServiceClient +} + +var _ basecmd.RpcFunc = (*GetChunkInfoRpc)(nil) // check interface + +func (gRpc *GetChunkInfoRpc) NewRpcClient(cc grpc.ClientConnInterface) { + gRpc.mdsClient = chunk.NewChunkServiceClient(cc) +} + +func (gRpc *GetChunkInfoRpc) Stub_Func(ctx context.Context) (interface{}, error) { + return gRpc.mdsClient.GetChunkInfo(ctx, gRpc.Request) +} + +type GetChunkInfoCommand struct { + basecmd.FinalCurveCmd + Rpc []*GetChunkInfoRpc + addr2Chunk *map[string]*chunk.GetChunkInfoResponse +} + +var _ basecmd.FinalCurveCmdFunc = (*GetChunkInfoCommand)(nil) // check interface + +func NewChunkInfoCommand() *cobra.Command { + return NewGetChunkInfoCommand().Cmd +} + +func NewGetChunkInfoCommand() *GetChunkInfoCommand { + chunkCmd := &GetChunkInfoCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{}, + } + basecmd.NewFinalCurveCli(&chunkCmd.FinalCurveCmd, chunkCmd) + return chunkCmd +} + +func (cCmd *GetChunkInfoCommand) Init(cmd *cobra.Command, args []string) error { + timeout := config.GetFlagDuration(cCmd.Cmd, config.RPCTIMEOUT) + retrytimes := config.GetFlagInt32(cCmd.Cmd, config.RPCRETRYTIMES) + + logicalpoolidList := config.GetBsFlagStringSlice(cCmd.Cmd, config.CURVEBS_LOGIC_POOL_ID) + copysetidList := config.GetBsFlagStringSlice(cCmd.Cmd, config.CURVEBS_COPYSET_ID) + chunkidList := config.GetBsFlagStringSlice(cCmd.Cmd, config.CURVEBS_CHUNK_ID) + addressList := config.GetBsFlagStringSlice(cCmd.Cmd, config.CURVEBS_CHUNKSERVER_ADDRESS) + if 
len(copysetidList) != len(logicalpoolidList) || len(copysetidList) != len(chunkidList) || len(copysetidList) != len(addressList) { + return fmt.Errorf("copysetid, logicalpoolid, chunkid, address length not equal") + } + logicalpoolIds, errParse := cobrautil.StringList2Uint32List(logicalpoolidList) + if errParse != nil { + return fmt.Errorf("parse logicalpoolid %v fail", logicalpoolidList) + } + copysetIds, errParse := cobrautil.StringList2Uint32List(copysetidList) + if errParse != nil { + return fmt.Errorf("parse copysetid %v fail", copysetidList) + } + chunkIds, errParse := cobrautil.StringList2Uint64List(chunkidList) + if errParse != nil { + return fmt.Errorf("parse chunkid %v fail", chunkidList) + } + for i := 0; i < len(copysetIds); i++ { + rpc := &GetChunkInfoRpc{ + Request: &chunk.GetChunkInfoRequest{ + LogicPoolId: &logicalpoolIds[i], + CopysetId: ©setIds[i], + ChunkId: &chunkIds[i], + }, + Info: basecmd.NewRpc([]string{addressList[i]}, timeout, retrytimes, "GetChunkInfo"), + } + cCmd.Rpc = append(cCmd.Rpc, rpc) + } + return nil +} + +func (cCmd *GetChunkInfoCommand) RunCommand(cmd *cobra.Command, args []string) error { + var infos []*basecmd.Rpc + var funcs []basecmd.RpcFunc + for _, rpc := range cCmd.Rpc { + infos = append(infos, rpc.Info) + funcs = append(funcs, rpc) + } + results, errs := basecmd.GetRpcListResponse(infos, funcs) + if len(errs) == len(infos) { + mergeErr := cmderror.MergeCmdErrorExceptSuccess(errs) + return mergeErr.ToError() + } + addr2Chunk := make(map[string]*chunk.GetChunkInfoResponse) + cCmd.addr2Chunk = &addr2Chunk + for i, result := range results { + if resp, ok := result.(*chunk.GetChunkInfoResponse); ok { + (*cCmd.addr2Chunk)[cCmd.Rpc[i].Info.Addrs[0]] = resp + } else { + (*cCmd.addr2Chunk)[cCmd.Rpc[i].Info.Addrs[0]] = nil + } + } + return nil +} + +func (cCmd *GetChunkInfoCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&cCmd.FinalCurveCmd, cCmd) +} + +func (cCmd *GetChunkInfoCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&cCmd.FinalCurveCmd) +} + +func (cCmd *GetChunkInfoCommand) AddFlags() { + config.AddBsMdsFlagOption(cCmd.Cmd) + config.AddRpcRetryTimesFlag(cCmd.Cmd) + config.AddRpcTimeoutFlag(cCmd.Cmd) + + config.AddBsCopysetIdSliceRequiredFlag(cCmd.Cmd) + config.AddBsLogicalPoolIdSliceRequiredFlag(cCmd.Cmd) + config.AddBsChunkIdSliceRequiredFlag(cCmd.Cmd) + config.AddBsChunkServerAddressSliceRequiredFlag(cCmd.Cmd) +} + +func GetChunkInfo(caller *cobra.Command) (*map[string]*chunk.GetChunkInfoResponse, *cmderror.CmdError) { + sCmd := NewGetChunkInfoCommand() + config.AlignFlagsValue(caller, sCmd.Cmd, []string{ + config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR, + config.CURVEBS_LOGIC_POOL_ID, config.CURVEBS_COPYSET_ID, config.CURVEBS_CHUNK_ID, config.CURVEBS_CHUNKSERVER_ADDRESS, + }) + sCmd.Cmd.SilenceErrors = true + sCmd.Cmd.SilenceUsage = true + sCmd.Cmd.SetArgs([]string{"--format", config.FORMAT_NOOUT}) + err := sCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsGetChunkInfo() + retErr.Format(err.Error()) + return nil, retErr + } + return sCmd.addr2Chunk, cmderror.Success() +} diff --git a/tools-v2/pkg/cli/command/curvebs/query/chunk/copyset.go b/tools-v2/pkg/cli/command/curvebs/query/chunk/copyset.go new file mode 100644 index 0000000000..8bbedde990 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/query/chunk/copyset.go @@ -0,0 +1,137 @@ +/* +* Copyright (c) 2023 NetEase Inc. 
+* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. + */ +/* +* Project: curve +* Created Date: 2023-04-12 +* Author: chengyi01 + */ + +package chunk + +import ( + "fmt" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/query/file" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/query/seginfo" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/nameserver2" + "github.com/spf13/cobra" +) + +type ChunkCopysetCommand struct { + basecmd.FinalCurveCmd + FileInfo *nameserver2.FileInfo + ChunkId uint64 + LogicalpoolId uint32 + CopysetId uint32 +} + +var _ basecmd.FinalCurveCmdFunc = (*ChunkCopysetCommand)(nil) // check interface + +func NewQueryChunkCopysetCommand() *ChunkCopysetCommand { + chunkCmd := &ChunkCopysetCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{}, + } + + basecmd.NewFinalCurveCli(&chunkCmd.FinalCurveCmd, chunkCmd) + return chunkCmd +} + +func NewChunkCopysetCommand() *cobra.Command { + return NewQueryChunkCopysetCommand().Cmd +} + +func (cCmd *ChunkCopysetCommand) AddFlags() { + config.AddBsMdsFlagOption(cCmd.Cmd) + config.AddRpcRetryTimesFlag(cCmd.Cmd) + config.AddRpcTimeoutFlag(cCmd.Cmd) + config.AddBsPathRequiredFlag(cCmd.Cmd) + config.AddBsOffsetRequiredFlag(cCmd.Cmd) + config.AddBsUserOptionFlag(cCmd.Cmd) + config.AddBsPasswordOptionFlag(cCmd.Cmd) +} + +func (cCmd *ChunkCopysetCommand) Init(cmd *cobra.Command, args []string) error { + fileInfoResponse, err := file.GetFileInfo(cCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + cCmd.FileInfo = fileInfoResponse.GetFileInfo() + if cCmd.FileInfo.GetFileType() != nameserver2.FileType_INODE_PAGEFILE { + filepath := config.GetBsFlagString(cCmd.Cmd, config.CURVEBS_PATH) + return fmt.Errorf("file %s is not a pagefile", filepath) + } + return nil +} + +func (cCmd *ChunkCopysetCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&cCmd.FinalCurveCmd, cCmd) +} + +func (cCmd *ChunkCopysetCommand) RunCommand(cmd *cobra.Command, args []string) error { + segmentSize := uint64(cCmd.FileInfo.GetSegmentSize()) + offset := config.GetBsFlagUint64(cCmd.Cmd, config.CURVEBS_OFFSET) + segOffset := (offset / segmentSize) * segmentSize + + cCmd.Cmd.ParseFlags([]string{ + fmt.Sprintf("--%s", config.CURVEBS_OFFSET), fmt.Sprintf("%d", segOffset), + }) + segmentRes, err := seginfo.GetSegment(cCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + segment := segmentRes.GetPageFileSegment() + chunkSize := segment.GetChunkSize() + if chunkSize == 0 { + return fmt.Errorf("no chunks in segment") + } + chunkIndex := (offset - segOffset) / uint64(chunkSize) + chunks := segment.GetChunks() + if chunkIndex >= uint64(len(chunks)) { + return fmt.Errorf("chunkIndex exceed chunks num in segment") + } + chunk := 
chunks[chunkIndex] + cCmd.ChunkId = chunk.GetChunkID() + cCmd.LogicalpoolId = segment.GetLogicalPoolID() + cCmd.CopysetId = chunk.GetCopysetID() + return nil +} + +func (cCmd *ChunkCopysetCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&cCmd.FinalCurveCmd) +} + +// return chunkId, logicalpoolId, copysetId, err +func QueryChunkCopyset(caller *cobra.Command) (uint64, uint32, uint32, *cmderror.CmdError) { + queryCmd := NewQueryChunkCopysetCommand() + config.AlignFlagsValue(caller, queryCmd.Cmd, []string{ + config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR, + config.CURVEBS_PATH, config.CURVEBS_USER, config.CURVEBS_PASSWORD, config.CURVEBS_OFFSET, + }) + queryCmd.Cmd.SilenceErrors = true + queryCmd.Cmd.SilenceUsage = true + queryCmd.Cmd.SetArgs([]string{"--format", config.FORMAT_NOOUT}) + err := queryCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsGetChunkCopyset() + retErr.Format(err.Error()) + return 0, 0, 0, retErr + } + return queryCmd.ChunkId, queryCmd.LogicalpoolId, queryCmd.CopysetId, cmderror.Success() +} diff --git a/tools-v2/pkg/cli/command/curvebs/query/chunkserver/chunkserver.go b/tools-v2/pkg/cli/command/curvebs/query/chunkserver/chunkserver.go new file mode 100644 index 0000000000..3ffd9d4d96 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/query/chunkserver/chunkserver.go @@ -0,0 +1,188 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */
+/*
+* Project: curve
+* Created Date: 2023-04-12
+* Author: chengyi01
+ */
+
+package chunkserver
+
+import (
+    "context"
+    "fmt"
+
+    cmderror "github.com/opencurve/curve/tools-v2/internal/error"
+    cobrautil "github.com/opencurve/curve/tools-v2/internal/utils"
+    basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command"
+    "github.com/opencurve/curve/tools-v2/pkg/config"
+    "github.com/opencurve/curve/tools-v2/pkg/output"
+    "github.com/opencurve/curve/tools-v2/proto/proto/common"
+    "github.com/opencurve/curve/tools-v2/proto/proto/topology"
+    "github.com/opencurve/curve/tools-v2/proto/proto/topology/statuscode"
+    "github.com/spf13/cobra"
+    "google.golang.org/grpc"
+)
+
+type GetChunkServerListRpc struct {
+    Info      *basecmd.Rpc
+    Request   *topology.GetChunkServerListInCopySetsRequest
+    mdsClient topology.TopologyServiceClient
+}
+
+var _ basecmd.RpcFunc = (*GetChunkServerListRpc)(nil) // check interface
+
+func (gRpc *GetChunkServerListRpc) NewRpcClient(cc grpc.ClientConnInterface) {
+    gRpc.mdsClient = topology.NewTopologyServiceClient(cc)
+}
+
+func (gRpc *GetChunkServerListRpc) Stub_Func(ctx context.Context) (interface{}, error) {
+    return gRpc.mdsClient.GetChunkServerListInCopySets(ctx, gRpc.Request)
+}
+
+type ChunkServerListInCoysetCommand struct {
+    basecmd.FinalCurveCmd
+    Rpc          []*GetChunkServerListRpc
+    key2Location *map[uint64][]*common.ChunkServerLocation
+}
+
+var _ basecmd.FinalCurveCmdFunc = (*ChunkServerListInCoysetCommand)(nil) // check interface
+
+func NewQueryChunkServerListCommand() *ChunkServerListInCoysetCommand {
+    chunkCmd := &ChunkServerListInCoysetCommand{
+        FinalCurveCmd: basecmd.FinalCurveCmd{},
+    }
+
+    basecmd.NewFinalCurveCli(&chunkCmd.FinalCurveCmd, chunkCmd)
+    return chunkCmd
+}
+
+func NewChunkServerListCommand() *cobra.Command {
+    return NewQueryChunkServerListCommand().Cmd
+}
+
+func (cCmd *ChunkServerListInCoysetCommand) AddFlags() {
+    config.AddBsMdsFlagOption(cCmd.Cmd)
+    config.AddRpcRetryTimesFlag(cCmd.Cmd)
+    config.AddRpcTimeoutFlag(cCmd.Cmd)
+    config.AddBsLogicalPoolIdSliceRequiredFlag(cCmd.Cmd)
+    config.AddBsCopysetIdSliceRequiredFlag(cCmd.Cmd)
+}
+
+func (cCmd *ChunkServerListInCoysetCommand) Init(cmd *cobra.Command, args []string) error {
+    mdsAddrs, err := config.GetBsMdsAddrSlice(cCmd.Cmd)
+    if err.TypeCode() != cmderror.CODE_SUCCESS {
+        return err.ToError()
+    }
+    timeout := config.GetFlagDuration(cCmd.Cmd, config.RPCTIMEOUT)
+    retrytimes := config.GetFlagInt32(cCmd.Cmd, config.RPCRETRYTIMES)
+    logicalpoolidList := config.GetBsFlagStringSlice(cCmd.Cmd, config.CURVEBS_LOGIC_POOL_ID)
+    copysetidList := config.GetBsFlagStringSlice(cCmd.Cmd, config.CURVEBS_COPYSET_ID)
+    if len(logicalpoolidList) != len(copysetidList) {
+        return fmt.Errorf("logicalpoolidList and copysetidList length not equal")
+    }
+    logicalpoolIds, errParse := cobrautil.StringList2Uint64List(logicalpoolidList)
+    if errParse != nil {
+        return fmt.Errorf("parse logicalpoolid %v fail", logicalpoolidList)
+    }
+    copysetIds, errParse := cobrautil.StringList2Uint64List(copysetidList)
+    if errParse != nil {
+        return fmt.Errorf("parse copysetid %v fail", copysetidList)
+    }
+    // group copyset ids by logical pool so that one request is sent per pool
+    logicalpool2copysets := make(map[uint32][]uint32)
+    for i := 0; i < len(logicalpoolidList); i++ {
+        lpid := logicalpoolIds[i]
+        cpid := copysetIds[i]
+        logicalpool2copysets[uint32(lpid)] = append(logicalpool2copysets[uint32(lpid)], uint32(cpid))
+    }
+
+    for logicalpoolId, copysetIds := range logicalpool2copysets {
+        // copy the loop variable so the pointer stored in the request stays stable
+        logicalpoolId := logicalpoolId
+        cCmd.Rpc = append(cCmd.Rpc, 
&GetChunkServerListRpc{ + Info: basecmd.NewRpc(mdsAddrs, timeout, retrytimes, "GetFileInfo"), + Request: &topology.GetChunkServerListInCopySetsRequest{ + LogicalPoolId: &logicalpoolId, + CopysetId: copysetIds, + }, + }) + } + return nil +} + +func (cCmd *ChunkServerListInCoysetCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&cCmd.FinalCurveCmd, cCmd) +} + +func (cCmd *ChunkServerListInCoysetCommand) RunCommand(cmd *cobra.Command, args []string) error { + var infos []*basecmd.Rpc + var funcs []basecmd.RpcFunc + for _, rpc := range cCmd.Rpc { + infos = append(infos, rpc.Info) + funcs = append(funcs, rpc) + } + results, errs := basecmd.GetRpcListResponse(infos, funcs) + if len(errs) == len(infos) { + mergeErr := cmderror.MergeCmdErrorExceptSuccess(errs) + return mergeErr.ToError() + } + + key2Location := make(map[uint64][]*common.ChunkServerLocation) + cCmd.key2Location = &key2Location + for i, result := range results { + logicalpoolid := cCmd.Rpc[i].Request.GetLogicalPoolId() + copysetids := cCmd.Rpc[i].Request.GetCopysetId() + response := result.(*topology.GetChunkServerListInCopySetsResponse) + if response.GetStatusCode() != int32(statuscode.TopoStatusCode_Success) { + err := cmderror.ErrGetChunkServerListInCopySets(statuscode.TopoStatusCode(response.GetStatusCode()), + logicalpoolid, copysetids) + errs = append(errs, err) + continue + } + for _, info := range response.CsInfo { + key := cobrautil.GetCopysetKey(uint64(logicalpoolid), uint64(*info.CopysetId)) + (*cCmd.key2Location)[key] = info.CsLocs + } + } + errRet := cmderror.MergeCmdErrorExceptSuccess(errs) + cCmd.Error = errRet + if errRet.TypeCode() != cmderror.CODE_SUCCESS { + return errRet.ToError() + } + return nil +} + +func (cCmd *ChunkServerListInCoysetCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&cCmd.FinalCurveCmd) +} + +func GetChunkServerListInCopySets(caller *cobra.Command) (*map[uint64][]*common.ChunkServerLocation, *cmderror.CmdError) { + getCmd := NewQueryChunkServerListCommand() + config.AlignFlagsValue(caller, getCmd.Cmd, []string{ + config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR, + config.CURVEBS_LOGIC_POOL_ID, config.CURVEBS_COPYSET_ID, + }) + getCmd.Cmd.SilenceErrors = true + getCmd.Cmd.SilenceUsage = true + getCmd.Cmd.SetArgs([]string{"--format", config.FORMAT_NOOUT}) + err := getCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsChunkServerListInCopySets() + retErr.Format(err.Error()) + return nil, retErr + } + return getCmd.key2Location, cmderror.Success() +} diff --git a/tools-v2/pkg/cli/command/curvebs/query/chunkserver/chunkserver_recover.go b/tools-v2/pkg/cli/command/curvebs/query/chunkserver/chunkserver_recover.go new file mode 100644 index 0000000000..64b62c8bf1 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/query/chunkserver/chunkserver_recover.go @@ -0,0 +1,132 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */ +/* +* Project: CurveCli +* Created Date: 2023-05-15 +* Author: chengyi01 + */ +package chunkserver + +import ( + "context" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/schedule" + "github.com/opencurve/curve/tools-v2/proto/proto/topology/statuscode" + "github.com/spf13/cobra" + "google.golang.org/grpc" +) + +type GetChunkServerRecoverStatusRpc struct { + Info *basecmd.Rpc + Request *schedule.QueryChunkServerRecoverStatusRequest + mdsClient schedule.ScheduleServiceClient +} + +var _ basecmd.RpcFunc = (*GetChunkServerRecoverStatusRpc)(nil) + +func (gRpc *GetChunkServerRecoverStatusRpc) NewRpcClient(cc grpc.ClientConnInterface) { + gRpc.mdsClient = schedule.NewScheduleServiceClient(cc) +} + +func (gRpc *GetChunkServerRecoverStatusRpc) Stub_Func(ctx context.Context) (interface{}, error) { + return gRpc.mdsClient.QueryChunkServerRecoverStatus(ctx, gRpc.Request) +} + +type ChunkServerRecoverStatusCommand struct { + basecmd.FinalCurveCmd + Rpc *GetChunkServerRecoverStatusRpc + RecoverStatusMap map[uint32]bool +} + +var _ basecmd.FinalCurveCmdFunc = (*ChunkServerRecoverStatusCommand)(nil) + +func (csrsCmd *ChunkServerRecoverStatusCommand) Init(cmd *cobra.Command, args []string) error { + mdsAddrs, err := config.GetBsMdsAddrSlice(csrsCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + timeout := config.GetFlagDuration(csrsCmd.Cmd, config.RPCTIMEOUT) + retrytimes := config.GetFlagInt32(csrsCmd.Cmd, config.RPCRETRYTIMES) + + request := schedule.QueryChunkServerRecoverStatusRequest{} + chunkseverIdSlice := config.GetBsChunkServerId(csrsCmd.Cmd) + request.ChunkServerID = append(request.ChunkServerID, chunkseverIdSlice...) 
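+    // The reply's RecoverStatusMap is keyed by chunkserver ID (map[uint32]bool); the
+    // value presumably marks whether that chunkserver is still being recovered.
+    // Minimal illustrative read of the result after the command runs (hypothetical ID 42):
+    //   recovering := csrsCmd.RecoverStatusMap[uint32(42)]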
+ + csrsCmd.Rpc = &GetChunkServerRecoverStatusRpc{ + Request: &request, + Info: basecmd.NewRpc(mdsAddrs, timeout, retrytimes, "QueryChunkServerRecoverStatus"), + } + + return nil +} + +func (csrsCmd *ChunkServerRecoverStatusCommand) RunCommand(cmd *cobra.Command, args []string) error { + result, err := basecmd.GetRpcResponse(csrsCmd.Rpc.Info, csrsCmd.Rpc) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + res := result.(*schedule.QueryChunkServerRecoverStatusResponse) + RetErr := cmderror.ErrBsQueryChunkserverRecoverStatus(statuscode.TopoStatusCode(res.GetStatusCode())) + csrsCmd.RecoverStatusMap = res.GetRecoverStatusMap() + csrsCmd.Result = res + csrsCmd.Error = RetErr + return nil +} + +func (csrsCmd *ChunkServerRecoverStatusCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&csrsCmd.FinalCurveCmd, csrsCmd) +} + +func (csrsCmd *ChunkServerRecoverStatusCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&csrsCmd.FinalCurveCmd) +} + +func (csrsCmd *ChunkServerRecoverStatusCommand) AddFlags() { + config.AddRpcRetryTimesFlag(csrsCmd.Cmd) + config.AddRpcTimeoutFlag(csrsCmd.Cmd) + + config.AddBsMdsFlagOption(csrsCmd.Cmd) + config.AddBsChunkServerIdOptionFlag(csrsCmd.Cmd) +} + +func NewQueryChunkServerRecoverStatusCommand() *ChunkServerRecoverStatusCommand { + csrsCmd := &ChunkServerRecoverStatusCommand{FinalCurveCmd: basecmd.FinalCurveCmd{}} + basecmd.NewFinalCurveCli(&csrsCmd.FinalCurveCmd, csrsCmd) + return csrsCmd +} + +func NewChunkServerRecoverStatusCommand() *cobra.Command { + return NewQueryChunkServerRecoverStatusCommand().Cmd +} + +func GetQueryChunkServerRecoverStatus(caller *cobra.Command) (map[uint32]bool, *cmderror.CmdError) { + getCmd := NewQueryChunkServerRecoverStatusCommand() + config.AlignFlagsValue(caller, getCmd.Cmd, []string{config.CURVEBS_MDSADDR, config.CURVEBS_CHUNKSERVER_ID}) + getCmd.Cmd.SilenceErrors = true + getCmd.Cmd.SilenceUsage = true + getCmd.Cmd.SetArgs([]string{"--format", config.FORMAT_NOOUT}) + err := getCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsQueryChunkServerRecoverStatus() + retErr.Format(err.Error()) + return nil, retErr + } + return getCmd.RecoverStatusMap, cmderror.Success() +} diff --git a/tools-v2/pkg/cli/command/curvebs/query/copyset/copyset.go b/tools-v2/pkg/cli/command/curvebs/query/copyset/copyset.go new file mode 100644 index 0000000000..594351b7d7 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/query/copyset/copyset.go @@ -0,0 +1,174 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */ +/* +* Project: curve +* Created Date: 2023-04-26 +* Author: chengyi01 + */ + +package copyset + +import ( + "context" + "fmt" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/common" + "github.com/opencurve/curve/tools-v2/proto/proto/topology" + "github.com/opencurve/curve/tools-v2/proto/proto/topology/statuscode" + "github.com/spf13/cobra" + "google.golang.org/grpc" +) + +type GetCopysetRpc struct { + Info *basecmd.Rpc + Request *topology.GetCopysetRequest + mdsClient topology.TopologyServiceClient +} + +var _ basecmd.RpcFunc = (*GetCopysetRpc)(nil) // check interface + +func (gRpc *GetCopysetRpc) NewRpcClient(cc grpc.ClientConnInterface) { + gRpc.mdsClient = topology.NewTopologyServiceClient(cc) +} + +func (gRpc *GetCopysetRpc) Stub_Func(ctx context.Context) (interface{}, error) { + return gRpc.mdsClient.GetCopyset(ctx, gRpc.Request) +} + +type CopysetCommand struct { + CopysetInfoList []*common.CopysetInfo + Rpc []*GetCopysetRpc + basecmd.FinalCurveCmd +} + +var _ basecmd.FinalCurveCmdFunc = (*CopysetCommand)(nil) // check interface + +func NewQueryCopysetCommand() *CopysetCommand { + copysetCmd := &CopysetCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{}, + } + + basecmd.NewFinalCurveCli(©setCmd.FinalCurveCmd, copysetCmd) + return copysetCmd +} + +func NewCopysetCommand() *cobra.Command { + return NewQueryCopysetCommand().Cmd +} + +func (cCmd *CopysetCommand) AddFlags() { + config.AddRpcTimeoutFlag(cCmd.Cmd) + config.AddRpcRetryTimesFlag(cCmd.Cmd) + config.AddBsMdsFlagOption(cCmd.Cmd) + config.AddBsCopysetIdSliceRequiredFlag(cCmd.Cmd) + config.AddBsLogicalPoolIdSliceRequiredFlag(cCmd.Cmd) +} + +func (cCmd *CopysetCommand) Init(cmd *cobra.Command, args []string) error { + logicalpoolidList := config.GetBsFlagStringSlice(cCmd.Cmd, config.CURVEBS_LOGIC_POOL_ID) + copysetidList := config.GetBsFlagStringSlice(cCmd.Cmd, config.CURVEBS_COPYSET_ID) + if len(logicalpoolidList) != len(copysetidList) { + return fmt.Errorf("logicalpoolidList and copysetidList length not equal") + } + logicalpoolIds, errParse := cobrautil.StringList2Uint64List(logicalpoolidList) + if errParse != nil { + return fmt.Errorf("parse logicalpoolid%v fail", logicalpoolidList) + } + copysetIds, errParse := cobrautil.StringList2Uint64List(copysetidList) + if errParse != nil { + return fmt.Errorf("parse copysetid%v fail", copysetidList) + } + mdsAddrs, err := config.GetBsMdsAddrSlice(cCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + timeout := config.GetFlagDuration(cCmd.Cmd, config.RPCTIMEOUT) + retrytimes := config.GetFlagInt32(cCmd.Cmd, config.RPCRETRYTIMES) + for i, logicalPool := range logicalpoolIds { + lpId := uint32(logicalPool) + cpId := uint32(copysetIds[i]) + cCmd.Rpc = append(cCmd.Rpc, &GetCopysetRpc{ + Info: basecmd.NewRpc(mdsAddrs, timeout, retrytimes, "GetCopyset"), + Request: &topology.GetCopysetRequest{ + LogicalPoolId: &lpId, + CopysetId: &cpId, + }, + }) + } + + return nil +} + +func (cCmd *CopysetCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&cCmd.FinalCurveCmd, cCmd) +} + +func (cCmd *CopysetCommand) RunCommand(cmd *cobra.Command, args []string) error { + var infos []*basecmd.Rpc + var funcs []basecmd.RpcFunc + for _, rpc := 
range cCmd.Rpc { + infos = append(infos, rpc.Info) + funcs = append(funcs, rpc) + } + results, errs := basecmd.GetRpcListResponse(infos, funcs) + if len(errs) == len(infos) { + mergeErr := cmderror.MergeCmdErrorExceptSuccess(errs) + return mergeErr.ToError() + } + for i, result := range results { + if result == nil { + continue + } + res := result.(*topology.GetCopysetResponse) + request := cCmd.Rpc[i].Request + err := cmderror.ErrBsGetCopyset(statuscode.TopoStatusCode(res.GetStatusCode()), request.GetLogicalPoolId(), request.GetCopysetId()) + if err.TypeCode() != cmderror.CODE_SUCCESS { + errs = append(errs, err) + } else { + cCmd.CopysetInfoList = append(cCmd.CopysetInfoList, res.GetCopysetInfo()) + } + } + cCmd.Result = cCmd.CopysetInfoList + cCmd.Error = cmderror.MergeCmdErrorExceptSuccess(errs) + return nil +} + +func (cCmd *CopysetCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&cCmd.FinalCurveCmd) +} + +func GetCopyset(caller *cobra.Command) ([]*common.CopysetInfo, *cmderror.CmdError) { + getCmd := NewQueryCopysetCommand() + config.AlignFlagsValue(caller, getCmd.Cmd, []string{ + config.CURVEBS_MDSADDR, config.RPCRETRYTIMES, config.RPCTIMEOUT, + config.CURVEBS_LOGIC_POOL_ID, config.CURVEBS_COPYSET_ID, + }) + getCmd.Cmd.SilenceErrors = true + getCmd.Cmd.SilenceUsage = true + getCmd.Cmd.SetArgs([]string{"--format", config.FORMAT_NOOUT}) + err := getCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsGetScanStatus() + retErr.Format(err.Error()) + return getCmd.CopysetInfoList, retErr + } + return getCmd.CopysetInfoList, cmderror.Success() +} diff --git a/tools-v2/pkg/cli/command/curvebs/query/copyset/get_copyset_status.go b/tools-v2/pkg/cli/command/curvebs/query/copyset/get_copyset_status.go new file mode 100644 index 0000000000..377a3f1e8a --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/query/copyset/get_copyset_status.go @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2022 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+/*
+ * Project: tools-v2
+ * Created Date: 2023-04-24
+ * Author: baytan0720
+ */
+
+package copyset
+
+import (
+    "context"
+
+    cmderror "github.com/opencurve/curve/tools-v2/internal/error"
+    basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command"
+    "github.com/opencurve/curve/tools-v2/pkg/config"
+    "github.com/opencurve/curve/tools-v2/pkg/output"
+    "github.com/opencurve/curve/tools-v2/proto/proto/common"
+    "github.com/opencurve/curve/tools-v2/proto/proto/copyset"
+    "github.com/spf13/cobra"
+    "google.golang.org/grpc"
+)
+
+type GetCopysetStatusRpc struct {
+    Info      *basecmd.Rpc
+    Request   *copyset.CopysetStatusRequest
+    mdsClient copyset.CopysetServiceClient
+}
+
+var _ basecmd.RpcFunc = (*GetCopysetStatusRpc)(nil) // check interface
+
+func (gRpc *GetCopysetStatusRpc) NewRpcClient(cc grpc.ClientConnInterface) {
+    gRpc.mdsClient = copyset.NewCopysetServiceClient(cc)
+}
+
+func (gRpc *GetCopysetStatusRpc) Stub_Func(ctx context.Context) (interface{}, error) {
+    return gRpc.mdsClient.GetCopysetStatus(ctx, gRpc.Request)
+}
+
+type GetCopysetStatusCommand struct {
+    basecmd.FinalCurveCmd
+    Rpc         []*GetCopysetStatusRpc
+    peer2Status *map[string]*copyset.CopysetStatusResponse
+}
+
+var _ basecmd.FinalCurveCmdFunc = (*GetCopysetStatusCommand)(nil) // check interface
+
+func NewCopysetStatusCommand() *cobra.Command {
+    return NewGetCopysetStatusCommand().Cmd
+}
+
+func NewGetCopysetStatusCommand() *GetCopysetStatusCommand {
+    copysetCmd := &GetCopysetStatusCommand{
+        FinalCurveCmd: basecmd.FinalCurveCmd{},
+    }
+    basecmd.NewFinalCurveCli(&copysetCmd.FinalCurveCmd, copysetCmd)
+    return copysetCmd
+}
+
+func (cCmd *GetCopysetStatusCommand) Init(cmd *cobra.Command, args []string) error {
+    timeout := config.GetFlagDuration(cCmd.Cmd, config.RPCTIMEOUT)
+    retrytimes := config.GetFlagInt32(cCmd.Cmd, config.RPCRETRYTIMES)
+
+    copysetid := config.GetBsFlagUint32(cCmd.Cmd, config.CURVEBS_COPYSET_ID)
+    logicalpoolid := config.GetBsFlagUint32(cCmd.Cmd, config.CURVEBS_LOGIC_POOL_ID)
+    peersAddress := config.GetBsFlagStringSlice(cCmd.Cmd, config.CURVEBS_PEERS_ADDRESS)
+    queryHash := false
+    for _, address := range peersAddress {
+        // copy the loop variable: the request stores a pointer to it, and without the
+        // copy every request would end up pointing at the last peer address
+        address := address
+        rpc := &GetCopysetStatusRpc{
+            Request: &copyset.CopysetStatusRequest{
+                CopysetId:   &copysetid,
+                LogicPoolId: &logicalpoolid,
+                Peer:        &common.Peer{Address: &address},
+                QueryHash:   &queryHash,
+            },
+        }
+        rpc.Info = basecmd.NewRpc([]string{address}, timeout, retrytimes, "GetCopysetStatus")
+        cCmd.Rpc = append(cCmd.Rpc, rpc)
+    }
+    return nil
+}
+
+func (cCmd *GetCopysetStatusCommand) RunCommand(cmd *cobra.Command, args []string) error {
+    var infos []*basecmd.Rpc
+    var funcs []basecmd.RpcFunc
+    for _, rpc := range cCmd.Rpc {
+        infos = append(infos, rpc.Info)
+        funcs = append(funcs, rpc)
+    }
+    results, errs := basecmd.GetRpcListResponse(infos, funcs)
+    if len(errs) == len(infos) {
+        mergeErr := cmderror.MergeCmdErrorExceptSuccess(errs)
+        return mergeErr.ToError()
+    }
+    addr2Status := make(map[string]*copyset.CopysetStatusResponse)
+    cCmd.peer2Status = &addr2Status
+    for i, result := range results {
+        if response, ok := result.(*copyset.CopysetStatusResponse); ok {
+            (*cCmd.peer2Status)[infos[i].Addrs[0]] = response
+        } else {
+            (*cCmd.peer2Status)[infos[i].Addrs[0]] = nil
+        }
+    }
+    return nil
+}
+
+func (cCmd *GetCopysetStatusCommand) Print(cmd *cobra.Command, args []string) error {
+    return output.FinalCmdOutput(&cCmd.FinalCurveCmd, cCmd)
+}
+
+func (cCmd *GetCopysetStatusCommand) ResultPlainOutput() error {
+    return output.FinalCmdOutputPlain(&cCmd.FinalCurveCmd)
+}
+
+func (cCmd 
*GetCopysetStatusCommand) AddFlags() { + config.AddBsMdsFlagOption(cCmd.Cmd) + config.AddRpcRetryTimesFlag(cCmd.Cmd) + config.AddRpcTimeoutFlag(cCmd.Cmd) + + config.AddBsCopysetIdRequiredFlag(cCmd.Cmd) + config.AddBsLogicalPoolIdRequiredFlag(cCmd.Cmd) + config.AddBsPeersConfFlag(cCmd.Cmd) +} + +func GetCopysetStatus(caller *cobra.Command) (*map[string]*copyset.CopysetStatusResponse, *cmderror.CmdError) { + sCmd := NewGetCopysetStatusCommand() + config.AlignFlagsValue(caller, sCmd.Cmd, []string{ + config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR, + config.CURVEBS_LOGIC_POOL_ID, config.CURVEBS_COPYSET_ID, config.CURVEBS_PEERS_ADDRESS, + }) + sCmd.Cmd.SilenceErrors = true + sCmd.Cmd.SilenceUsage = true + sCmd.Cmd.SetArgs([]string{"--format", config.FORMAT_NOOUT}) + err := sCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsGetCopysetStatus() + retErr.Format(err.Error()) + return nil, retErr + } + return sCmd.peer2Status, cmderror.Success() +} diff --git a/tools-v2/pkg/cli/command/curvebs/query/file/fileinfo.go b/tools-v2/pkg/cli/command/curvebs/query/file/fileinfo.go index e3e316a399..bdcd5ef8b9 100644 --- a/tools-v2/pkg/cli/command/curvebs/query/file/fileinfo.go +++ b/tools-v2/pkg/cli/command/curvebs/query/file/fileinfo.go @@ -100,8 +100,6 @@ func (gCmd *GetFileInfoCommand) Init(cmd *cobra.Command, args []string) error { Owner: &owner, Date: &date, } - // fmt.Println("user:", owner) - // fmt.Println("viper:", viper.GetString(config.VIPER_CURVEBS_USER)) password := config.GetBsFlagString(gCmd.Cmd, config.CURVEBS_PASSWORD) if owner == viper.GetString(config.VIPER_CURVEBS_USER) && len(password) !=0 { strSig := cobrautil.GetString2Signature(date, owner) diff --git a/tools-v2/pkg/cli/command/curvebs/query/pool/chunk.go b/tools-v2/pkg/cli/command/curvebs/query/pool/chunk.go new file mode 100644 index 0000000000..581d61d361 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/query/pool/chunk.go @@ -0,0 +1,74 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+ */ +/* +* Project: curve +* Created Date: 2023-04-18 +* Author: chengyi01 + */ +package pool + +import ( + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/spf13/cobra" +) + +const ( +) + +// get pool total chunk size +type ChunkSizeCommand struct { + basecmd.FinalCurveCmd + metrics []*basecmd.Metric +} + +var _ basecmd.FinalCurveCmdFunc = (*ChunkSizeCommand)(nil) // check interface + +func NewChunkSizeCommand() *cobra.Command { + return NewQueryChunkSizeCommand().Cmd +} + +func NewQueryChunkSizeCommand() *ChunkSizeCommand { + queryCmd := &ChunkSizeCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{}, + } + + basecmd.NewFinalCurveCli(&queryCmd.FinalCurveCmd, queryCmd) + return queryCmd +} + +func (cCmd *ChunkSizeCommand) AddFlags() { + config.AddBsMdsFlagOption(cCmd.Cmd) + config.AddRpcRetryTimesFlag(cCmd.Cmd) + config.AddRpcTimeoutFlag(cCmd.Cmd) +} + +func (cCmd *ChunkSizeCommand) Init(cmd *cobra.Command, args []string) error { + return nil +} + +func (cCmd *ChunkSizeCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&cCmd.FinalCurveCmd, cCmd) +} + +func (cCmd *ChunkSizeCommand) RunCommand(cmd *cobra.Command, args []string) error { + return nil +} + +func (cCmd *ChunkSizeCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&cCmd.FinalCurveCmd) +} diff --git a/tools-v2/pkg/cli/command/curvebs/query/query.go b/tools-v2/pkg/cli/command/curvebs/query/query.go index 0cece0fbeb..c26a35f48a 100644 --- a/tools-v2/pkg/cli/command/curvebs/query/query.go +++ b/tools-v2/pkg/cli/command/curvebs/query/query.go @@ -24,24 +24,30 @@ package query import ( basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/query/chunk" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/query/file" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/query/scanstatus" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/query/seginfo" "github.com/spf13/cobra" ) -type ListCommand struct { +type QueryCommand struct { basecmd.MidCurveCmd } -var _ basecmd.MidCurveCmdFunc = (*ListCommand)(nil) // check interface +var _ basecmd.MidCurveCmdFunc = (*QueryCommand)(nil) // check interface -func (queryCmd *ListCommand) AddSubCommands() { +func (queryCmd *QueryCommand) AddSubCommands() { queryCmd.Cmd.AddCommand( file.NewFileCommand(), + seginfo.NewSeginfoCommand(), + chunk.NewChunkCommand(), + scanstatus.NewScanStatusCommand(), ) } func NewQueryCommand() *cobra.Command { - queryCmd := &ListCommand{ + queryCmd := &QueryCommand{ basecmd.MidCurveCmd{ Use: "query", Short: "query resources in the curvebs", diff --git a/tools-v2/pkg/cli/command/curvebs/query/scanstatus/scanstatus.go b/tools-v2/pkg/cli/command/curvebs/query/scanstatus/scanstatus.go new file mode 100644 index 0000000000..de3a36c46c --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/query/scanstatus/scanstatus.go @@ -0,0 +1,137 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. 
+* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. + */ +/* +* Project: curve +* Created Date: 2023-05-18 +* Author: lianzhanbiao + */ + +package scanstatus + +import ( + "fmt" + "time" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/query/copyset" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/common" + "github.com/spf13/cobra" +) + +const ( + scanStatusExample = `$ curve bs query scan-status --copysetid 1 --logicalpoolid 1` +) + +type ScanStatusCommand struct { + CopysetInfoList []*common.CopysetInfo + basecmd.FinalCurveCmd +} + +var _ basecmd.FinalCurveCmdFunc = (*ScanStatusCommand)(nil) // check interface + +func NewQueryScanStatusCommand() *ScanStatusCommand { + scanStatusCmd := &ScanStatusCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "scan-status", + Short: "query ScanStatus info in bs", + Example: scanStatusExample, + }, + } + + basecmd.NewFinalCurveCli(&scanStatusCmd.FinalCurveCmd, scanStatusCmd) + return scanStatusCmd +} + +func NewScanStatusCommand() *cobra.Command { + return NewQueryScanStatusCommand().Cmd +} + +func (sCmd *ScanStatusCommand) AddFlags() { + config.AddBsMdsFlagOption(sCmd.Cmd) + config.AddRpcRetryTimesFlag(sCmd.Cmd) + config.AddRpcTimeoutFlag(sCmd.Cmd) + config.AddBsLogicalPoolIdSliceRequiredFlag(sCmd.Cmd) + config.AddBsCopysetIdSliceRequiredFlag(sCmd.Cmd) +} + +func (sCmd *ScanStatusCommand) Init(cmd *cobra.Command, args []string) error { + mdsAddrs := config.GetBsFlagString(cmd, config.CURVEBS_MDSADDR) + retrytime := config.GetBsFlagInt32(cmd, config.RPCRETRYTIMES) + timeout := config.GetFlagDuration(sCmd.Cmd, config.RPCTIMEOUT) + logicalpoolidList := config.GetBsFlagString(sCmd.Cmd, config.CURVEBS_LOGIC_POOL_ID) + copysetidList := config.GetBsFlagString(sCmd.Cmd, config.CURVEBS_COPYSET_ID) + + sCmd.Cmd.ParseFlags([]string{ + fmt.Sprintf("--%s", config.CURVEBS_MDSADDR), mdsAddrs, + fmt.Sprintf("--%s", config.RPCRETRYTIMES), fmt.Sprintf("%d", retrytime), + fmt.Sprintf("--%s", config.RPCTIMEOUT), fmt.Sprintf("%d", timeout), + fmt.Sprintf("--%s", config.CURVEBS_LOGIC_POOL_ID), logicalpoolidList, + fmt.Sprintf("--%s", config.CURVEBS_COPYSET_ID), copysetidList, + }) + + header := []string{ + cobrautil.ROW_LOGICALPOOL, + cobrautil.ROW_COPYSET_ID, + cobrautil.ROW_SCAN, + cobrautil.ROW_LASTSCAN, + cobrautil.ROW_LAST_SCAN_CONSISTENT, + } + sCmd.SetHeader(header) + sCmd.TableNew.SetAutoMergeCellsByColumnIndex(cobrautil.GetIndexSlice( + sCmd.Header, []string{cobrautil.ROW_COPYSET_ID, cobrautil.ROW_POOL_ID, + cobrautil.ROW_SCAN, + }, + )) + + return nil +} + +func (sCmd *ScanStatusCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&sCmd.FinalCurveCmd, sCmd) +} + +func (sCmd *ScanStatusCommand) RunCommand(cmd *cobra.Command, args []string) error { + copysetInfoList, err := copyset.GetCopyset(cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return 
err.ToError() + } + sCmd.CopysetInfoList = copysetInfoList + rows := make([]map[string]string, 0) + for _, info := range sCmd.CopysetInfoList { + row := make(map[string]string) + row[cobrautil.ROW_LOGICALPOOL] = fmt.Sprintf("%d", info.GetLogicalPoolId()) + row[cobrautil.ROW_COPYSET_ID] = fmt.Sprintf("%d", info.GetCopysetId()) + row[cobrautil.ROW_SCAN] = fmt.Sprintf("%v", info.GetScaning()) + row[cobrautil.ROW_LASTSCAN] = fmt.Sprintf("%v", time.Unix(int64(info.GetLastScanSec()), 0)) + row[cobrautil.ROW_LAST_SCAN_CONSISTENT] = fmt.Sprintf("%v", info.GetLastScanConsistent()) + rows = append(rows, row) + } + list := cobrautil.ListMap2ListSortByKeys(rows, sCmd.Header, []string{ + cobrautil.ROW_LOGICALPOOL, + cobrautil.ROW_COPYSET_ID, + }) + sCmd.TableNew.AppendBulk(list) + sCmd.Error = cmderror.Success() + return nil +} + +func (sCmd *ScanStatusCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&sCmd.FinalCurveCmd) +} diff --git a/tools-v2/pkg/cli/command/curvebs/query/seginfo/seginfo.go b/tools-v2/pkg/cli/command/curvebs/query/seginfo/seginfo.go new file mode 100644 index 0000000000..30b9da3653 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/query/seginfo/seginfo.go @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Project: CurveCli + * Created Date: 2023-04-11 + * Author: chengyi (Cyber-SiKu) + */ + +package seginfo + +import ( + "fmt" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/query/file" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/nameserver2" + "github.com/spf13/cobra" +) + +const ( + seginfoExample = `$ curve bs query seginfo --path /pagefile` +) + +type SeginfoCommand struct { + basecmd.FinalCurveCmd + FileInfo *nameserver2.FileInfo +} + +var _ basecmd.FinalCurveCmdFunc = (*SeginfoCommand)(nil) // check interface + +func NewQuerySeginfoCommand() *SeginfoCommand { + seginfoCmd := &SeginfoCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "seginfo", + Short: "query the segments info of the file", + Example: seginfoExample, + }, + } + + basecmd.NewFinalCurveCli(&seginfoCmd.FinalCurveCmd, seginfoCmd) + return seginfoCmd +} + +func NewSeginfoCommand() *cobra.Command { + return NewQuerySeginfoCommand().Cmd +} + +func (sCmd *SeginfoCommand) AddFlags() { + config.AddBsMdsFlagOption(sCmd.Cmd) + config.AddRpcRetryTimesFlag(sCmd.Cmd) + config.AddRpcTimeoutFlag(sCmd.Cmd) + config.AddBsPathRequiredFlag(sCmd.Cmd) +} + +func (sCmd *SeginfoCommand) Init(cmd *cobra.Command, args []string) error { + fileInfoResponse, err := file.GetFileInfo(sCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + sCmd.FileInfo = fileInfoResponse.GetFileInfo() + if sCmd.FileInfo.GetFileType() != nameserver2.FileType_INODE_PAGEFILE { + filepath := config.GetBsFlagString(sCmd.Cmd, config.CURVEBS_PATH) + return fmt.Errorf("file %s is not a pagefile", filepath) + } + + header := []string{cobrautil.ROW_LOGICALPOOL, cobrautil.ROW_SEGMENT_SIZE, cobrautil.ROW_CHUNK_SIZE, cobrautil.ROW_START, cobrautil.ROW_COPYSET, cobrautil.ROW_CHUNK} + sCmd.SetHeader(header) + sCmd.TableNew.SetAutoMergeCellsByColumnIndex( + cobrautil.GetIndexSlice(header, []string{ + cobrautil.ROW_LOGICALPOOL, cobrautil.ROW_SEGMENT_SIZE, cobrautil.ROW_CHUNK_SIZE, cobrautil.ROW_START, cobrautil.ROW_COPYSET, + cobrautil.ROW_CHUNK, + }), + ) + return nil +} + +func (sCmd *SeginfoCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&sCmd.FinalCurveCmd, sCmd) +} + +func (sCmd *SeginfoCommand) RunCommand(cmd *cobra.Command, args []string) error { + segmentNum := sCmd.FileInfo.GetLength() / uint64(sCmd.FileInfo.GetSegmentSize()) + segmentSize := sCmd.FileInfo.GetSegmentSize() + var errs []*cmderror.CmdError + var segments []*nameserver2.PageFileSegment + config.AddBsOffsetRequiredFlag(sCmd.Cmd) +TaverSegment: + for i := uint64(0); i < segmentNum; i++ { + sCmd.Cmd.ParseFlags([]string{ + fmt.Sprintf("--%s", config.CURVEBS_OFFSET), fmt.Sprintf("%d", i*uint64(segmentSize)), + }) + segmentRes, err := GetSegment(sCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS && segmentRes.GetStatusCode() != nameserver2.StatusCode_kSegmentNotAllocated { + errs = append(errs, err) + continue + } + switch segmentRes.GetStatusCode() { + case nameserver2.StatusCode_kOK: + segments = append(segments, segmentRes.GetPageFileSegment()) + case nameserver2.StatusCode_kFileNotExists: + // The file is deleted during the query process, clear the segment and return 0 + segments = nil + break TaverSegment 
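+ // Any other status code (including kSegmentNotAllocated) is not fatal
+ // for the traversal: skip this segment and continue with the next offset.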
+ default: + continue + } + } + sCmd.Error = cmderror.MergeCmdErrorExceptSuccess(errs) + sCmd.Result = segments + + var rows []map[string]string + for _, segment := range segments { + for _, chunk := range segment.GetChunks() { + row := make(map[string]string) + row[cobrautil.ROW_LOGICALPOOL] = fmt.Sprintf("%d", segment.GetLogicalPoolID()) + row[cobrautil.ROW_SEGMENT_SIZE] = fmt.Sprintf("%d", segment.GetSegmentSize()) + row[cobrautil.ROW_CHUNK_SIZE] = fmt.Sprintf("%d", segment.GetChunkSize()) + row[cobrautil.ROW_START] = fmt.Sprintf("%d", segment.GetStartOffset()) + row[cobrautil.ROW_COPYSET] = fmt.Sprintf("%d", chunk.GetCopysetID()) + row[cobrautil.ROW_CHUNK] = fmt.Sprintf("%d", chunk.GetChunkID()) + rows = append(rows, row) + } + } + list := cobrautil.ListMap2ListSortByKeys(rows, sCmd.Header, []string{ + cobrautil.ROW_LOGICALPOOL, cobrautil.ROW_SEGMENT_SIZE, cobrautil.ROW_CHUNK_SIZE, cobrautil.ROW_START, cobrautil.ROW_COPYSET, + cobrautil.ROW_CHUNK, + }) + sCmd.TableNew.AppendBulk(list) + return nil +} + +func (sCmd *SeginfoCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&sCmd.FinalCurveCmd) +} diff --git a/tools-v2/pkg/cli/command/curvebs/query/seginfo/segment.go b/tools-v2/pkg/cli/command/curvebs/query/seginfo/segment.go new file mode 100644 index 0000000000..8999b4b47a --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/query/seginfo/segment.go @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Project: CurveCli + * Created Date: 2023-04-11 + * Author: chengyi (Cyber-SiKu) + */ + +package seginfo + +import ( + "context" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/nameserver2" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "google.golang.org/grpc" +) + +type GetSegmentRpc struct { + Info *basecmd.Rpc + Request *nameserver2.GetOrAllocateSegmentRequest + mdsClient nameserver2.CurveFSServiceClient +} + +var _ basecmd.RpcFunc = (*GetSegmentRpc)(nil) // check interface + +func (gRpc *GetSegmentRpc) NewRpcClient(cc grpc.ClientConnInterface) { + gRpc.mdsClient = nameserver2.NewCurveFSServiceClient(cc) +} + +func (gRpc *GetSegmentRpc) Stub_Func(ctx context.Context) (interface{}, error) { + return gRpc.mdsClient.GetOrAllocateSegment(ctx, gRpc.Request) +} + +type SegmentCommand struct { + Response *nameserver2.GetOrAllocateSegmentResponse + Rpc *GetSegmentRpc + basecmd.FinalCurveCmd +} + +var _ basecmd.FinalCurveCmdFunc = (*SegmentCommand)(nil) // check interface + +func NewQuerySegmentCommand() *SegmentCommand { + segmentCmd := &SegmentCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{}, + } + + basecmd.NewFinalCurveCli(&segmentCmd.FinalCurveCmd, segmentCmd) + return segmentCmd +} + +func NewSegmentCommand() *cobra.Command { + return NewQuerySegmentCommand().Cmd +} + +func (sCmd *SegmentCommand) AddFlags() { + config.AddBsMdsFlagOption(sCmd.Cmd) + config.AddRpcRetryTimesFlag(sCmd.Cmd) + config.AddRpcTimeoutFlag(sCmd.Cmd) + config.AddBsPathRequiredFlag(sCmd.Cmd) + config.AddBsOffsetRequiredFlag(sCmd.Cmd) + config.AddBsUserOptionFlag(sCmd.Cmd) + config.AddBsPasswordOptionFlag(sCmd.Cmd) +} + +func (sCmd *SegmentCommand) Init(cmd *cobra.Command, args []string) error { + mdsAddrs, err := config.GetBsMdsAddrSlice(sCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + timeout := config.GetFlagDuration(sCmd.Cmd, config.RPCTIMEOUT) + retrytimes := config.GetFlagInt32(sCmd.Cmd, config.RPCRETRYTIMES) + filepath := config.GetBsFlagString(sCmd.Cmd, config.CURVEBS_PATH) + owner := config.GetBsFlagString(sCmd.Cmd, config.CURVEBS_USER) + date, errDat := cobrautil.GetTimeofDayUs() + if errDat.TypeCode() != cmderror.CODE_SUCCESS { + return errDat.ToError() + } + offset := config.GetBsFlagUint64(sCmd.Cmd, config.CURVEBS_OFFSET) + allocate := false + request := nameserver2.GetOrAllocateSegmentRequest{ + FileName: &filepath, + Offset: &offset, + Owner: &owner, + Date: &date, + AllocateIfNotExist: &allocate, + } + password := config.GetBsFlagString(sCmd.Cmd, config.CURVEBS_PASSWORD) + if owner == viper.GetString(config.VIPER_CURVEBS_USER) && len(password) != 0 { + strSig := cobrautil.GetString2Signature(date, owner) + sig := cobrautil.CalcString2Signature(strSig, password) + request.Signature = &sig + } + sCmd.Rpc = &GetSegmentRpc{ + Info: basecmd.NewRpc(mdsAddrs, timeout, retrytimes, "GetOrAllocateSegment"), + Request: &request, + } + return nil +} + +func (sCmd *SegmentCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&sCmd.FinalCurveCmd, sCmd) +} + +func (sCmd *SegmentCommand) RunCommand(cmd *cobra.Command, args []string) error { + result, err := basecmd.GetRpcResponse(sCmd.Rpc.Info, sCmd.Rpc) + if err.TypeCode() != 
cmderror.CODE_SUCCESS { + return err.ToError() + } + sCmd.Response = result.(*nameserver2.GetOrAllocateSegmentResponse) + if sCmd.Response.GetStatusCode() != nameserver2.StatusCode_kOK { + retErr := cmderror.ErrGetOrAllocateSegment(sCmd.Response.GetStatusCode(), + sCmd.Rpc.Request.GetFileName(), sCmd.Rpc.Request.GetOffset()) + return retErr.ToError() + } + return nil +} + +func (sCmd *SegmentCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&sCmd.FinalCurveCmd) +} + +func GetSegment(caller *cobra.Command) (*nameserver2.GetOrAllocateSegmentResponse, *cmderror.CmdError) { + getCmd := NewQuerySegmentCommand() + config.AlignFlagsValue(caller, getCmd.Cmd, []string{ + config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR, + config.CURVEBS_PATH, config.CURVEBS_USER, config.CURVEBS_PASSWORD, + config.CURVEBS_OFFSET, + }) + getCmd.Cmd.SilenceErrors = true + getCmd.Cmd.SilenceUsage = true + getCmd.Cmd.SetArgs([]string{"--format", config.FORMAT_NOOUT}) + err := getCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsCreateFileOrDirectoryType() + retErr.Format(err.Error()) + return getCmd.Response, retErr + } + return getCmd.Response, cmderror.Success() +} diff --git a/tools-v2/pkg/cli/command/curvebs/snapshot/copyset/copyset.go b/tools-v2/pkg/cli/command/curvebs/snapshot/copyset/copyset.go new file mode 100644 index 0000000000..5da636c742 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/snapshot/copyset/copyset.go @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: CurveCli + * Created Date: 2023-04-28 + * Author: Xinlong-Chen + */ + +package copyset + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + "google.golang.org/grpc" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/delete/peer" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/cli2" +) + +const ( + updateExample = `$ curve bs snapshot copyset 127.0.0.0:8200:0 --logicalpoolid=1 --copysetid=1` +) + +type SnapshotRpc struct { + Info *basecmd.Rpc + Request *cli2.SnapshotRequest2 + Client cli2.CliService2Client +} + +func (sRpc *SnapshotRpc) NewRpcClient(cc grpc.ClientConnInterface) { + sRpc.Client = cli2.NewCliService2Client(cc) +} + +func (sRpc *SnapshotRpc) Stub_Func(ctx context.Context) (interface{}, error) { + return sRpc.Client.Snapshot(ctx, sRpc.Request) +} + +type SnapshotOneCommand struct { + basecmd.FinalCurveCmd + + Rpc *SnapshotRpc + Response *cli2.SnapshotResponse2 + row map[string]string +} + +var _ basecmd.FinalCurveCmdFunc = (*SnapshotOneCommand)(nil) // check interface + +// NewCommand ... 
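+// NewSnapshotOneCommand builds the `curve bs snapshot copyset` subcommand.
+// It parses the target peer from the first positional argument, reads the
+// logical pool id and copyset id from the required flags, and sends a
+// Snapshot RPC (cli2.CliService2) to that chunkserver peer to trigger a
+// snapshot of the copyset.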
+func NewSnapshotOneCommand() *cobra.Command { + peerCmd := &SnapshotOneCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "copyset", + Short: "take snapshot for copyset", + Example: updateExample, + }, + } + basecmd.NewFinalCurveCli(&peerCmd.FinalCurveCmd, peerCmd) + return peerCmd.Cmd +} + +func (sCmd *SnapshotOneCommand) AddFlags() { + config.AddRpcRetryTimesFlag(sCmd.Cmd) + config.AddRpcTimeoutFlag(sCmd.Cmd) + + config.AddBsLogicalPoolIdRequiredFlag(sCmd.Cmd) + config.AddBsCopysetIdRequiredFlag(sCmd.Cmd) +} + +func (sCmd *SnapshotOneCommand) Init(cmd *cobra.Command, args []string) error { + sCmd.SetHeader([]string{cobrautil.ROW_PEER, cobrautil.ROW_COPYSET, cobrautil.ROW_RESULT}) + sCmd.TableNew.SetAutoMergeCellsByColumnIndex(cobrautil.GetIndexSlice( + sCmd.Header, []string{}, + )) + + timeout := config.GetFlagDuration(sCmd.Cmd, config.RPCTIMEOUT) + retryTimes := config.GetFlagInt32(sCmd.Cmd, config.RPCRETRYTIMES) + + copysetID := config.GetBsFlagUint32(sCmd.Cmd, config.CURVEBS_COPYSET_ID) + + logicalPoolID := config.GetBsFlagUint32(sCmd.Cmd, config.CURVEBS_LOGIC_POOL_ID) + + // parse peer conf + if len(args) < 1 { + pErr := cmderror.ErrGetPeer() + pErr.Format("should specified the peer address") + return pErr.ToError() + } + snapshotPeer, err := peer.ParsePeer(args[0]) + if err != nil { + return err.ToError() + } + + out := make(map[string]string) + out[cobrautil.ROW_PEER] = fmt.Sprintf("%s:%d", snapshotPeer.GetAddress(), snapshotPeer.GetId()) + out[cobrautil.ROW_COPYSET] = fmt.Sprintf("(%d:%d)", logicalPoolID, copysetID) + sCmd.row = out + + sCmd.Rpc = &SnapshotRpc{ + Info: basecmd.NewRpc([]string{snapshotPeer.GetAddress()}, timeout, retryTimes, "Snapshot"), + Request: &cli2.SnapshotRequest2{ + LogicPoolId: &logicalPoolID, + CopysetId: ©setID, + Peer: snapshotPeer, + }, + } + + return nil +} + +func (sCmd *SnapshotOneCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&sCmd.FinalCurveCmd, sCmd) +} + +func (sCmd *SnapshotOneCommand) RunCommand(cmd *cobra.Command, args []string) error { + response, err := basecmd.GetRpcResponse(sCmd.Rpc.Info, sCmd.Rpc) + sCmd.Error = err + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + + sCmd.row[cobrautil.ROW_RESULT] = "success" + sCmd.Response = response.(*cli2.SnapshotResponse2) + + list := cobrautil.Map2List(sCmd.row, sCmd.Header) + sCmd.TableNew.Append(list) + return nil +} + +func (sCmd *SnapshotOneCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&sCmd.FinalCurveCmd) +} diff --git a/tools-v2/pkg/cli/command/curvebs/snapshot/snapshot.go b/tools-v2/pkg/cli/command/curvebs/snapshot/snapshot.go new file mode 100644 index 0000000000..5d240432b4 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/snapshot/snapshot.go @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Project: CurveCli + * Created Date: 2023-04-28 + * Author: Xinlong-Chen + */ + +package snapshot + +import ( + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/snapshot/copyset" + "github.com/spf13/cobra" +) + +type SnapshotCommand struct { + basecmd.MidCurveCmd +} + +var _ basecmd.MidCurveCmdFunc = (*SnapshotCommand)(nil) // check interface + +func (statusCmd *SnapshotCommand) AddSubCommands() { + statusCmd.Cmd.AddCommand( + copyset.NewSnapshotOneCommand(), + ) +} + +func NewSnapshotCommand() *cobra.Command { + statusCmd := &SnapshotCommand{ + basecmd.MidCurveCmd{ + Use: "snapshot", + Short: "take snapshot for curvebs resource", + }, + } + return basecmd.NewMidCurveCli(&statusCmd.MidCurveCmd, statusCmd) +} diff --git a/tools-v2/pkg/cli/command/curvebs/status/chunkserver/chunkserver.go b/tools-v2/pkg/cli/command/curvebs/status/chunkserver/chunkserver.go new file mode 100644 index 0000000000..f2e29c2ad6 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/status/chunkserver/chunkserver.go @@ -0,0 +1,122 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. + */ +/* +* Project: CurveCli +* Created Date: 2023-05-11 +* Author: chengyi01 + */ +package chunkserver + +import ( + "fmt" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + listchunkserver "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/list/chunkserver" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/query/chunkserver" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/topology" + "github.com/spf13/cobra" +) + +type ChunkServerCommand struct { + basecmd.FinalCurveCmd + + ChunkServerInfos []*topology.ChunkServerInfo + RecoverStatusMap map[uint32]bool +} + +var _ basecmd.FinalCurveCmdFunc = (*ChunkServerCommand)(nil) + +func (csCmd *ChunkServerCommand) Init(cmd *cobra.Command, args []string) error { + recoverMap, err := chunkserver.GetQueryChunkServerRecoverStatus(csCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + csCmd.RecoverStatusMap = recoverMap + + csInfos, err := listchunkserver.GetChunkServerInCluster(csCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + csCmd.ChunkServerInfos = csInfos + header := []string{ + cobrautil.ROW_EXTERNAL_ADDR, cobrautil.ROW_INTERNAL_ADDR, cobrautil.ROW_VERSION, + cobrautil.ROW_STATUS, cobrautil.ROW_RECOVERING, + } + csCmd.SetHeader(header) + csCmd.TableNew.SetAutoMergeCellsByColumnIndex(cobrautil.GetIndexSlice( + csCmd.Header, []string{cobrautil.ROW_STATUS, cobrautil.ROW_RECOVERING, cobrautil.ROW_VERSION}, + )) + return nil +} + +func (csCmd *ChunkServerCommand) RunCommand(cmd *cobra.Command, args []string) error { + rows := 
make([]map[string]string, 0) + for _, csInfo := range csCmd.ChunkServerInfos { + row := make(map[string]string) + row[cobrautil.ROW_EXTERNAL_ADDR] = fmt.Sprintf("%s:%d", csInfo.GetExternalIp(), csInfo.GetPort()) + row[cobrautil.ROW_INTERNAL_ADDR] = fmt.Sprintf("%s:%d", csInfo.GetHostIp(), csInfo.GetPort()) + row[cobrautil.ROW_VERSION] = csInfo.GetVersion() + state := csInfo.GetOnlineState() + if state == topology.OnlineState_ONLINE { + row[cobrautil.ROW_STATUS] = cobrautil.ROW_VALUE_ONLINE + } else { + row[cobrautil.ROW_STATUS] = cobrautil.ROW_VALUE_OFFLINE + } + row[cobrautil.ROW_RECOVERING] = fmt.Sprintf("%v",csCmd.RecoverStatusMap[csInfo.GetChunkServerID()]) + rows = append(rows, row) + } + list := cobrautil.ListMap2ListSortByKeys(rows, csCmd.Header, []string{ + cobrautil.ROW_STATUS, cobrautil.ROW_RECOVERING, cobrautil.ROW_VERSION, + }) + csCmd.TableNew.AppendBulk(list) + csCmd.Result = rows + csCmd.Error = cmderror.Success() + return nil +} + +func (csCmd *ChunkServerCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&csCmd.FinalCurveCmd, csCmd) +} + +func (csCmd *ChunkServerCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&csCmd.FinalCurveCmd) +} + +func (csCmd *ChunkServerCommand) AddFlags() { + config.AddRpcTimeoutFlag(csCmd.Cmd) + config.AddRpcRetryTimesFlag(csCmd.Cmd) + + config.AddBsMdsFlagOption(csCmd.Cmd) +} + +func NewStatusChunkServerCommand() *ChunkServerCommand { + csCmd := &ChunkServerCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "chunkserver", + Short: "get status of chunkserver", + }, + } + basecmd.NewFinalCurveCli(&csCmd.FinalCurveCmd, csCmd) + return csCmd +} + +func NewChunkServerCommand() *cobra.Command { + return NewStatusChunkServerCommand().Cmd +} diff --git a/tools-v2/pkg/cli/command/curvebs/status/client/client.go b/tools-v2/pkg/cli/command/curvebs/status/client/client.go new file mode 100644 index 0000000000..e056090224 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/status/client/client.go @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Project: CurveCli + * Created Date: 2023-04-27 + * Author: Xinlong-Chen + */ + +package client + +import ( + "strconv" + "strings" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/list/client" + config "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "golang.org/x/exp/slices" +) + +const ( + clientExample = `$ curve bs status client` +) + +type ClientCommand struct { + basecmd.FinalCurveCmd + metrics []*basecmd.Metric + rows []map[string]string +} + +const ( + PROCESS_CMD_SUBURI = "/vars/process_cmdline" + VERSION_SUBURI = "/vars/curve_version" +) + +const ( + PROCESS_CMD_KEY = "process" + VERSION_KEY = "version" + COUNT_KEY = "count" +) + +const ( + kProcessNebdServer string = "nebd-server" + kProcessQemu string = "qemu" + kProcessPython string = "python" + kProcessOther string = "other" +) + +var _ basecmd.FinalCurveCmdFunc = (*ClientCommand)(nil) // check interface + +func NewClientCommand() *cobra.Command { + return NewStatusClientCommand().Cmd +} + +func (cCmd *ClientCommand) AddFlags() { + config.AddBsMdsFlagOption(cCmd.Cmd) + config.AddRpcRetryTimesFlag(cCmd.Cmd) + config.AddRpcTimeoutFlag(cCmd.Cmd) + config.AddHttpTimeoutFlag(cCmd.Cmd) +} + +func (cCmd *ClientCommand) Init(cmd *cobra.Command, args []string) error { + header := []string{cobrautil.ROW_TYPE, cobrautil.ROW_VERSION, cobrautil.ROW_ADDR, cobrautil.ROW_NUM} + cCmd.SetHeader(header) + cCmd.TableNew.SetAutoMergeCellsByColumnIndex(cobrautil.GetIndexSlice( + cCmd.Header, []string{cobrautil.ROW_TYPE, cobrautil.ROW_VERSION, cobrautil.ROW_NUM}, + )) + + // get client list + results, err := client.GetClientList(cmd) + if err.Code != cmderror.CODE_SUCCESS { + return err.ToError() + } + + if len((*results).([]map[string]string)) == 0 { + retErr := cmderror.ErrBsGetClientStatus() + retErr.Format("Client List is null!") + return retErr.ToError() + } + + clientAddr := make([]string, 0) + for _, res := range (*results).([]map[string]string) { + clientAddr = append(clientAddr, res[cobrautil.ROW_IP]+":"+res[cobrautil.ROW_PORT]) + } + + // Init RPC + // Split client lists to different process + // count version for each process + for _, addr := range clientAddr { + timeout := viper.GetDuration(config.VIPER_GLOBALE_HTTPTIMEOUT) + + addrs := []string{addr} + statusMetric := basecmd.NewMetric(addrs, PROCESS_CMD_SUBURI, timeout) + cCmd.metrics = append(cCmd.metrics, statusMetric) + versionMetric := basecmd.NewMetric(addrs, VERSION_SUBURI, timeout) + cCmd.metrics = append(cCmd.metrics, versionMetric) + } + + return nil +} + +func (cCmd *ClientCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&cCmd.FinalCurveCmd, cCmd) +} + +func (cCmd *ClientCommand) RunCommand(cmd *cobra.Command, args []string) error { + // run metrics request + results := make(chan basecmd.MetricResult, config.MaxChannelSize()) + size := 0 + for _, metric := range cCmd.metrics { + size++ + go func(m *basecmd.Metric) { + result, err := basecmd.QueryMetric(m) + var key string + if m.SubUri == PROCESS_CMD_SUBURI { + key = PROCESS_CMD_KEY + } else { + key = VERSION_KEY + } + var value string + if err.TypeCode() == cmderror.CODE_SUCCESS { + value, err = basecmd.GetMetricValue(result) + if m.SubUri == 
PROCESS_CMD_SUBURI { + value = cCmd.GetProcessNameFromProcessResp(&value) + } + } + results <- basecmd.MetricResult{ + Addr: m.Addrs[0], + Key: key, + Value: value, + Err: err, + } + }(metric) + } + + clientsInfo := map[string]map[string]string{} + count := 0 + var errs []*cmderror.CmdError + var recordAddrs []string + for res := range results { + if res.Err.TypeCode() != cmderror.CODE_SUCCESS { + index := slices.Index(recordAddrs, res.Addr) + if index == -1 { + errs = append(errs, res.Err) + recordAddrs = append(recordAddrs, res.Addr) + } + } else if _, ok := clientsInfo[res.Addr]; ok { + clientsInfo[res.Addr][res.Key] = res.Value + } else { + clientsInfo[res.Addr] = make(map[string]string) + clientsInfo[res.Addr][res.Key] = res.Value + } + count++ + if count >= size { + break + } + } + + // process type => version => addrs + clientsAddrs := map[string]map[string][]string{} + for addr, mp := range clientsInfo { + if _, ok := clientsAddrs[mp[PROCESS_CMD_KEY]]; !ok { + clientsAddrs[mp[PROCESS_CMD_KEY]] = make(map[string][]string) + } + if _, ok := clientsAddrs[mp[PROCESS_CMD_KEY]][mp[VERSION_KEY]]; !ok { + clientsAddrs[mp[PROCESS_CMD_KEY]][mp[VERSION_KEY]] = make([]string, 0) + } + clientsAddrs[mp[PROCESS_CMD_KEY]][mp[VERSION_KEY]] = append(clientsAddrs[mp[PROCESS_CMD_KEY]][mp[VERSION_KEY]], addr) + } + + for process_type, mp := range clientsAddrs { + for version, addrs := range mp { + for _, addr := range addrs { + row := make(map[string]string) + row[cobrautil.ROW_TYPE] = process_type + row[cobrautil.ROW_VERSION] = version + row[cobrautil.ROW_ADDR] = addr + row[cobrautil.ROW_NUM] = strconv.Itoa(len(addrs)) + cCmd.rows = append(cCmd.rows, row) + } + } + } + + mergeErr := cmderror.MergeCmdErrorExceptSuccess(errs) + cCmd.Error = mergeErr + list := cobrautil.ListMap2ListSortByKeys(cCmd.rows, cCmd.Header, []string{ + cobrautil.ROW_TYPE, cobrautil.ROW_VERSION, cobrautil.ROW_ADDR, + }) + cCmd.TableNew.AppendBulk(list) + cCmd.Result = cCmd.rows + + return nil +} + +func (cCmd *ClientCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&cCmd.FinalCurveCmd) +} + +func (cCmd *ClientCommand) GetProcessNameFromProcessResp(process_resp *string) string { + if find := strings.Contains(*process_resp, kProcessNebdServer); find { + return kProcessNebdServer + } else if find := strings.Contains(*process_resp, kProcessPython); find { + return kProcessPython + } else if find := strings.Contains(*process_resp, kProcessQemu); find { + return kProcessQemu + } else { + return kProcessOther + } +} + +func NewStatusClientCommand() *ClientCommand { + clientCmd := &ClientCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "client", + Short: "get status of client", + Example: clientExample, + }, + } + basecmd.NewFinalCurveCli(&clientCmd.FinalCurveCmd, clientCmd) + return clientCmd +} diff --git a/tools-v2/pkg/cli/command/curvebs/status/copyset/copyset.go b/tools-v2/pkg/cli/command/curvebs/status/copyset/copyset.go new file mode 100644 index 0000000000..a04022b361 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/status/copyset/copyset.go @@ -0,0 +1,106 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. 
+* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. + */ +/* +* Project: CurveCli +* Created Date: 2023-05-18 +* Author: chengyi01 + */ + +package copyset + +import ( + "fmt" + "strings" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + checkcopyset "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/check/copyset" + clustercopyset "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/list/copyset" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/spf13/cobra" +) + +type CopysetCommand struct { + basecmd.FinalCurveCmd +} + +var _ basecmd.FinalCurveCmdFunc = (*CopysetCommand)(nil) // check interface + +func NewCopysetCommand() *cobra.Command { + return NewStatusCopysetCommand().Cmd +} + +func (cCmd *CopysetCommand) AddFlags() { + config.AddRpcTimeoutFlag(cCmd.Cmd) + config.AddRpcRetryTimesFlag(cCmd.Cmd) + config.AddBsMdsFlagOption(cCmd.Cmd) + config.AddBsMarginOptionFlag(cCmd.Cmd) +} + +func (cCmd *CopysetCommand) Init(cmd *cobra.Command, args []string) error { + config.AddBsFilterOptionFlag(cCmd.Cmd) + cCmd.Cmd.ParseFlags([]string{fmt.Sprintf("--%s=%s", config.CURVEBS_FIlTER, cobrautil.FALSE_STRING)}) + copysetsInfo, err := clustercopyset.GetCopySetsInCluster(cCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + var copysetIds []string + var poolIds []string + for _, info := range copysetsInfo { + copysetIds = append(copysetIds, fmt.Sprintf("%d", info.GetCopysetId())) + poolIds = append(poolIds, fmt.Sprintf("%d", info.GetLogicalPoolId())) + } + config.AddBsCopysetIdSliceRequiredFlag(cCmd.Cmd) + config.AddBsLogicalPoolIdSliceRequiredFlag(cCmd.Cmd) + cCmd.Cmd.ParseFlags([]string{ + fmt.Sprintf("--%s", config.CURVEBS_COPYSET_ID), strings.Join(copysetIds, ","), + fmt.Sprintf("--%s", config.CURVEBS_LOGIC_POOL_ID), strings.Join(poolIds, ","), + }) + + checkRes, err := checkcopyset.GetCopysetsStatus(cCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + cCmd.TableNew = checkRes + cCmd.Error = err + + return nil +} + +func (cCmd *CopysetCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&cCmd.FinalCurveCmd, cCmd) +} + +func (cCmd *CopysetCommand) RunCommand(cmd *cobra.Command, args []string) error { + return nil +} + +func (cCmd *CopysetCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&cCmd.FinalCurveCmd) +} + +func NewStatusCopysetCommand() *CopysetCommand { + copysetCmd := &CopysetCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "copyset", + Short: "get status of copysets in cluster", + }, + } + basecmd.NewFinalCurveCli(©setCmd.FinalCurveCmd, copysetCmd) + return copysetCmd +} diff --git a/tools-v2/pkg/cli/command/curvebs/status/etcd/etcd.go b/tools-v2/pkg/cli/command/curvebs/status/etcd/etcd.go index 8729309d2a..8301e2447c 100644 --- a/tools-v2/pkg/cli/command/curvebs/status/etcd/etcd.go +++ b/tools-v2/pkg/cli/command/curvebs/status/etcd/etcd.go @@ -25,7 +25,6 @@ package etcd import 
( "fmt" - "github.com/olekukonko/tablewriter" cmderror "github.com/opencurve/curve/tools-v2/internal/error" cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" @@ -68,12 +67,13 @@ func NewEtcdCommand() *cobra.Command { } func (eCmd *EtcdCommand) AddFlags() { - config.AddBsEtcdAddrFlag(eCmd.Cmd) config.AddHttpTimeoutFlag(eCmd.Cmd) + config.AddBsEtcdAddrFlag(eCmd.Cmd) } func (eCmd *EtcdCommand) Init(cmd *cobra.Command, args []string) error { eCmd.health = cobrautil.HEALTH_ERROR + header := []string{cobrautil.ROW_ADDR, cobrautil.ROW_VERSION, cobrautil.ROW_STATUS} eCmd.SetHeader(header) eCmd.TableNew.SetAutoMergeCellsByColumnIndex(cobrautil.GetIndexSlice( @@ -85,6 +85,7 @@ func (eCmd *EtcdCommand) Init(cmd *cobra.Command, args []string) error { if addrErr.TypeCode() != cmderror.CODE_SUCCESS { return fmt.Errorf(addrErr.Message) } + for _, addr := range etcdAddrs { // set metric timeout := viper.GetDuration(config.VIPER_GLOBALE_HTTPTIMEOUT) @@ -117,6 +118,7 @@ func (eCmd *EtcdCommand) RunCommand(cmd *cobra.Command, args []string) error { size++ go func(m *basecmd.Metric) { result, err := basecmd.QueryMetric(m) + var key string var metricKey string if m.SubUri == STATUS_SUBURI { @@ -126,6 +128,7 @@ func (eCmd *EtcdCommand) RunCommand(cmd *cobra.Command, args []string) error { key = "version" metricKey = VARSION_METRIC_KEY } + var value string if err.TypeCode() == cmderror.CODE_SUCCESS { value, err = basecmd.GetKeyValueFromJsonMetric(result, metricKey) @@ -133,6 +136,7 @@ func (eCmd *EtcdCommand) RunCommand(cmd *cobra.Command, args []string) error { errs = append(errs, err) } } + results <- basecmd.MetricResult{ Addr: m.Addrs[0], Key: key, @@ -166,8 +170,6 @@ func (eCmd *EtcdCommand) RunCommand(cmd *cobra.Command, args []string) error { break } } - mergeErr := cmderror.MergeCmdErrorExceptSuccess(errs) - eCmd.Error = &mergeErr if len(errs) > 0 && len(errs) < len(eCmd.rows) { eCmd.health = cobrautil.HEALTH_WARN @@ -175,12 +177,14 @@ func (eCmd *EtcdCommand) RunCommand(cmd *cobra.Command, args []string) error { eCmd.health = cobrautil.HEALTH_OK } + mergeErr := cmderror.MergeCmdErrorExceptSuccess(errs) + eCmd.Error = mergeErr list := cobrautil.ListMap2ListSortByKeys(eCmd.rows, eCmd.Header, []string{ cobrautil.ROW_STATUS, cobrautil.ROW_VERSION, }) eCmd.TableNew.AppendBulk(list) - eCmd.Result = eCmd.rows + return nil } @@ -200,15 +204,20 @@ func NewStatusEtcdCommand() *EtcdCommand { return etcdCmd } -func GetEtcdStatus(caller *cobra.Command) (*interface{}, *tablewriter.Table, *cmderror.CmdError, cobrautil.ClUSTER_HEALTH_STATUS) { +func GetEtcdStatus(caller *cobra.Command) (*interface{}, *cmderror.CmdError, cobrautil.ClUSTER_HEALTH_STATUS) { etcdCmd := NewStatusEtcdCommand() etcdCmd.Cmd.SetArgs([]string{ fmt.Sprintf("--%s", config.FORMAT), config.FORMAT_NOOUT, }) config.AlignFlagsValue(caller, etcdCmd.Cmd, []string{ - config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEFS_MDSADDR, + config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEFS_ETCDADDR, }) etcdCmd.Cmd.SilenceErrors = true - etcdCmd.Cmd.Execute() - return &etcdCmd.Result, etcdCmd.TableNew, etcdCmd.Error, etcdCmd.health + err := etcdCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsGetEtcdStatus() + retErr.Format(err.Error()) + return nil, retErr, cobrautil.HEALTH_ERROR + } + return &etcdCmd.Result, cmderror.Success(), etcdCmd.health } diff --git a/tools-v2/pkg/cli/command/curvebs/status/mds/mds.go b/tools-v2/pkg/cli/command/curvebs/status/mds/mds.go index 
d1ccffedfa..1a97f29020 100644 --- a/tools-v2/pkg/cli/command/curvebs/status/mds/mds.go +++ b/tools-v2/pkg/cli/command/curvebs/status/mds/mds.go @@ -25,7 +25,6 @@ package mds import ( "fmt" - "github.com/olekukonko/tablewriter" cmderror "github.com/opencurve/curve/tools-v2/internal/error" cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" @@ -59,8 +58,8 @@ func NewMdsCommand() *cobra.Command { } func (mCmd *MdsCommand) AddFlags() { - config.AddBsMdsFlagOption(mCmd.Cmd) config.AddHttpTimeoutFlag(mCmd.Cmd) + config.AddBsMdsFlagOption(mCmd.Cmd) config.AddBsMdsDummyFlagOption(mCmd.Cmd) } @@ -84,9 +83,10 @@ func (mCmd *MdsCommand) Init(cmd *cobra.Command, args []string) error { if addrErr.TypeCode() != cmderror.CODE_SUCCESS { return fmt.Errorf(addrErr.Message) } + + timeout := viper.GetDuration(config.VIPER_GLOBALE_HTTPTIMEOUT) for _, addr := range dummyAddrs { // Use the dummy port to access the metric service - timeout := viper.GetDuration(config.VIPER_GLOBALE_HTTPTIMEOUT) addrs := []string{addr} statusMetric := basecmd.NewMetric(addrs, STATUS_SUBURI, timeout) @@ -118,16 +118,20 @@ func (mCmd *MdsCommand) RunCommand(cmd *cobra.Command, args []string) error { size++ go func(m *basecmd.Metric) { result, err := basecmd.QueryMetric(m) + var key string + if m.SubUri == STATUS_SUBURI { key = "status" } else { key = "version" } + var value string if err.TypeCode() == cmderror.CODE_SUCCESS { value, err = basecmd.GetMetricValue(result) } + results <- basecmd.MetricResult{ Addr: m.Addrs[0], Key: key, @@ -157,18 +161,21 @@ func (mCmd *MdsCommand) RunCommand(cmd *cobra.Command, args []string) error { break } } + if len(errs) > 0 && len(errs) < len(mCmd.rows) { mCmd.health = cobrautil.HEALTH_WARN } else if len(errs) == 0 { mCmd.health = cobrautil.HEALTH_OK } + mergeErr := cmderror.MergeCmdErrorExceptSuccess(errs) - mCmd.Error = &mergeErr + mCmd.Error = mergeErr list := cobrautil.ListMap2ListSortByKeys(mCmd.rows, mCmd.Header, []string{ cobrautil.ROW_STATUS, cobrautil.ROW_VERSION, }) mCmd.TableNew.AppendBulk(list) mCmd.Result = mCmd.rows + return nil } @@ -188,13 +195,18 @@ func NewStatusMdsCommand() *MdsCommand { return mdsCmd } -func GetMdsStatus(caller *cobra.Command) (*interface{}, *tablewriter.Table, *cmderror.CmdError, cobrautil.ClUSTER_HEALTH_STATUS) { +func GetMdsStatus(caller *cobra.Command) (*interface{}, *cmderror.CmdError, cobrautil.ClUSTER_HEALTH_STATUS) { mdsCmd := NewStatusMdsCommand() mdsCmd.Cmd.SetArgs([]string{ fmt.Sprintf("--%s", config.FORMAT), config.FORMAT_NOOUT, }) config.AlignFlagsValue(caller, mdsCmd.Cmd, []string{config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_MDSADDR}) mdsCmd.Cmd.SilenceErrors = true - mdsCmd.Cmd.Execute() - return &mdsCmd.Result, mdsCmd.TableNew, mdsCmd.Error, mdsCmd.health + err := mdsCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsGetMdsStatus() + retErr.Format(err.Error()) + return nil, retErr, cobrautil.HEALTH_ERROR + } + return &mdsCmd.Result, cmderror.Success(), mdsCmd.health } diff --git a/tools-v2/pkg/cli/command/curvebs/status/snapshot/snapshot.go b/tools-v2/pkg/cli/command/curvebs/status/snapshot/snapshot.go new file mode 100644 index 0000000000..a924adb7eb --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/status/snapshot/snapshot.go @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: CurveCli + * Created Date: 2023-04-25 + * Author: Xinlong-Chen + */ + +package snapshot + +import ( + "fmt" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + config "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "golang.org/x/exp/slices" +) + +const ( + snapshotExample = `$ curve bs status snapshotserver` +) + +type SnapshotCommand struct { + basecmd.FinalCurveCmd + metrics []*basecmd.Metric + rows []map[string]string + health cobrautil.ClUSTER_HEALTH_STATUS +} + +const ( + STATUS_SUBURI = "/vars/snapshotcloneserver_status" + VERSION_SUBURI = "/vars/curve_version" +) + +var ( + SnapshotCloneStatusMap = map[string]string{ + "active": "leader", + "standby": "follower", + } +) + +var _ basecmd.FinalCurveCmdFunc = (*SnapshotCommand)(nil) // check interface + +func NewSnapshotCommand() *cobra.Command { + return NewStatusSnapshotCommand().Cmd +} + +func (sCmd *SnapshotCommand) AddFlags() { + config.AddHttpTimeoutFlag(sCmd.Cmd) + config.AddBsSnapshotCloneFlagOption(sCmd.Cmd) + config.AddBsSnapshotCloneDummyFlagOption(sCmd.Cmd) +} + +func (sCmd *SnapshotCommand) Init(cmd *cobra.Command, args []string) error { + sCmd.health = cobrautil.HEALTH_ERROR + + header := []string{cobrautil.ROW_ADDR, cobrautil.ROW_DUMMY_ADDR, cobrautil.ROW_VERSION, cobrautil.ROW_STATUS} + sCmd.SetHeader(header) + sCmd.TableNew.SetAutoMergeCellsByColumnIndex(cobrautil.GetIndexSlice( + sCmd.Header, []string{cobrautil.ROW_STATUS, cobrautil.ROW_VERSION}, + )) + + // set main addr + mainAddrs, addrErr := config.GetBsSnapshotAddrSlice(sCmd.Cmd) + if addrErr.TypeCode() != cmderror.CODE_SUCCESS { + return fmt.Errorf(addrErr.Message) + } + + // set dummy addr + dummyAddrs, addrErr := config.GetBsSnapshotDummyAddrSlice(sCmd.Cmd) + if addrErr.TypeCode() != cmderror.CODE_SUCCESS { + return fmt.Errorf(addrErr.Message) + } + + for _, addr := range dummyAddrs { + // Use the dummy port to access the metric service + timeout := viper.GetDuration(config.VIPER_GLOBALE_HTTPTIMEOUT) + + addrs := []string{addr} + statusMetric := basecmd.NewMetric(addrs, STATUS_SUBURI, timeout) + sCmd.metrics = append(sCmd.metrics, statusMetric) + versionMetric := basecmd.NewMetric(addrs, VERSION_SUBURI, timeout) + sCmd.metrics = append(sCmd.metrics, versionMetric) + } + + for i := range mainAddrs { + row := make(map[string]string) + row[cobrautil.ROW_ADDR] = mainAddrs[i] + row[cobrautil.ROW_DUMMY_ADDR] = dummyAddrs[i] + row[cobrautil.ROW_STATUS] = cobrautil.ROW_VALUE_OFFLINE + row[cobrautil.ROW_VERSION] = cobrautil.ROW_VALUE_UNKNOWN + sCmd.rows = append(sCmd.rows, row) + } + + return nil +} + +func (sCmd *SnapshotCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&sCmd.FinalCurveCmd, sCmd) +} + +func (sCmd *SnapshotCommand) RunCommand(cmd *cobra.Command, args []string) error { + results := make(chan basecmd.MetricResult, 
config.MaxChannelSize()) + size := 0 + for _, metric := range sCmd.metrics { + size++ + go func(m *basecmd.Metric) { + result, err := basecmd.QueryMetric(m) + + var key string + if m.SubUri == STATUS_SUBURI { + key = "status" + } else { + key = "version" + } + + var value string + if err.TypeCode() == cmderror.CODE_SUCCESS { + value, err = basecmd.GetMetricValue(result) + } + + results <- basecmd.MetricResult{ + Addr: m.Addrs[0], + Key: key, + Value: value, + Err: err, + } + }(metric) + } + + count := 0 + var errs []*cmderror.CmdError + var recordAddrs []string + for res := range results { + for _, row := range sCmd.rows { + if res.Err.TypeCode() == cmderror.CODE_SUCCESS && row[cobrautil.ROW_DUMMY_ADDR] == res.Addr { + if res.Key == "status" { + row[res.Key] = SnapshotCloneStatusMap[res.Value] + } else { + row[res.Key] = res.Value + } + } else if res.Err.TypeCode() != cmderror.CODE_SUCCESS { + index := slices.Index(recordAddrs, res.Addr) + if index == -1 { + errs = append(errs, res.Err) + recordAddrs = append(recordAddrs, res.Addr) + } + } + } + count++ + if count >= size { + break + } + } + + if len(errs) > 0 && len(errs) < len(sCmd.rows) { + sCmd.health = cobrautil.HEALTH_WARN + } else if len(errs) == 0 { + sCmd.health = cobrautil.HEALTH_OK + } + + mergeErr := cmderror.MergeCmdErrorExceptSuccess(errs) + sCmd.Error = mergeErr + list := cobrautil.ListMap2ListSortByKeys(sCmd.rows, sCmd.Header, []string{ + cobrautil.ROW_STATUS, cobrautil.ROW_VERSION, + }) + sCmd.TableNew.AppendBulk(list) + sCmd.Result = sCmd.rows + + return nil +} + +func (sCmd *SnapshotCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&sCmd.FinalCurveCmd) +} + +func NewStatusSnapshotCommand() *SnapshotCommand { + snapshotCmd := &SnapshotCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "snapshotserver", + Short: "get the snapshot clone status of curvebs", + Example: snapshotExample, + }, + } + basecmd.NewFinalCurveCli(&snapshotCmd.FinalCurveCmd, snapshotCmd) + return snapshotCmd +} + +func GetSnapshotStatus(caller *cobra.Command) (*interface{}, *cmderror.CmdError, cobrautil.ClUSTER_HEALTH_STATUS) { + snapshotCmd := NewStatusSnapshotCommand() + snapshotCmd.Cmd.SetArgs([]string{ + fmt.Sprintf("--%s", config.FORMAT), config.FORMAT_NOOUT, + }) + config.AlignFlagsValue(caller, snapshotCmd.Cmd, []string{ + config.RPCRETRYTIMES, config.RPCTIMEOUT, config.CURVEBS_SNAPSHOTADDR, + }) + snapshotCmd.Cmd.SilenceErrors = true + err := snapshotCmd.Cmd.Execute() + if err != nil { + retErr := cmderror.ErrBsGetSnapshotServerStatus() + retErr.Format(err.Error()) + return nil, retErr, cobrautil.HEALTH_ERROR + } + return &snapshotCmd.Result, cmderror.Success(), snapshotCmd.health +} diff --git a/tools-v2/pkg/cli/command/curvebs/status/status.go b/tools-v2/pkg/cli/command/curvebs/status/status.go index 473b65f71a..5156a2286d 100644 --- a/tools-v2/pkg/cli/command/curvebs/status/status.go +++ b/tools-v2/pkg/cli/command/curvebs/status/status.go @@ -24,8 +24,12 @@ package status import ( basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/status/chunkserver" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/status/client" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/status/copyset" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/status/etcd" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/status/mds" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/status/snapshot" 
"github.com/spf13/cobra" ) @@ -39,6 +43,10 @@ func (statusCmd *StatusCommand) AddSubCommands() { statusCmd.Cmd.AddCommand( etcd.NewEtcdCommand(), mds.NewMdsCommand(), + client.NewClientCommand(), + snapshot.NewSnapshotCommand(), + chunkserver.NewChunkServerCommand(), + copyset.NewCopysetCommand(), ) } diff --git a/tools-v2/pkg/cli/command/curvebs/update/copyset/availflag/availflag.go b/tools-v2/pkg/cli/command/curvebs/update/copyset/availflag/availflag.go new file mode 100644 index 0000000000..ea031711fb --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/update/copyset/availflag/availflag.go @@ -0,0 +1,264 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: CurveCli + * Created Date: 2023-04-24 + * Author: baytan + */ + +package availflag + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + copyset2 "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/check/copyset" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/list/chunkserver" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/list/unavailcopysets" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/query/chunk" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/common" + "github.com/opencurve/curve/tools-v2/proto/proto/topology" + "github.com/opencurve/curve/tools-v2/proto/proto/topology/statuscode" + "github.com/spf13/cobra" + "google.golang.org/grpc" +) + +const ( + copysetAvailflagExample = `$ curve bs update copyset availflag --availflag=true --dryrun=false` +) + +type UpdateCopysetAvailflagRpc struct { + Info *basecmd.Rpc + Request *topology.SetCopysetsAvailFlagRequest + topologyClient topology.TopologyServiceClient +} + +var _ basecmd.RpcFunc = (*UpdateCopysetAvailflagRpc)(nil) // check interface + +func (cRpc *UpdateCopysetAvailflagRpc) NewRpcClient(cc grpc.ClientConnInterface) { + cRpc.topologyClient = topology.NewTopologyServiceClient(cc) +} + +func (cRpc *UpdateCopysetAvailflagRpc) Stub_Func(ctx context.Context) (interface{}, error) { + return cRpc.topologyClient.SetCopysetsAvailFlag(ctx, cRpc.Request) +} + +type CopysetAvailflagCommand struct { + basecmd.FinalCurveCmd + Rpc *UpdateCopysetAvailflagRpc +} + +var _ basecmd.FinalCurveCmdFunc = (*CopysetAvailflagCommand)(nil) // check interface + +func NewCommand() *cobra.Command { + return NewCopysetAvailflagCommand().Cmd +} + +func NewCopysetAvailflagCommand() *CopysetAvailflagCommand { + fsCmd := &CopysetAvailflagCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "availflag", + Short: "update copyset availflag", + Example: copysetAvailflagExample, + }, + } + + basecmd.NewFinalCurveCli(&fsCmd.FinalCurveCmd, fsCmd) + return fsCmd +} + +func (cCmd 
*CopysetAvailflagCommand) AddFlags() { + config.AddBsMdsFlagOption(cCmd.Cmd) + config.AddRpcRetryTimesFlag(cCmd.Cmd) + config.AddRpcTimeoutFlag(cCmd.Cmd) + + config.AddBsDryrunOptionFlag(cCmd.Cmd) + config.AddBsAvailFlagRequireFlag(cCmd.Cmd) +} + +func (cCmd *CopysetAvailflagCommand) Init(cmd *cobra.Command, args []string) error { + mdsAddrs, err := config.GetBsMdsAddrSlice(cCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + timeout := config.GetFlagDuration(cCmd.Cmd, config.RPCTIMEOUT) + retrytimes := config.GetFlagInt32(cCmd.Cmd, config.RPCRETRYTIMES) + availFlag := config.GetBsFlagBool(cCmd.Cmd, config.CURVEBS_AVAILFLAG) + var copysets []*common.CopysetInfo + if availFlag { + var err *cmderror.CmdError + copysets, err = unavailcopysets.GetUnAvailCopySets(cCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + } else { + // list chunkserver + chunkserverInfos, errCmd := chunkserver.GetChunkServerInCluster(cCmd.Cmd) + if errCmd.TypeCode() != cmderror.CODE_SUCCESS { + return errCmd.ToError() + } + + // check chunkserver offline + config.AddBsCopysetIdSliceRequiredFlag(cCmd.Cmd) + config.AddBsLogicalPoolIdSliceRequiredFlag(cCmd.Cmd) + config.AddBsChunkIdSliceRequiredFlag(cCmd.Cmd) + config.AddBsChunkServerAddressSliceRequiredFlag(cCmd.Cmd) + for _, info := range chunkserverInfos { + address := fmt.Sprintf("%s:%d", *info.HostIp, *info.Port) + cCmd.Cmd.ParseFlags([]string{ + fmt.Sprintf("--%s", config.CURVEBS_LOGIC_POOL_ID), "1", + fmt.Sprintf("--%s", config.CURVEBS_COPYSET_ID), "1", + fmt.Sprintf("--%s", config.CURVEBS_CHUNK_ID), "1", + fmt.Sprintf("--%s", config.CURVEBS_CHUNKSERVER_ADDRESS), address, + }) + } + addr2Chunk, errCmd := chunk.GetChunkInfo(cCmd.Cmd) + if errCmd.TypeCode() != cmderror.CODE_SUCCESS { + return errCmd.ToError() + } + var offlineChunkServer []string + for addr, info := range *addr2Chunk { + if info == nil { + offlineChunkServer = append(offlineChunkServer, addr) + } + } + if len(offlineChunkServer) > 0 { + cCmd.Cmd.ResetFlags() + cCmd.AddFlags() + config.AddBsChunkServerAddressSliceRequiredFlag(cCmd.Cmd) + config.AddFormatFlag(cCmd.Cmd) + cCmd.Cmd.SetArgs([]string{ + fmt.Sprintf("--%s", config.CURVEBS_CHUNKSERVER_ADDRESS), strings.Join(offlineChunkServer, ","), + }) + addr2Copysets, errCmd := chunkserver.GetCopySetsInChunkServerByHost(cCmd.Cmd) + if errCmd.TypeCode() != cmderror.CODE_SUCCESS { + return errCmd.ToError() + } + copysetIds := make([]string, 0) + logicalpoolids := make([]string, 0) + for _, infos := range *addr2Copysets { + for _, info := range infos { + copysetid := info.GetCopysetId() + logicalpoolid := info.GetLogicalPoolId() + copysetIds = append(copysetIds, strconv.FormatUint(uint64(copysetid), 10)) + logicalpoolids = append(logicalpoolids, strconv.FormatUint(uint64(logicalpoolid), 10)) + } + } + + config.AddBsCopysetIdSliceRequiredFlag(cCmd.Cmd) + config.AddBsLogicalPoolIdSliceRequiredFlag(cCmd.Cmd) + cCmd.Cmd.SetArgs([]string{ + fmt.Sprintf("--%s", config.CURVEBS_LOGIC_POOL_ID), strings.Join(logicalpoolids, ","), + fmt.Sprintf("--%s", config.CURVEBS_COPYSET_ID), strings.Join(copysetIds, ","), + }) + key2Health, errCmd := copyset2.CheckCopysets(cCmd.Cmd) + if errCmd.TypeCode() != cmderror.CODE_SUCCESS { + return errCmd.ToError() + } + + for _, infos := range *addr2Copysets { + for _, info := range infos { + key := cobrautil.GetCopysetKey(uint64(info.GetLogicalPoolId()), uint64(info.GetCopysetId())) + if health, ok := (*key2Health)[key]; !ok || health == cobrautil.HEALTH_ERROR { + 
copysets = append(copysets, info) + } + } + } + } + } + + cCmd.Rpc = &UpdateCopysetAvailflagRpc{ + Request: &topology.SetCopysetsAvailFlagRequest{ + AvailFlag: &availFlag, + Copysets: copysets, + }, + Info: basecmd.NewRpc(mdsAddrs, timeout, retrytimes, "SetCopysetsAvailFlag"), + } + + header := []string{cobrautil.ROW_COPYSET_ID, + cobrautil.ROW_POOL_ID, cobrautil.ROW_AVAILFLAG, cobrautil.ROW_DRYRUN, + } + cCmd.SetHeader(header) + cCmd.TableNew.SetAutoMergeCellsByColumnIndex(cobrautil.GetIndexSlice( + cCmd.Header, []string{cobrautil.ROW_AVAILFLAG, cobrautil.ROW_DRYRUN}, + )) + return nil +} + +func (cCmd *CopysetAvailflagCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&cCmd.FinalCurveCmd, cCmd) +} + +func (cCmd *CopysetAvailflagCommand) RunCommand(cmd *cobra.Command, args []string) error { + if len(cCmd.Rpc.Request.Copysets) == 0 { + return nil + } + rows := make([]map[string]string, 0) + dryrun := config.GetFlagBool(cmd, config.CURVEBS_DRYRUN) + if dryrun { + for _, info := range cCmd.Rpc.Request.Copysets { + row := make(map[string]string) + row[cobrautil.ROW_POOL_ID] = strconv.FormatUint(uint64(info.GetLogicalPoolId()), 10) + row[cobrautil.ROW_COPYSET_ID] = strconv.FormatUint(uint64(info.GetCopysetId()), 10) + row[cobrautil.ROW_AVAILFLAG] = fmt.Sprintf("%v => %v", !*cCmd.Rpc.Request.AvailFlag, *cCmd.Rpc.Request.AvailFlag) + row[cobrautil.ROW_DRYRUN] = cobrautil.ROW_VALUE_TRUE + rows = append(rows, row) + } + } else { + result, err := basecmd.GetRpcResponse(cCmd.Rpc.Info, cCmd.Rpc) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + response := result.(*topology.SetCopysetsAvailFlagResponse) + if response.GetStatusCode() != int32(statuscode.TopoStatusCode_Success) { + err := cmderror.ErrBsSetCopysetAvailFlagRpc( + statuscode.TopoStatusCode(response.GetStatusCode()), + ) + return err.ToError() + } + for _, info := range cCmd.Rpc.Request.Copysets { + row := make(map[string]string) + row[cobrautil.ROW_POOL_ID] = strconv.FormatUint(uint64(info.GetLogicalPoolId()), 10) + row[cobrautil.ROW_COPYSET_ID] = strconv.FormatUint(uint64(info.GetCopysetId()), 10) + row[cobrautil.ROW_AVAILFLAG] = fmt.Sprintf("%v => %v", !*cCmd.Rpc.Request.AvailFlag, *cCmd.Rpc.Request.AvailFlag) + row[cobrautil.ROW_DRYRUN] = cobrautil.ROW_VALUE_FALSE + rows = append(rows, row) + } + } + + sort.Slice(rows, func(i, j int) bool { + return rows[i][cobrautil.ROW_COPYSET_KEY] < rows[j][cobrautil.ROW_COPYSET_KEY] + }) + list := cobrautil.ListMap2ListSortByKeys(rows, cCmd.Header, []string{ + cobrautil.ROW_POOL_ID, cobrautil.ROW_STATUS, cobrautil.ROW_COPYSET_ID, + }) + cCmd.TableNew.AppendBulk(list) + cCmd.Result = rows + return nil +} + +func (cCmd *CopysetAvailflagCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&cCmd.FinalCurveCmd) +} diff --git a/tools-v2/pkg/cli/command/curvebs/update/copyset/copyset.go b/tools-v2/pkg/cli/command/curvebs/update/copyset/copyset.go new file mode 100644 index 0000000000..9defb68230 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/update/copyset/copyset.go @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: tools-v2 + * Created Date: 2023-04-24 + * Author: baytan + */ + +package copyset + +import ( + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/update/copyset/availflag" + "github.com/spf13/cobra" +) + +type CopysetCommand struct { + basecmd.MidCurveCmd +} + +var _ basecmd.MidCurveCmdFunc = (*CopysetCommand)(nil) // check interface + +func (sCmd *CopysetCommand) AddSubCommands() { + sCmd.Cmd.AddCommand( + availflag.NewCommand(), + ) +} + +func NewCopysetCommand() *cobra.Command { + sCmd := &CopysetCommand{ + basecmd.MidCurveCmd{ + Use: "copyset", + Short: "update copyset resources in the curvebs", + }, + } + return basecmd.NewMidCurveCli(&sCmd.MidCurveCmd, sCmd) +} diff --git a/tools-v2/pkg/cli/command/curvebs/update/file/file.go b/tools-v2/pkg/cli/command/curvebs/update/file/file.go new file mode 100644 index 0000000000..ee575570e3 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/update/file/file.go @@ -0,0 +1,150 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
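copyset.go above is pure wiring: a mid-level `copyset` command that only registers its subcommands. The same cobra layout in a self-contained form, with a placeholder handler instead of the real availflag command:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func newAvailflagCmd() *cobra.Command {
	// Placeholder for availflag.NewCommand(); only the wiring is shown.
	return &cobra.Command{
		Use:   "availflag",
		Short: "set copysets available flag",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("availflag invoked")
			return nil
		},
	}
}

func main() {
	update := &cobra.Command{Use: "update"}
	copyset := &cobra.Command{Use: "copyset", Short: "update copyset resources in the curvebs"}
	copyset.AddCommand(newAvailflagCmd())
	update.AddCommand(copyset)

	update.SetArgs([]string{"copyset", "availflag"})
	_ = update.Execute() // prints "availflag invoked"
}
```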
+ */ +/* +* Project: curve +* Created Date: 2023-04-13 +* Author: chengyi01 + */ + +package file + +import ( + "context" + + "github.com/dustin/go-humanize" + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/nameserver2" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "google.golang.org/grpc" +) + +const ( + fileExample = `$ curve bs update file` +) + +type ExtendFileRpc struct { + Info *basecmd.Rpc + Request *nameserver2.ExtendFileRequest + mdsClient nameserver2.CurveFSServiceClient +} + +var _ basecmd.RpcFunc = (*ExtendFileRpc)(nil) // check interface + +func (eRpc *ExtendFileRpc) NewRpcClient(cc grpc.ClientConnInterface) { + eRpc.mdsClient = nameserver2.NewCurveFSServiceClient(cc) +} + +func (eRpc *ExtendFileRpc) Stub_Func(ctx context.Context) (interface{}, error) { + return eRpc.mdsClient.ExtendFile(ctx, eRpc.Request) +} + +type FileCommand struct { + basecmd.FinalCurveCmd + Rpc *ExtendFileRpc + Response *nameserver2.ExtendFileResponse +} + +var _ basecmd.FinalCurveCmdFunc = (*FileCommand)(nil) // check interface + +func NewUpdateFileCommand() *FileCommand { + fileCmd := &FileCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "file", + Short: "extend volume of file to size", + Example: fileExample, + }, + } + basecmd.NewFinalCurveCli(&fileCmd.FinalCurveCmd, fileCmd) + return fileCmd +} + +func NewFileCommand() *cobra.Command { + return NewUpdateFileCommand().Cmd +} + +func (uCmd *FileCommand) AddFlags() { + config.AddRpcRetryTimesFlag(uCmd.Cmd) + config.AddRpcTimeoutFlag(uCmd.Cmd) + config.AddBsMdsFlagOption(uCmd.Cmd) + config.AddBsPathRequiredFlag(uCmd.Cmd) + config.AddBsSizeRequiredFlag(uCmd.Cmd) + config.AddBsUserOptionFlag(uCmd.Cmd) + config.AddBsPasswordOptionFlag(uCmd.Cmd) +} + +func (uCmd *FileCommand) Init(cmd *cobra.Command, args []string) error { + fileName := config.GetBsFlagString(uCmd.Cmd, config.CURVEBS_PATH) + newSize := config.GetBsFlagUint64(uCmd.Cmd, config.CURVEBS_SIZE) + newSize = newSize * humanize.GiByte + date, errDat := cobrautil.GetTimeofDayUs() + owner := config.GetBsFlagString(uCmd.Cmd, config.CURVEBS_USER) + if errDat.TypeCode() != cmderror.CODE_SUCCESS { + return errDat.ToError() + } + request := &nameserver2.ExtendFileRequest{ + FileName: &fileName, + NewSize: &newSize, + Date: &date, + Owner: &owner, + } + password := config.GetBsFlagString(uCmd.Cmd, config.CURVEBS_PASSWORD) + if owner == viper.GetString(config.VIPER_CURVEBS_USER) && len(password) != 0 { + strSig := cobrautil.GetString2Signature(date, owner) + sig := cobrautil.CalcString2Signature(strSig, password) + request.Signature = &sig + } + mdsAddrs, errMds := config.GetBsMdsAddrSlice(uCmd.Cmd) + if errMds.TypeCode() != cmderror.CODE_SUCCESS { + return errMds.ToError() + } + timeout := config.GetFlagDuration(uCmd.Cmd, config.RPCTIMEOUT) + retrytimes := config.GetFlagInt32(uCmd.Cmd, config.RPCRETRYTIMES) + uCmd.Rpc = &ExtendFileRpc{ + Info: basecmd.NewRpc(mdsAddrs, timeout, retrytimes, "ExtendFile"), + Request: request, + } + uCmd.SetHeader([]string{cobrautil.ROW_RESULT}) + return nil +} + +func (uCmd *FileCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&uCmd.FinalCurveCmd, uCmd) +} + +func (uCmd *FileCommand) RunCommand(cmd *cobra.Command, args []string) 
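Init above takes --size in GiB and multiplies by humanize.GiByte before filling ExtendFileRequest.NewSize; RunCommand later formats the byte count back with humanize.IBytes. The round trip in isolation:

```go
package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	sizeGiB := uint64(10)                // value passed via --size
	newSize := sizeGiB * humanize.GiByte // bytes sent in ExtendFileRequest.NewSize

	fmt.Println(newSize)                  // 10737418240
	fmt.Println(humanize.IBytes(newSize)) // "10 GiB", as shown in the result table
}
```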
error { + result, err := basecmd.GetRpcResponse(uCmd.Rpc.Info, uCmd.Rpc) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + uCmd.Response = result.(*nameserver2.ExtendFileResponse) + uCmd.Result = uCmd.Response + sizeStr := humanize.IBytes(uCmd.Rpc.Request.GetNewSize()) + uCmd.Error = cmderror.ErrExtendFile(uCmd.Response.GetStatusCode(), *uCmd.Rpc.Request.FileName, sizeStr) + uCmd.TableNew.Append([]string{uCmd.Error.Message}) + if uCmd.Error.TypeCode() != cmderror.CODE_SUCCESS { + return uCmd.Error.ToError() + } + return nil +} + +func (uCmd *FileCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&uCmd.FinalCurveCmd) +} diff --git a/tools-v2/pkg/cli/command/curvebs/update/leader/leader.go b/tools-v2/pkg/cli/command/curvebs/update/leader/leader.go new file mode 100644 index 0000000000..570f88eea2 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/update/leader/leader.go @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: CurveCli + * Created Date: 2023-04-28 + * Author: Xinlong-Chen + */ + +package leader + +import ( + "context" + "errors" + "fmt" + + "github.com/spf13/cobra" + "google.golang.org/grpc" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/delete/peer" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/cli2" +) + +const ( + leaderExample = `$ curve bs update leader 127.0.0.1:8200:0 --logicalpoolid=1 --copysetid=10001 + --peers=127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0` +) + +type LeaderRpc struct { + Info *basecmd.Rpc + Request *cli2.TransferLeaderRequest2 + Client cli2.CliService2Client +} + +func (lRpc *LeaderRpc) NewRpcClient(cc grpc.ClientConnInterface) { + lRpc.Client = cli2.NewCliService2Client(cc) +} + +func (lRpc *LeaderRpc) Stub_Func(ctx context.Context) (interface{}, error) { + return lRpc.Client.TransferLeader(ctx, lRpc.Request) +} + +type leaderCommand struct { + basecmd.FinalCurveCmd + + Rpc *LeaderRpc + Response *cli2.TransferLeaderResponse2 + row map[string]string +} + +var _ basecmd.FinalCurveCmdFunc = (*leaderCommand)(nil) // check interface + +// NewCommand ... 
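Both the extend and throttle commands attach a signature only when the owner matches the configured root user and a password is present. The real string layout and hash live in cobrautil.GetString2Signature / CalcString2Signature; the md5 layout below is purely an illustration of the shape, not the actual algorithm:

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// signRequest illustrates the owner/date/password signing pattern; the
// actual layout used by CalcString2Signature may differ.
func signRequest(dateUs uint64, owner, password string) string {
	in := fmt.Sprintf("%d:%s:%s", dateUs, owner, password)
	sum := md5.Sum([]byte(in))
	return hex.EncodeToString(sum[:])
}

func main() {
	const rootUser = "root" // viper: curvebs.root.user
	owner, password := "root", "root_password"
	dateUs := uint64(1682300000000000) // microseconds, as GetTimeofDayUs suggests

	var signature *string
	if owner == rootUser && password != "" {
		sig := signRequest(dateUs, owner, password)
		signature = &sig // only set for the root user, as in Init above
	}
	if signature != nil {
		fmt.Println(*signature)
	}
}
```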
+func NewleaderCommand() *cobra.Command { + peerCmd := &leaderCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "leader", + Short: "update(reset) the leader from the copyset", + Example: leaderExample, + }, + } + basecmd.NewFinalCurveCli(&peerCmd.FinalCurveCmd, peerCmd) + return peerCmd.Cmd +} + +func (lCmd *leaderCommand) AddFlags() { + config.AddRpcRetryTimesFlag(lCmd.Cmd) + config.AddRpcTimeoutFlag(lCmd.Cmd) + + config.AddBsLogicalPoolIdRequiredFlag(lCmd.Cmd) + config.AddBsCopysetIdRequiredFlag(lCmd.Cmd) + + config.AddBsPeersConfFlag(lCmd.Cmd) +} + +func (lCmd *leaderCommand) Init(cmd *cobra.Command, args []string) error { + lCmd.SetHeader([]string{cobrautil.ROW_LEADER, cobrautil.ROW_OLDLEADER, cobrautil.ROW_COPYSET, cobrautil.ROW_RESULT}) + lCmd.TableNew.SetAutoMergeCellsByColumnIndex(cobrautil.GetIndexSlice( + lCmd.Header, []string{}, + )) + + opts := peer.Options{} + opts.Timeout = config.GetFlagDuration(lCmd.Cmd, config.RPCTIMEOUT) + opts.RetryTimes = config.GetFlagInt32(lCmd.Cmd, config.RPCRETRYTIMES) + + copysetID := config.GetBsFlagUint32(lCmd.Cmd, config.CURVEBS_COPYSET_ID) + logicalPoolID := config.GetBsFlagUint32(lCmd.Cmd, config.CURVEBS_LOGIC_POOL_ID) + + // parse peers config + peers := config.GetBsFlagStringSlice(lCmd.Cmd, config.CURVEBS_PEERS_ADDRESS) + c, err := peer.ParseConfiguration(peers) + if err != nil { + return err.ToError() + } + conf := *c + + // parse peer conf + if len(args) < 1 { + pErr := cmderror.ErrGetPeer() + pErr.Format("should specified the peer address") + return pErr.ToError() + } + peerPara := args[0] + leaderPeer, err := peer.ParsePeer(peerPara) + if err != nil { + return err.ToError() + } + + // 1. acquire leader peer info. + oldLeader, err := peer.GetLeader(logicalPoolID, copysetID, conf, opts) + if err != nil { + return err.ToError() + } + + // 2. judge is same? + oldLeaderPeer, err := peer.ParsePeer(oldLeader.GetAddress()) + if err != nil { + return err.ToError() + } + + if oldLeaderPeer.GetAddress() == leaderPeer.GetAddress() { + return errors.New("don't need transfer!") + } + + out := make(map[string]string) + out[cobrautil.ROW_OLDLEADER] = oldLeader.GetAddress() + out[cobrautil.ROW_LEADER] = fmt.Sprintf("%s:%d", leaderPeer.GetAddress(), leaderPeer.GetId()) + out[cobrautil.ROW_COPYSET] = fmt.Sprintf("(%d:%d)", logicalPoolID, copysetID) + lCmd.row = out + + lCmd.Rpc = &LeaderRpc{ + Info: basecmd.NewRpc([]string{oldLeaderPeer.GetAddress()}, opts.Timeout, opts.RetryTimes, "TransferLeader"), + Request: &cli2.TransferLeaderRequest2{ + LogicPoolId: &logicalPoolID, + CopysetId: ©setID, + Leader: oldLeaderPeer, + Transferee: leaderPeer, + }, + } + + return nil +} + +func (lCmd *leaderCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&lCmd.FinalCurveCmd, lCmd) +} + +func (lCmd *leaderCommand) RunCommand(cmd *cobra.Command, args []string) error { + // 3. 
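The leader command takes peers in the `ip:port:index` form shown in its example (`127.0.0.1:8200:0`) and parses them through peer.ParsePeer. A reduced stand-in for that parser:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePeer is a reduced stand-in for peer.ParsePeer: it accepts the
// "ip:port:index" form and returns the dial address plus the peer index.
// The real helper also validates the address and builds a *common.Peer.
func parsePeer(s string) (addr string, index uint64, err error) {
	parts := strings.Split(s, ":")
	if len(parts) != 3 {
		return "", 0, fmt.Errorf("invalid peer %q, want ip:port:index", s)
	}
	index, err = strconv.ParseUint(parts[2], 10, 64)
	if err != nil {
		return "", 0, fmt.Errorf("invalid peer index in %q: %v", s, err)
	}
	return parts[0] + ":" + parts[1], index, nil
}

func main() {
	addr, index, err := parsePeer("127.0.0.1:8200:0")
	fmt.Println(addr, index, err) // 127.0.0.1:8200 0 <nil>
}
```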
transfer leader + response, err := basecmd.GetRpcResponse(lCmd.Rpc.Info, lCmd.Rpc) + lCmd.Error = err + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + + lCmd.row[cobrautil.ROW_RESULT] = "success" + lCmd.Response = response.(*cli2.TransferLeaderResponse2) + + list := cobrautil.Map2List(lCmd.row, lCmd.Header) + lCmd.TableNew.Append(list) + return nil +} + +func (lCmd *leaderCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&lCmd.FinalCurveCmd) +} diff --git a/tools-v2/pkg/cli/command/curvebs/update/peer/peer.go b/tools-v2/pkg/cli/command/curvebs/update/peer/peer.go index f5a0306a47..76731fd4da 100644 --- a/tools-v2/pkg/cli/command/curvebs/update/peer/peer.go +++ b/tools-v2/pkg/cli/command/curvebs/update/peer/peer.go @@ -42,7 +42,7 @@ import ( ) const ( - updateExample = `$ curve bs update peer 127.0.0.0:8200:0 --logicalpoolid=1 --copysetid=1` + peerExample = `$ curve bs update peer 127.0.0.0:8200:0 --logicalpoolid=1 --copysetid=1` ) type ResetRpc struct { @@ -84,7 +84,7 @@ func NewPeerCommand() *cobra.Command { FinalCurveCmd: basecmd.FinalCurveCmd{ Use: "peer", Short: "update(reset) the peer from the copyset", - Example: updateExample, + Example: peerExample, }, } basecmd.NewFinalCurveCli(&peerCmd.FinalCurveCmd, peerCmd) @@ -95,8 +95,8 @@ func (pCmd *PeerCommand) AddFlags() { config.AddRpcRetryTimesFlag(pCmd.Cmd) config.AddRpcTimeoutFlag(pCmd.Cmd) - config.AddBSLogicalPoolIdFlag(pCmd.Cmd) - config.AddBSCopysetIdFlag(pCmd.Cmd) + config.AddBsLogicalPoolIdRequiredFlag(pCmd.Cmd) + config.AddBsCopysetIdRequiredFlag(pCmd.Cmd) } // ParsePeer parse the peer string @@ -129,21 +129,14 @@ func (pCmd *PeerCommand) Init(cmd *cobra.Command, args []string) error { pCmd.Header, []string{}, )) - var err error var e *cmderror.CmdError pCmd.opts.Timeout = config.GetFlagDuration(pCmd.Cmd, config.RPCTIMEOUT) pCmd.opts.RetryTimes = config.GetFlagInt32(pCmd.Cmd, config.RPCRETRYTIMES) - pCmd.copysetID, err = config.GetBsFlagUint32(pCmd.Cmd, config.CURVEBS_COPYSET_ID) - if err != nil { - return err - } + pCmd.copysetID = config.GetBsFlagUint32(pCmd.Cmd, config.CURVEBS_COPYSET_ID) - pCmd.logicalPoolID, err = config.GetBsFlagUint32(pCmd.Cmd, config.CURVEBS_LOGIC_POOL_ID) - if err != nil { - return err - } + pCmd.logicalPoolID = config.GetBsFlagUint32(pCmd.Cmd, config.CURVEBS_LOGIC_POOL_ID) // parse peer conf if len(args) < 1 { diff --git a/tools-v2/pkg/cli/command/curvebs/update/scan_state/scan_state.go b/tools-v2/pkg/cli/command/curvebs/update/scan_state/scan_state.go new file mode 100644 index 0000000000..adc25fd662 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/update/scan_state/scan_state.go @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
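The peer.go hunk above switches GetBsFlagUint32 to the same precedence the other getters use: an explicitly set flag wins, otherwise the viper-bound value (config file or environment) is returned. That precedence in a runnable form:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

// getUint32 mirrors the Bs flag getters: a changed command-line flag wins,
// otherwise the value bound in viper is used.
func getUint32(cmd *cobra.Command, flag, viperKey string) uint32 {
	if cmd.Flag(flag).Changed {
		v, _ := cmd.Flags().GetUint32(flag)
		return v
	}
	return viper.GetUint32(viperKey)
}

func main() {
	cmd := &cobra.Command{Use: "demo"}
	cmd.Flags().Uint32("copysetid", 0, "copyset id")
	_ = viper.BindPFlag("curvebs.copysetid", cmd.Flags().Lookup("copysetid"))

	viper.Set("curvebs.copysetid", 7) // e.g. value coming from a config file
	fmt.Println(getUint32(cmd, "copysetid", "curvebs.copysetid")) // 7

	_ = cmd.ParseFlags([]string{"--copysetid", "42"})
	fmt.Println(getUint32(cmd, "copysetid", "curvebs.copysetid")) // 42
}
```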
+ */ + +/* +* Project: curvecli +* Created Date: 2023-05-08 +* Author: montaguelhz + */ + +package scan_state + +import ( + "context" + "fmt" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/topology" + "github.com/opencurve/curve/tools-v2/proto/proto/topology/statuscode" + "github.com/spf13/cobra" + "google.golang.org/grpc" +) + +type UpdateScanStateRpc struct { + Info *basecmd.Rpc + Request *topology.SetLogicalPoolScanStateRequest + Client topology.TopologyServiceClient +} + +var _ basecmd.RpcFunc = (*UpdateScanStateRpc)(nil) // check interface + +func (sRpc *UpdateScanStateRpc) NewRpcClient(cc grpc.ClientConnInterface) { + sRpc.Client = topology.NewTopologyServiceClient(cc) +} + +func (sRpc *UpdateScanStateRpc) Stub_Func(ctx context.Context) (interface{}, error) { + return sRpc.Client.SetLogicalPoolScanState(ctx, sRpc.Request) +} + +type ScanStateCommand struct { + basecmd.FinalCurveCmd + + Rpc *UpdateScanStateRpc + Response *topology.SetLogicalPoolScanStateResponse + LogicalPoolID uint32 + Scan bool +} + +var _ basecmd.FinalCurveCmdFunc = (*ScanStateCommand)(nil) // check interface + +const ( + scanStateExample = `$ curve bs update scan-state --logicalpoolid 1 [--scan=true/false]` +) + +func NewScanStateCommand() *cobra.Command { + return NewUpdateScanStateCommand().Cmd +} + +func (sCmd *ScanStateCommand) AddFlags() { + config.AddBsMdsFlagOption(sCmd.Cmd) + config.AddRpcRetryTimesFlag(sCmd.Cmd) + config.AddRpcTimeoutFlag(sCmd.Cmd) + + config.AddBsLogicalPoolIdRequiredFlag(sCmd.Cmd) + config.AddBsScanOptionFlag(sCmd.Cmd) +} + +func NewUpdateScanStateCommand() *ScanStateCommand { + sCmd := &ScanStateCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "scan-state", + Short: "enable/disable scan for logical pool", + Example: scanStateExample, + }, + } + + basecmd.NewFinalCurveCli(&sCmd.FinalCurveCmd, sCmd) + return sCmd +} + +func (sCmd *ScanStateCommand) Init(cmd *cobra.Command, args []string) error { + mdsAddrs, err := config.GetBsMdsAddrSlice(sCmd.Cmd) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + + header := []string{cobrautil.ROW_ID, cobrautil.ROW_SCAN, cobrautil.ROW_RESULT, cobrautil.ROW_REASON} + sCmd.SetHeader(header) + + Timeout := config.GetFlagDuration(sCmd.Cmd, config.RPCTIMEOUT) + RetryTimes := config.GetFlagInt32(sCmd.Cmd, config.RPCRETRYTIMES) + sCmd.LogicalPoolID = config.GetBsFlagUint32(sCmd.Cmd, config.CURVEBS_LOGIC_POOL_ID) + sCmd.Scan = config.GetBsFlagBool(sCmd.Cmd, config.CURVEBS_SCAN) + sCmd.Rpc = &UpdateScanStateRpc{ + Info: basecmd.NewRpc(mdsAddrs, Timeout, RetryTimes, "UpdateScanState"), + Request: &topology.SetLogicalPoolScanStateRequest{ + LogicalPoolID: &sCmd.LogicalPoolID, + ScanEnable: &sCmd.Scan, + }, + } + return nil +} + +func (sCmd *ScanStateCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&sCmd.FinalCurveCmd, sCmd) +} + +func (sCmd *ScanStateCommand) RunCommand(cmd *cobra.Command, args []string) error { + out := make(map[string]string) + out[cobrautil.ROW_ID] = fmt.Sprintf("%d", sCmd.LogicalPoolID) + out[cobrautil.ROW_SCAN] = fmt.Sprintf("%t", sCmd.Scan) + result, err := basecmd.GetRpcResponse(sCmd.Rpc.Info, sCmd.Rpc) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + 
} + sCmd.Response = result.(*topology.SetLogicalPoolScanStateResponse) + if *sCmd.Response.StatusCode != int32(statuscode.TopoStatusCode_Success) { + out[cobrautil.ROW_RESULT] = cobrautil.ROW_VALUE_FAILED + out[cobrautil.ROW_REASON] = statuscode.TopoStatusCode_name[*sCmd.Response.StatusCode] + } else { + out[cobrautil.ROW_RESULT] = cobrautil.ROW_VALUE_SUCCESS + out[cobrautil.ROW_REASON] = cobrautil.ROW_VALUE_NULL + } + + res := cobrautil.Map2List(out, sCmd.Header) + sCmd.TableNew.Append(res) + + sCmd.Result = out + sCmd.Error = cmderror.ErrSuccess() + return nil +} + +func (sCmd *ScanStateCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&sCmd.FinalCurveCmd) +} diff --git a/tools-v2/pkg/cli/command/curvebs/update/throttle/throttle.go b/tools-v2/pkg/cli/command/curvebs/update/throttle/throttle.go new file mode 100644 index 0000000000..dc561a61b1 --- /dev/null +++ b/tools-v2/pkg/cli/command/curvebs/update/throttle/throttle.go @@ -0,0 +1,163 @@ +/* +* Copyright (c) 2023 NetEase Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. + */ +/* +* Project: curve +* Created Date: 2023-04-24 +* Author: chengyi01 + */ + +package throttle + +import ( + "context" + + cmderror "github.com/opencurve/curve/tools-v2/internal/error" + cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" + basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/config" + "github.com/opencurve/curve/tools-v2/pkg/output" + "github.com/opencurve/curve/tools-v2/proto/proto/nameserver2" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "google.golang.org/grpc" +) + +type UpdateFileThrottleRpc struct { + Info *basecmd.Rpc + Request *nameserver2.UpdateFileThrottleParamsRequest + mdsClient nameserver2.CurveFSServiceClient +} + +var _ basecmd.RpcFunc = (*UpdateFileThrottleRpc)(nil) // check interface + +func (uRpc *UpdateFileThrottleRpc) NewRpcClient(cc grpc.ClientConnInterface) { + uRpc.mdsClient = nameserver2.NewCurveFSServiceClient(cc) +} + +func (uRpc *UpdateFileThrottleRpc) Stub_Func(ctx context.Context) (interface{}, error) { + return uRpc.mdsClient.UpdateFileThrottleParams(ctx, uRpc.Request) +} + +type ThrottleCommand struct { + basecmd.FinalCurveCmd + Rpc *UpdateFileThrottleRpc + Response *nameserver2.UpdateFileThrottleParamsResponse +} + +var _ basecmd.FinalCurveCmdFunc = (*ThrottleCommand)(nil) // check interface + +func NewUpdateThrottleCommand() *ThrottleCommand { + throttleCmd := &ThrottleCommand{ + FinalCurveCmd: basecmd.FinalCurveCmd{ + Use: "throttle", + Short: "update file throttle params", + }, + } + basecmd.NewFinalCurveCli(&throttleCmd.FinalCurveCmd, throttleCmd) + return throttleCmd +} + +func NewThrottleCommand() *cobra.Command { + return NewUpdateThrottleCommand().Cmd +} + +func (tCmd *ThrottleCommand) AddFlags() { + config.AddRpcRetryTimesFlag(tCmd.Cmd) + config.AddRpcTimeoutFlag(tCmd.Cmd) + config.AddBsMdsFlagOption(tCmd.Cmd) + config.AddBsPathRequiredFlag(tCmd.Cmd) + config.AddBsThrottleTypeRequiredFlag(tCmd.Cmd) + 
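scan-state's RunCommand above turns the RPC status code into the RESULT and REASON columns, resolving the reason through the generated statuscode.TopoStatusCode_name map. A stripped-down version with a stand-in name map (only two illustrative entries; Success is assumed to map to 0, as the generated enum suggests):

```go
package main

import "fmt"

// topoStatusName stands in for statuscode.TopoStatusCode_name; only two
// illustrative entries are listed here.
var topoStatusName = map[int32]string{
	0:  "Success",
	-1: "InternalError",
}

// rowFromStatus mirrors how the scan-state command fills the RESULT and
// REASON columns from the response status code.
func rowFromStatus(code int32) (result, reason string) {
	if code != 0 { // 0 == TopoStatusCode_Success (assumed)
		return "failed", topoStatusName[code]
	}
	return "success", "null"
}

func main() {
	fmt.Println(rowFromStatus(0))  // success null
	fmt.Println(rowFromStatus(-1)) // failed InternalError
}
```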
config.AddBsUserOptionFlag(tCmd.Cmd) + config.AddBsPasswordOptionFlag(tCmd.Cmd) + config.AddBsLimitRequiredFlag(tCmd.Cmd) + config.AddBsBurstOptionFlag(tCmd.Cmd) + config.AddBsBurstLengthOptionFlag(tCmd.Cmd) +} + +func (tCmd *ThrottleCommand) Init(cmd *cobra.Command, args []string) error { + path := config.GetBsFlagString(cmd, config.CURVEBS_PATH) + throttleTypeStr := config.GetBsFlagString(cmd, config.CURVEBS_TYPE) + throttleType, err := cobrautil.ParseThrottleType(throttleTypeStr) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + limit := config.GetBsFlagUint64(cmd, config.CURVEBS_LIMIT) + params := &nameserver2.ThrottleParams{ + Type: &throttleType, + Limit: &limit, + } + if config.GetFlagChanged(cmd, config.CURVEBS_BURST) { + burst := config.GetBsFlagUint64(cmd, config.CURVEBS_BURST) + burstLength := config.GetBsFlagUint64(cmd, config.CURVEBS_BURST_LENGTH) + if burstLength == 0 { + burstLength = 1 + } + params.Burst = &burst + params.BurstLength = &burstLength + } + date, errDat := cobrautil.GetTimeofDayUs() + owner := config.GetBsFlagString(tCmd.Cmd, config.CURVEBS_USER) + if errDat.TypeCode() != cmderror.CODE_SUCCESS { + return errDat.ToError() + } + request := &nameserver2.UpdateFileThrottleParamsRequest{ + FileName: &path, + Owner: &owner, + Date: &date, + ThrottleParams: params, + } + password := config.GetBsFlagString(tCmd.Cmd, config.CURVEBS_PASSWORD) + if owner == viper.GetString(config.VIPER_CURVEBS_USER) && len(password) != 0 { + strSig := cobrautil.GetString2Signature(date, owner) + sig := cobrautil.CalcString2Signature(strSig, password) + request.Signature = &sig + } + mdsAddrs, errMds := config.GetBsMdsAddrSlice(tCmd.Cmd) + if errMds.TypeCode() != cmderror.CODE_SUCCESS { + return errMds.ToError() + } + timeout := config.GetFlagDuration(tCmd.Cmd, config.RPCTIMEOUT) + retrytimes := config.GetFlagInt32(tCmd.Cmd, config.RPCRETRYTIMES) + tCmd.Rpc = &UpdateFileThrottleRpc{ + Info: basecmd.NewRpc(mdsAddrs, timeout, retrytimes, "UpdateFileThrottle"), + Request: request, + } + tCmd.SetHeader([]string{cobrautil.ROW_RESULT}) + return nil +} + +func (tCmd *ThrottleCommand) Print(cmd *cobra.Command, args []string) error { + return output.FinalCmdOutput(&tCmd.FinalCurveCmd, tCmd) +} + +func (tCmd *ThrottleCommand) RunCommand(cmd *cobra.Command, args []string) error { + result, err := basecmd.GetRpcResponse(tCmd.Rpc.Info, tCmd.Rpc) + if err.TypeCode() != cmderror.CODE_SUCCESS { + return err.ToError() + } + tCmd.Response = result.(*nameserver2.UpdateFileThrottleParamsResponse) + tCmd.Result = tCmd.Response + tCmd.Error = cmderror.ErrUpdateFileThrottle(tCmd.Response.GetStatusCode(), tCmd.Rpc.Request.GetFileName()) + tCmd.TableNew.Append([]string{tCmd.Error.Message}) + if tCmd.Error.TypeCode() != cmderror.CODE_SUCCESS { + return tCmd.Error.ToError() + } + return nil +} + +func (tCmd *ThrottleCommand) ResultPlainOutput() error { + return output.FinalCmdOutputPlain(&tCmd.FinalCurveCmd) +} diff --git a/tools-v2/pkg/cli/command/curvebs/update/update.go b/tools-v2/pkg/cli/command/curvebs/update/update.go index 1032cd31eb..b21407623d 100644 --- a/tools-v2/pkg/cli/command/curvebs/update/update.go +++ b/tools-v2/pkg/cli/command/curvebs/update/update.go @@ -23,10 +23,15 @@ package update import ( + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/update/copyset" "github.com/spf13/cobra" basecmd "github.com/opencurve/curve/tools-v2/pkg/cli/command" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/update/file" + 
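The throttle command only attaches Burst/BurstLength when --burst was actually set, and bumps a zero --burstlength to 1. The same rule extracted into a small helper (ThrottleParams here is a simplified local struct, not the generated proto type):

```go
package main

import "fmt"

// ThrottleParams mirrors the subset of fields the update throttle command
// fills in.
type ThrottleParams struct {
	Type        string
	Limit       uint64
	Burst       *uint64
	BurstLength *uint64
}

// buildParams follows the same rule as the command: burst fields are only
// attached when --burst was set, and a zero burst length is bumped to 1.
func buildParams(typ string, limit uint64, burstSet bool, burst, burstLength uint64) ThrottleParams {
	p := ThrottleParams{Type: typ, Limit: limit}
	if burstSet {
		if burstLength == 0 {
			burstLength = 1
		}
		p.Burst = &burst
		p.BurstLength = &burstLength
	}
	return p
}

func main() {
	p := buildParams("iops_total", 2000, true, 30000, 0)
	fmt.Println(p.Limit, *p.Burst, *p.BurstLength) // 2000 30000 1
}
```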
"github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/update/leader" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/update/peer" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/update/scan_state" + "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs/update/throttle" ) type UpdateCommand struct { @@ -38,6 +43,11 @@ var _ basecmd.MidCurveCmdFunc = (*UpdateCommand)(nil) func (updateCmd *UpdateCommand) AddSubCommands() { updateCmd.Cmd.AddCommand( peer.NewPeerCommand(), + file.NewFileCommand(), + throttle.NewThrottleCommand(), + leader.NewleaderCommand(), + scan_state.NewScanStateCommand(), + copyset.NewCopysetCommand(), ) } diff --git a/tools-v2/pkg/cli/command/curvefs/check/copyset/copyset.go b/tools-v2/pkg/cli/command/curvefs/check/copyset/copyset.go index c90717099d..9f7d250743 100644 --- a/tools-v2/pkg/cli/command/curvefs/check/copyset/copyset.go +++ b/tools-v2/pkg/cli/command/curvefs/check/copyset/copyset.go @@ -158,10 +158,12 @@ func (cCmd *CopysetCommand) RunCommand(cmd *cobra.Command, args []string) error row[cobrautil.ROW_COPYSET_ID] = fmt.Sprintf("%d", copysetid) if v == nil { row[cobrautil.ROW_STATUS] = cobrautil.CopysetHealthStatus_Str[int32(cobrautil.COPYSET_NOTEXIST)] + (*cCmd.copysetKey2Status)[k] = cobrautil.COPYSET_NOTEXIST } else { status, errsCheck := cobrautil.CheckCopySetHealth(v) copysetHealthCount[status]++ row[cobrautil.ROW_STATUS] = cobrautil.CopysetHealthStatus_Str[int32(status)] + (*cCmd.copysetKey2Status)[k] = status explain := "" if status != cobrautil.COPYSET_OK { for i, e := range errsCheck { @@ -177,6 +179,10 @@ func (cCmd *CopysetCommand) RunCommand(cmd *cobra.Command, args []string) error leaderInfo := (*cCmd.copysetKey2LeaderInfo)[k] if leaderInfo == nil { explain = "no leader peer" + if row[cobrautil.ROW_STATUS] == cobrautil.COPYSET_OK_STR { + row[cobrautil.ROW_STATUS] = cobrautil.COPYSET_ERROR_STR + copysetHealthCount[cobrautil.COPYSET_ERROR]++ + } } else if leaderInfo.Snapshot { installSnapshot := "installing snapshot" if len(explain) > 0 { @@ -215,7 +221,7 @@ func (cCmd *CopysetCommand) RunCommand(cmd *cobra.Command, args []string) error cCmd.health = cobrautil.HEALTH_OK } retErr := cmderror.MergeCmdErrorExceptSuccess(errs) - cCmd.Error = &retErr + cCmd.Error = retErr sort.Slice(rows, func(i, j int) bool { return rows[i][cobrautil.ROW_COPYSET_KEY] < rows[j][cobrautil.ROW_COPYSET_KEY] }) @@ -255,5 +261,5 @@ func (cCmd *CopysetCommand) UpdateCopysteGap(timeout time.Duration) *cmderror.Cm return true }) retErr := cmderror.MergeCmdErrorExceptSuccess(errs) - return &retErr + return retErr } diff --git a/tools-v2/pkg/cli/command/curvefs/list/partition/partition.go b/tools-v2/pkg/cli/command/curvefs/list/partition/partition.go index 99223bd764..ef38b3a94a 100644 --- a/tools-v2/pkg/cli/command/curvefs/list/partition/partition.go +++ b/tools-v2/pkg/cli/command/curvefs/list/partition/partition.go @@ -168,6 +168,9 @@ func (pCmd *PartitionCommand) RunCommand(cmd *cobra.Command, args []string) erro } var resList []interface{} for _, result := range results { + if result == nil { + continue + } response := result.(*topology.ListPartitionResponse) res, err := output.MarshalProtoJson(response) if err != nil { diff --git a/tools-v2/pkg/cli/command/curvefs/list/topology/topology.go b/tools-v2/pkg/cli/command/curvefs/list/topology/topology.go index 1628d71a88..106db3930b 100644 --- a/tools-v2/pkg/cli/command/curvefs/list/topology/topology.go +++ b/tools-v2/pkg/cli/command/curvefs/list/topology/topology.go @@ -214,7 +214,7 @@ func 
(tCmd *TopologyCommand) updateTable(topoMap *map[string]interface{}) *cmder } } retErr := cmderror.MergeCmdError(errs) - return &retErr + return retErr } func (tCmd *TopologyCommand) updateMetaserverAddr(metaservers []*topology.MetaServerInfo) { diff --git a/tools-v2/pkg/cli/command/curvefs/query/fs/fs.go b/tools-v2/pkg/cli/command/curvefs/query/fs/fs.go index 58f177f649..429c268a35 100644 --- a/tools-v2/pkg/cli/command/curvefs/query/fs/fs.go +++ b/tools-v2/pkg/cli/command/curvefs/query/fs/fs.go @@ -190,6 +190,9 @@ func (fCmd *FsCommand) RunCommand(cmd *cobra.Command, args []string) error { } var resList []interface{} for _, result := range results { + if result == nil { + continue + } response := result.(*mds.GetFsInfoResponse) res, err := output.MarshalProtoJson(response) if err != nil { diff --git a/tools-v2/pkg/cli/command/curvefs/query/metaserver/metaserver.go b/tools-v2/pkg/cli/command/curvefs/query/metaserver/metaserver.go index 95f889de8c..f2ea176349 100644 --- a/tools-v2/pkg/cli/command/curvefs/query/metaserver/metaserver.go +++ b/tools-v2/pkg/cli/command/curvefs/query/metaserver/metaserver.go @@ -184,6 +184,9 @@ func (mCmd *MetaserverCommand) RunCommand(cmd *cobra.Command, args []string) err } var resList []interface{} for _, result := range results { + if result == nil { + continue + } response := result.(*topology.GetMetaServerInfoResponse) res, err := output.MarshalProtoJson(response) if err != nil { diff --git a/tools-v2/pkg/cli/command/curvefs/status/cluster/cluster.go b/tools-v2/pkg/cli/command/curvefs/status/cluster/cluster.go index 329ec8cf8d..2c94c550a3 100644 --- a/tools-v2/pkg/cli/command/curvefs/status/cluster/cluster.go +++ b/tools-v2/pkg/cli/command/curvefs/status/cluster/cluster.go @@ -105,7 +105,7 @@ func (cCmd *ClusterCommand) RunCommand(cmd *cobra.Command, args []string) error cCmd.health = cobrautil.CompareHealth(cCmd.health, health) } finalErr := cmderror.MergeCmdErrorExceptSuccess(errs) - cCmd.Error = &finalErr + cCmd.Error = finalErr results["health"] = cobrautil.ClusterHealthStatus_Str[int32(cCmd.health)] cCmd.Result = results return nil diff --git a/tools-v2/pkg/cli/command/curvefs/status/etcd/etcd.go b/tools-v2/pkg/cli/command/curvefs/status/etcd/etcd.go index 08eb61fdc1..ab25299415 100644 --- a/tools-v2/pkg/cli/command/curvefs/status/etcd/etcd.go +++ b/tools-v2/pkg/cli/command/curvefs/status/etcd/etcd.go @@ -167,7 +167,7 @@ func (eCmd *EtcdCommand) RunCommand(cmd *cobra.Command, args []string) error { } } mergeErr := cmderror.MergeCmdErrorExceptSuccess(errs) - eCmd.Error = &mergeErr + eCmd.Error = mergeErr if len(errs) > 0 && len(errs) < len(eCmd.rows) { eCmd.health = cobrautil.HEALTH_WARN diff --git a/tools-v2/pkg/cli/command/curvefs/status/mds/mds.go b/tools-v2/pkg/cli/command/curvefs/status/mds/mds.go index 4d068d0e7b..df7a29651a 100644 --- a/tools-v2/pkg/cli/command/curvefs/status/mds/mds.go +++ b/tools-v2/pkg/cli/command/curvefs/status/mds/mds.go @@ -163,7 +163,7 @@ func (mCmd *MdsCommand) RunCommand(cmd *cobra.Command, args []string) error { mCmd.health = cobrautil.HEALTH_OK } mergeErr := cmderror.MergeCmdErrorExceptSuccess(errs) - mCmd.Error = &mergeErr + mCmd.Error = mergeErr list := cobrautil.ListMap2ListSortByKeys(mCmd.rows, mCmd.Header, []string{ cobrautil.ROW_STATUS, cobrautil.ROW_VERSION, }) diff --git a/tools-v2/pkg/cli/command/curvefs/status/metaserver/metaserver.go b/tools-v2/pkg/cli/command/curvefs/status/metaserver/metaserver.go index 9412751c9e..26e264e5fa 100644 --- a/tools-v2/pkg/cli/command/curvefs/status/metaserver/metaserver.go 
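The partition, fs and metaserver hunks above add the same guard: a failed sub-RPC leaves a nil entry in results, and type-asserting it without the check would panic. The pattern in isolation:

```go
package main

import "fmt"

type listPartitionResponse struct{ Count int }

// collect mirrors the nil guard added to the list/query commands before the
// type assertion on each RPC result.
func collect(results []interface{}) []*listPartitionResponse {
	var out []*listPartitionResponse
	for _, r := range results {
		if r == nil {
			continue // this sub-RPC failed; nothing to decode
		}
		out = append(out, r.(*listPartitionResponse))
	}
	return out
}

func main() {
	results := []interface{}{&listPartitionResponse{Count: 3}, nil}
	fmt.Println(len(collect(results))) // 1, the nil entry is skipped
}
```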
+++ b/tools-v2/pkg/cli/command/curvefs/status/metaserver/metaserver.go @@ -87,7 +87,7 @@ func (mCmd *MetaserverCommand) Init(cmd *cobra.Command, args []string) error { )) for i, addr := range externalAddrs { - if !cobrautil.IsValidAddr(addr) { + if !config.IsValidAddr(addr) { return fmt.Errorf("invalid metaserver external addr: %s", addr) } @@ -161,7 +161,7 @@ func (mCmd *MetaserverCommand) RunCommand(cmd *cobra.Command, args []string) err } mergeErr := cmderror.MergeCmdErrorExceptSuccess(errs) - mCmd.Error = &mergeErr + mCmd.Error = mergeErr if len(errs) > 0 && len(errs) < len(mCmd.rows) { mCmd.health = cobrautil.HEALTH_WARN diff --git a/tools-v2/pkg/cli/command/curvefs/usage/inode/inode.go b/tools-v2/pkg/cli/command/curvefs/usage/inode/inode.go index 8058ae6ebc..91c3effc3c 100644 --- a/tools-v2/pkg/cli/command/curvefs/usage/inode/inode.go +++ b/tools-v2/pkg/cli/command/curvefs/usage/inode/inode.go @@ -182,7 +182,7 @@ func (iCmd *InodeNumCommand) RunCommand(cmd *cobra.Command, args []string) error iCmd.Error = cmderror.MostImportantCmdError(errs) mergeErr := cmderror.MergeCmdErrorExceptSuccess(errs) - iCmd.Error = &mergeErr + iCmd.Error = mergeErr if len(rows) > 0 { list := cobrautil.ListMap2ListSortByKeys(rows, iCmd.Header, []string{ diff --git a/tools-v2/pkg/cli/command/curvefs/warmup/add/add.go b/tools-v2/pkg/cli/command/curvefs/warmup/add/add.go index 60edfa097b..539d839a13 100644 --- a/tools-v2/pkg/cli/command/curvefs/warmup/add/add.go +++ b/tools-v2/pkg/cli/command/curvefs/warmup/add/add.go @@ -48,16 +48,22 @@ $ curve fs warmup add /mnt/warmup # warmup all files in /mnt/warmup` const ( CURVEFS_WARMUP_OP_XATTR = "curvefs.warmup.op" - CURVEFS_WARMUP_OP_ADD_SINGLE = "add\nsingle\n%s" - CURVEFS_WARMUP_OP_ADD_LIST = "add\nlist\n%s" + CURVEFS_WARMUP_OP_ADD_SINGLE = "add\nsingle\n%s\n%s" + CURVEFS_WARMUP_OP_ADD_LIST = "add\nlist\n%s\n%s" ) +var STORAGE_TYPE = map[string]string{ + "disk": "disk", + "mem": "kvclient", +} + type AddCommand struct { basecmd.FinalCurveCmd Mountpoint *mountinfo.MountInfo Path string // path in user system - CurvefsPath string // path in curvefs - Single bool // warmup a single file or directory + CurvefsPath string // path in curvefs + Single bool // warmup a single file or directory + StorageType string // warmup storage type ConvertFails []string } @@ -82,6 +88,7 @@ func NewAddCommand() *cobra.Command { func (aCmd *AddCommand) AddFlags() { config.AddFileListOptionFlag(aCmd.Cmd) config.AddDaemonOptionPFlag(aCmd.Cmd) + config.AddStorageOptionFlag(aCmd.Cmd) } func (aCmd *AddCommand) Init(cmd *cobra.Command, args []string) error { @@ -122,18 +129,18 @@ func (aCmd *AddCommand) Init(cmd *cobra.Command, args []string) error { aCmd.Mountpoint = nil for _, mountpoint := range mountpoints { absPath, _ := filepath.Abs(aCmd.Path) - rel , err := filepath.Rel(mountpoint.MountPoint, absPath) + rel, err := filepath.Rel(mountpoint.MountPoint, absPath) if err == nil && !strings.HasPrefix(rel, "..") { // found the mountpoint if aCmd.Mountpoint == nil || len(aCmd.Mountpoint.MountPoint) < len(mountpoint.MountPoint) { - // Prevent the curvefs directory from being mounted under the curvefs directory - // /a/b/c: - // test-1 mount in /a - // test-1 mount in /a/b - // warmup /a/b/c. - aCmd.Mountpoint = mountpoint - aCmd.CurvefsPath = cobrautil.Path2CurvefsPath(aCmd.Path, mountpoint) + // Prevent the curvefs directory from being mounted under the curvefs directory + // /a/b/c: + // test-1 mount in /a + // test-1 mount in /a/b + // warmup /a/b/c. 
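The etcd/mds/metaserver status commands keep the rule that a partial failure only downgrades health to WARN (visible in the `len(errs) > 0 && len(errs) < len(rows)` checks above); the all-failed and none-failed branches in the sketch below are an assumption added for completeness:

```go
package main

import "fmt"

type health int

const (
	healthOK health = iota
	healthWarn
	healthError
)

// serviceHealth: no failed endpoints is ok, some failed is a warning,
// all failed is an error (the last branch is assumed, not shown above).
func serviceHealth(failed, total int) health {
	switch {
	case failed == 0:
		return healthOK
	case failed < total:
		return healthWarn
	default:
		return healthError
	}
}

func main() {
	fmt.Println(serviceHealth(0, 3), serviceHealth(1, 3), serviceHealth(3, 3)) // 0 1 2
}
```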
+ aCmd.Mountpoint = mountpoint + aCmd.CurvefsPath = cobrautil.Path2CurvefsPath(aCmd.Path, mountpoint) } } } @@ -141,6 +148,12 @@ func (aCmd *AddCommand) Init(cmd *cobra.Command, args []string) error { return fmt.Errorf("[%s] is not saved in curvefs", aCmd.Path) } + // check storage type + aCmd.StorageType = STORAGE_TYPE[config.GetStorageFlag(aCmd.Cmd)] + if aCmd.StorageType == "" { + return fmt.Errorf("[%s] is not support storage type", aCmd.StorageType) + } + return nil } @@ -185,7 +198,7 @@ func (aCmd *AddCommand) RunCommand(cmd *cobra.Command, args []string) error { } xattr = CURVEFS_WARMUP_OP_ADD_LIST } - value := fmt.Sprintf(xattr, aCmd.CurvefsPath) + value := fmt.Sprintf(xattr, aCmd.CurvefsPath, aCmd.StorageType) err := unix.Setxattr(aCmd.Path, CURVEFS_WARMUP_OP_XATTR, []byte(value), 0) if err == unix.ENOTSUP || err == unix.EOPNOTSUPP { return fmt.Errorf("filesystem does not support extended attributes") diff --git a/tools-v2/pkg/cli/curvecli.go b/tools-v2/pkg/cli/curvecli.go index 2cce4d4a7e..1f15569462 100644 --- a/tools-v2/pkg/cli/curvecli.go +++ b/tools-v2/pkg/cli/curvecli.go @@ -29,7 +29,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" - cobraUtil "github.com/opencurve/curve/tools-v2/internal/utils" + cobratemplate "github.com/opencurve/curve/tools-v2/internal/utils/template" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvebs" "github.com/opencurve/curve/tools-v2/pkg/cli/command/curvefs" "github.com/opencurve/curve/tools-v2/pkg/cli/command/version" @@ -42,9 +42,9 @@ func addSubCommands(cmd *cobra.Command) { func setupRootCommand(cmd *cobra.Command) { cmd.SetVersionTemplate("curve {{.Version}}\n") - cobraUtil.SetFlagErrorFunc(cmd) - cobraUtil.SetHelpTemplate(cmd) - cobraUtil.SetUsageTemplate(cmd) + cobratemplate.SetFlagErrorFunc(cmd) + cobratemplate.SetHelpTemplate(cmd) + cobratemplate.SetUsageTemplate(cmd) } func newCurveCommand() *cobra.Command { @@ -54,7 +54,7 @@ func newCurveCommand() *cobra.Command { Version: version.Version, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { - return cobraUtil.ShowHelp(os.Stderr)(cmd, args) + return cobratemplate.ShowHelp(os.Stderr)(cmd, args) } return fmt.Errorf("curve: '%s' is not a curve command.\n"+ "See 'curve --help'", args[0]) diff --git a/tools-v2/pkg/config/bs.go b/tools-v2/pkg/config/bs.go index d0f0d7cf1d..0fae3d141b 100644 --- a/tools-v2/pkg/config/bs.go +++ b/tools-v2/pkg/config/bs.go @@ -22,48 +22,109 @@ package config import ( + "fmt" + "strconv" "strings" "time" - "strconv" - "github.com/gookit/color" cmderror "github.com/opencurve/curve/tools-v2/internal/error" - cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" "github.com/spf13/cobra" "github.com/spf13/viper" ) const ( // curvebs - CURVEBS_MDSADDR = "mdsaddr" - VIPER_CURVEBS_MDSADDR = "curvebs.mdsAddr" - CURVEBS_MDSDUMMYADDR = "mdsdummyaddr" - VIPER_CURVEBS_MDSDUMMYADDR = "curvebs.mdsDummyAddr" - CURVEBS_ETCDADDR = "etcdaddr" - VIPER_CURVEBS_ETCDADDR = "curvebs.etcdAddr" - CURVEBS_PATH = "path" - VIPER_CURVEBS_PATH = "curvebs.path" - CURVEBS_USER = "user" - VIPER_CURVEBS_USER = "curvebs.root.user" - CURVEBS_DEFAULT_USER = "root" - CURVEBS_PASSWORD = "password" - VIPER_CURVEBS_PASSWORD = "curvebs.root.password" - CURVEBS_DEFAULT_PASSWORD = "root_password" - CURVEBS_CLUSTERMAP = "clustermap" - VIPER_CURVEBS_CLUSTERMAP = "curvebs.clustermap" - CURVEBS_FILENAME = "filename" - VIPER_CURVEBS_FILENAME = "curvebs.filename" - CURVEBS_FORCEDELETE = "forcedelete" - CURVEBS_DEFAULT_FORCEDELETE = false - CURVEBS_DIR = "dir" 
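warmup add now appends the storage type ("disk", or "kvclient" when --storage=mem) as an extra field of the xattr value before writing it with unix.Setxattr. A minimal sketch of that write, with hypothetical paths:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

const (
	warmupXattr = "curvefs.warmup.op"
	opAddSingle = "add\nsingle\n%s\n%s" // fields: op, mode, curvefs path, storage type
)

func main() {
	hostPath := "/mnt/warmup/data" // hypothetical path on the host
	curvefsPath := "/warmup/data"  // hypothetical path inside curvefs
	storageType := "disk"          // "disk", or "kvclient" for --storage=mem

	value := fmt.Sprintf(opAddSingle, curvefsPath, storageType)
	// Writing this xattr triggers the warmup in the mounted curvefs client;
	// ENOTSUP means the filesystem does not support extended attributes.
	if err := unix.Setxattr(hostPath, warmupXattr, []byte(value), 0); err != nil {
		fmt.Println("setxattr failed:", err)
	}
}
```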
- VIPER_CURVEBS_DIR = "curvebs.dir" - CURVEBS_LOGIC_POOL_ID = "logicalpoolid" - VIPER_CURVEBS_LOGIC_POOL_ID = "curvebs.logicalpoolid" - CURVEBS_COPYSET_ID = "copysetid" - VIPER_CURVEBS_COPYSET_ID = "curvebs.copysetid" - CURVEBS_PEERS_ADDRESS = "peers" - VIPER_CURVEBS_PEERS_ADDRESS = "curvebs.peers" + CURVEBS_MDSADDR = "mdsaddr" + VIPER_CURVEBS_MDSADDR = "curvebs.mdsAddr" + CURVEBS_MDSDUMMYADDR = "mdsdummyaddr" + VIPER_CURVEBS_MDSDUMMYADDR = "curvebs.mdsDummyAddr" + CURVEBS_ETCDADDR = "etcdaddr" + VIPER_CURVEBS_ETCDADDR = "curvebs.etcdAddr" + CURVEBS_PATH = "path" + VIPER_CURVEBS_PATH = "curvebs.path" + CURVEBS_DEFAULT_PATH = "/test" + CURVEBS_USER = "user" + VIPER_CURVEBS_USER = "curvebs.root.user" + CURVEBS_DEFAULT_USER = "root" + CURVEBS_PASSWORD = "password" + VIPER_CURVEBS_PASSWORD = "curvebs.root.password" + CURVEBS_DEFAULT_PASSWORD = "root_password" + CURVEBS_CLUSTERMAP = "clustermap" + VIPER_CURVEBS_CLUSTERMAP = "curvebs.clustermap" + CURVEBS_FORCE = "force" + VIPER_CURVEBS_FORCE = "curvebs.force" + CURVEBS_DEFAULT_FORCE = false + CURVEBS_LOGIC_POOL_ID = "logicalpoolid" + VIPER_CURVEBS_LOGIC_POOL_ID = "curvebs.logicalpoolid" + CURVEBS_COPYSET_ID = "copysetid" + VIPER_CURVEBS_COPYSET_ID = "curvebs.copysetid" + CURVEBS_PEERS_ADDRESS = "peers" + VIPER_CURVEBS_PEERS_ADDRESS = "curvebs.peers" + CURVEBS_OFFSET = "offset" + VIPER_CURVEBS_OFFSET = "curvebs.offset" + CURVEBS_SIZE = "size" + VIPER_CURVEBS_SIZE = "curvebs.size" + CURVEBS_DEFAULT_SIZE = uint64(10) + CURVEBS_TYPE = "type" + VIPER_CURVEBS_TYPE = "curvebs.type" + CURVEBS_STRIPE_UNIT = "stripeunit" + VIPER_CURVEBS_STRIPE_UNIT = "curvebs.stripeunit" + CURVEBS_DEFAULT_STRIPE_UNIT = "32 KiB" + CURVEBS_STRIPE_COUNT = "stripecount" + VIPER_CURVEBS_STRIPE_COUNT = "curvebs.stripecount" + CURVEBS_DEFAULT_STRIPE_COUNT = uint64(32) + CURVEBS_RECYCLE_PREFIX = "recycleprefix" + VIPER_RECYCLE_PREFIX = "curvebs.recycleprefix" + CURVEBS_EXPIRED_TIME = "expiredtime" + VIPER_CURVEBS_EXPIRED_TIME = "curvebs.expiredtime" + CURVEBS_LIMIT = "limit" + VIPER_CURVEBS_LIMIT = "curvebs.limit" + CURVEBS_BURST = "burst" + VIPER_CURVEBS_BURST = "curvebs.burst" + CURVEBS_DEFAULT_BURST = uint64(30000) + CURVEBS_BURST_LENGTH = "burstlength" + VIPER_CURVEBS_BURST_LENGTH = "curvebs.burstlength" + CURVEBS_DEFAULT_BURST_LENGTH = uint64(10) + CURVEBS_OP = "op" + VIPER_CURVEBS_OP = "curvebs.op" + CURVEBS_DEFAULT_OP = "operator" + CURVEBS_CHECK_TIME = "checktime" + VIPER_CURVEBS_CHECK_TIME = "curvebs.checktime" + CURVEBS_DEFAULT_CHECK_TIME = 30 * time.Second + CURVEBS_MARGIN = "margin" + VIPER_CURVEBS_MARGIN = "curvebs.margin" + CURVEBS_DEFAULT_MARGIN = uint64(1000) + CURVEBS_SNAPSHOTADDR = "snapshotaddr" + VIPER_CURVEBS_SNAPSHOTADDR = "curvebs.snapshotAddr" + CURVEBS_SNAPSHOTDUMMYADDR = "snapshotdummyaddr" + VIPER_CURVEBS_SNAPSHOTDUMMYADDR = "curvebs.snapshotDummyAddr" + CURVEBS_SCAN = "scan" + VIPER_CURVEBS_SCAN = "curvebs.scan" + CURVEBS_DEFAULT_SCAN = true + CURVEBS_CHUNKSERVER_ID = "chunkserverid" + VIPER_CHUNKSERVER_ID = "curvebs.chunkserverid" + CURVEBS_DEFAULT_CHUNKSERVER_ID = "*" + CURVEBS_CHECK_CSALIVE = "checkalive" + VIPER_CURVEBS_CHECK_CSALIVE = "curvebs.checkalive" + CURVEBS_CHECK_HEALTH = "checkhealth" + VIPER_CURVEBS_CHECK_HEALTH = "curvebs.checkHealth" + CURVEBS_CS_OFFLINE = "offline" + VIPER_CURVEBS_CS_OFFLINE = "curvebs.offline" + CURVEBS_CS_UNHEALTHY = "unhealthy" + VIPER_CURVEBS_CS_UNHEALTHY = "curvebs.unhealthy" + CURVEBS_DRYRUN = "dryrun" + VIPER_CURVEBS_DRYRUN = "curvebs.dryrun" + CURVEBS_DEFAULT_DRYRUN = true + CURVEBS_AVAILFLAG = "availflag" + 
VIPER_CURVEBS_AVAILFLAG = "curvebs.availflag" + CURVEBS_CHUNK_ID = "chunkid" + VIPER_CURVEBS_CHUNK_ID = "curvebs.chunkid" + CURVEBS_CHUNKSERVER_ADDRESS = "chunkserveraddr" + VIPER_CURVEBS_CHUNKSERVER_ADDRESS = "curvebs.chunkserverAddr" + CURVEBS_FIlTER = "filter" + VIPER_CURVEBS_FILTER = "curvebs.filter" + CURVEBS_DEFAULT_FILTER = false ) var ( @@ -73,27 +134,104 @@ var ( RPCRETRYTIMES: VIPER_GLOBALE_RPCRETRYTIMES, // bs - CURVEBS_MDSADDR: VIPER_CURVEBS_MDSADDR, - CURVEBS_MDSDUMMYADDR: VIPER_CURVEBS_MDSDUMMYADDR, - CURVEBS_PATH: VIPER_CURVEBS_PATH, - CURVEBS_USER: VIPER_CURVEBS_USER, - CURVEBS_PASSWORD: VIPER_CURVEBS_PASSWORD, - CURVEBS_ETCDADDR: VIPER_CURVEBS_ETCDADDR, - CURVEBS_DIR: VIPER_CURVEBS_DIR, - CURVEBS_LOGIC_POOL_ID: VIPER_CURVEBS_LOGIC_POOL_ID, - CURVEBS_COPYSET_ID: VIPER_CURVEBS_COPYSET_ID, - CURVEBS_PEERS_ADDRESS: VIPER_CURVEBS_PEERS_ADDRESS, - CURVEBS_CLUSTERMAP: VIPER_CURVEBS_CLUSTERMAP, + CURVEBS_MDSADDR: VIPER_CURVEBS_MDSADDR, + CURVEBS_MDSDUMMYADDR: VIPER_CURVEBS_MDSDUMMYADDR, + CURVEBS_PATH: VIPER_CURVEBS_PATH, + CURVEBS_USER: VIPER_CURVEBS_USER, + CURVEBS_PASSWORD: VIPER_CURVEBS_PASSWORD, + CURVEBS_ETCDADDR: VIPER_CURVEBS_ETCDADDR, + CURVEBS_LOGIC_POOL_ID: VIPER_CURVEBS_LOGIC_POOL_ID, + CURVEBS_COPYSET_ID: VIPER_CURVEBS_COPYSET_ID, + CURVEBS_PEERS_ADDRESS: VIPER_CURVEBS_PEERS_ADDRESS, + CURVEBS_CLUSTERMAP: VIPER_CURVEBS_CLUSTERMAP, + CURVEBS_OFFSET: VIPER_CURVEBS_OFFSET, + CURVEBS_SIZE: VIPER_CURVEBS_SIZE, + CURVEBS_STRIPE_UNIT: VIPER_CURVEBS_STRIPE_UNIT, + CURVEBS_STRIPE_COUNT: VIPER_CURVEBS_STRIPE_COUNT, + CURVEBS_LIMIT: VIPER_CURVEBS_LIMIT, + CURVEBS_BURST: VIPER_CURVEBS_BURST, + CURVEBS_BURST_LENGTH: VIPER_CURVEBS_BURST_LENGTH, + CURVEBS_FORCE: VIPER_CURVEBS_FORCE, + CURVEBS_TYPE: VIPER_CURVEBS_TYPE, + CURVEBS_EXPIRED_TIME: VIPER_CURVEBS_EXPIRED_TIME, + CURVEBS_RECYCLE_PREFIX: VIPER_RECYCLE_PREFIX, + CURVEBS_MARGIN: VIPER_CURVEBS_MARGIN, + CURVEBS_OP: VIPER_CURVEBS_OP, + CURVEBS_CHECK_TIME: VIPER_CURVEBS_CHECK_TIME, + CURVEBS_SNAPSHOTADDR: VIPER_CURVEBS_SNAPSHOTADDR, + CURVEBS_SNAPSHOTDUMMYADDR: VIPER_CURVEBS_SNAPSHOTDUMMYADDR, + CURVEBS_SCAN: VIPER_CURVEBS_SCAN, + CURVEBS_CHUNKSERVER_ID: VIPER_CHUNKSERVER_ID, + CURVEBS_CHECK_CSALIVE: VIPER_CURVEBS_CHECK_CSALIVE, + CURVEBS_CHECK_HEALTH: VIPER_CURVEBS_CHECK_HEALTH, + CURVEBS_CS_OFFLINE: VIPER_CURVEBS_CS_OFFLINE, + CURVEBS_CS_UNHEALTHY: VIPER_CURVEBS_CS_UNHEALTHY, + CURVEBS_DRYRUN: VIPER_CURVEBS_DRYRUN, + CURVEBS_AVAILFLAG: VIPER_CURVEBS_AVAILFLAG, + CURVEBS_CHUNK_ID: VIPER_CURVEBS_CHUNK_ID, + CURVEBS_CHUNKSERVER_ADDRESS: VIPER_CURVEBS_CHUNKSERVER_ADDRESS, + CURVEBS_FIlTER: VIPER_CURVEBS_FILTER, } BSFLAG2DEFAULT = map[string]interface{}{ // bs - CURVEBS_USER: CURVEBS_DEFAULT_USER, - CURVEBS_PASSWORD: CURVEBS_DEFAULT_PASSWORD, - CURVEBS_FORCEDELETE: CURVEBS_DEFAULT_FORCEDELETE, + CURVEBS_USER: CURVEBS_DEFAULT_USER, + CURVEBS_PASSWORD: CURVEBS_DEFAULT_PASSWORD, + CURVEBS_SIZE: CURVEBS_DEFAULT_SIZE, + CURVEBS_STRIPE_UNIT: CURVEBS_DEFAULT_STRIPE_UNIT, + CURVEBS_STRIPE_COUNT: CURVEBS_DEFAULT_STRIPE_COUNT, + CURVEBS_BURST: CURVEBS_DEFAULT_BURST, + CURVEBS_BURST_LENGTH: CURVEBS_DEFAULT_BURST_LENGTH, + CURVEBS_PATH: CURVEBS_DEFAULT_PATH, + CURVEBS_FORCE: CURVEBS_DEFAULT_FORCE, + CURVEBS_MARGIN: CURVEBS_DEFAULT_MARGIN, + CURVEBS_OP: CURVEBS_DEFAULT_OP, + CURVEBS_CHECK_TIME: CURVEBS_DEFAULT_CHECK_TIME, + CURVEBS_SCAN: CURVEBS_DEFAULT_SCAN, + CURVEBS_CHUNKSERVER_ID: CURVEBS_DEFAULT_CHUNKSERVER_ID, + CURVEBS_DRYRUN: CURVEBS_DEFAULT_DRYRUN, + CURVEBS_FIlTER: CURVEBS_DEFAULT_FILTER, } ) +const ( + CURVEBS_OP_OPERATOR = "operator" 
+ CURVEBS_OP_CHANGE_PEER = "change_peer" + CURVEBS_OP_ADD_PEER = "add_peer" + CURVEBS_OP_REMOVE_PEER = "remove_peer" + CURVEBS_OP_TRANSFER__LEADER = "transfer_leader" + + CURVEBS_IOPS_TOTAL = "iops_total" + CURVEBS_IOPS_READ = "iops_read" + CURVEBS_IOPS_WRITE = "iops_write" + CURVEBS_BPS_TOTAL = "bps_total" + CURVEBS_BPS_READ = "bps_read" + CURVEBS_BPS_WRITE = "bps_write" +) + +var ( + CURVEBS_OP_VALUE_SLICE = []string{CURVEBS_OP_OPERATOR, CURVEBS_OP_CHANGE_PEER, CURVEBS_OP_ADD_PEER, CURVEBS_OP_REMOVE_PEER, CURVEBS_OP_TRANSFER__LEADER} + + CURVEBS_THROTTLE_TYPE_SLICE = []string{CURVEBS_IOPS_TOTAL, CURVEBS_IOPS_READ, CURVEBS_IOPS_WRITE, CURVEBS_BPS_TOTAL, CURVEBS_BPS_READ, CURVEBS_BPS_WRITE} +) + +var ( + BS_STRING_FLAG2AVAILABLE = map[string][]string{ + CURVEBS_OP: CURVEBS_OP_VALUE_SLICE, + CURVEBS_TYPE: CURVEBS_THROTTLE_TYPE_SLICE, + } +) + +func BsAvailableValueStr(flagName string) string { + ret := "" + if slice, ok := BS_STRING_FLAG2AVAILABLE[flagName]; ok { + ret = strings.Join(slice, "|") + } else if ret, ok = BSFLAG2DEFAULT[flagName].(string); !ok { + ret = "" + } + return ret +} + // curvebs // add bs option flag func AddBsStringSliceOptionFlag(cmd *cobra.Command, name string, usage string) { @@ -101,6 +239,7 @@ func AddBsStringSliceOptionFlag(cmd *cobra.Command, name string, usage string) { if defaultValue == nil { defaultValue = []string{} } + cmd.Flags().StringSlice(name, defaultValue.([]string), usage) err := viper.BindPFlag(BSFLAG2VIPER[name], cmd.Flags().Lookup(name)) if err != nil { @@ -118,7 +257,7 @@ func AddBsStringSliceRequiredFlag(cmd *cobra.Command, name string, usage string) } func AddBsStringOptionFlag(cmd *cobra.Command, name string, usage string) { - defaultValue := FLAG2DEFAULT[name] + defaultValue := BSFLAG2DEFAULT[name] if defaultValue == nil { defaultValue = "" } @@ -129,6 +268,71 @@ func AddBsStringOptionFlag(cmd *cobra.Command, name string, usage string) { } } +func AddBsUint64OptionFlag(cmd *cobra.Command, name string, usage string) { + defaultValue := BSFLAG2DEFAULT[name] + if defaultValue == nil { + defaultValue = 0 + } + cmd.Flags().Uint64(name, defaultValue.(uint64), usage) + err := viper.BindPFlag(BSFLAG2VIPER[name], cmd.Flags().Lookup(name)) + if err != nil { + cobra.CheckErr(err) + } +} + +func AddBsInt64OptionFlag(cmd *cobra.Command, name string, usage string) { + defaultValue := BSFLAG2DEFAULT[name] + if defaultValue == nil { + defaultValue = 0 + } + cmd.Flags().Int64(name, defaultValue.(int64), usage) + err := viper.BindPFlag(BSFLAG2VIPER[name], cmd.Flags().Lookup(name)) + if err != nil { + cobra.CheckErr(err) + } +} + +func AddBsDurationOptionFlag(cmd *cobra.Command, name string, usage string) { + defaultValue := BSFLAG2DEFAULT[name] + if defaultValue == nil { + defaultValue = 0 + } + cmd.Flags().Duration(name, defaultValue.(time.Duration), usage) + err := viper.BindPFlag(BSFLAG2VIPER[name], cmd.Flags().Lookup(name)) + if err != nil { + cobra.CheckErr(err) + } +} + +func AddBsUint32SliceOptionFlag(cmd *cobra.Command, name string, usage string) { + defaultValue := BSFLAG2DEFAULT[name] + if defaultValue == nil { + defaultValue = []uint32{1, 2, 3} + } + cmd.Flags().UintSlice(name, defaultValue.([]uint), usage) + err := viper.BindPFlag(BSFLAG2VIPER[name], cmd.Flags().Lookup(name)) + if err != nil { + cobra.CheckErr(err) + } +} + +func AddBsBoolRequireFlag(cmd *cobra.Command, name string, usage string) { + cmd.Flags().Bool(name, false, usage+color.Red.Sprint("[required]")) + cmd.MarkFlagRequired(name) + err := viper.BindPFlag(BSFLAG2VIPER[name], 
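BsAvailableValueStr joins the accepted values of a string flag so they can be embedded in its usage text (for example the throttle --type and the check --op flags). Reduced to its essentials:

```go
package main

import (
	"fmt"
	"strings"
)

// available maps a flag name to its accepted values, in the spirit of
// BS_STRING_FLAG2AVAILABLE.
var available = map[string][]string{
	"type": {"iops_total", "iops_read", "iops_write", "bps_total", "bps_read", "bps_write"},
	"op":   {"operator", "change_peer", "add_peer", "remove_peer", "transfer_leader"},
}

// availableValueStr joins the accepted values with "|" for the usage text.
func availableValueStr(flag string) string {
	if vals, ok := available[flag]; ok {
		return strings.Join(vals, "|")
	}
	return ""
}

func main() {
	fmt.Printf("throttle type, %s\n", availableValueStr("type"))
	fmt.Printf("check operator name, %s\n", availableValueStr("op"))
}
```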
cmd.Flags().Lookup(name)) + if err != nil { + cobra.CheckErr(err) + } +} + +func AddBsDryrunOptionFlag(cmd *cobra.Command) { + AddBsBoolOptionFlag(cmd, CURVEBS_DRYRUN, "when dry run set true, no changes will be made") +} + +func AddBsFilterOptionFlag(cmd *cobra.Command) { + AddBsBoolOptionFlag(cmd, CURVEBS_FIlTER, "filter scanning or not") +} + // add bs required flag func AddBsStringRequiredFlag(cmd *cobra.Command, name string, usage string) { cmd.Flags().String(name, "", usage+color.Red.Sprint("[required]")) @@ -151,15 +355,52 @@ func AddBsBoolOptionFlag(cmd *cobra.Command, name string, usage string) { } } +func AddBsUint32RequiredFlag(cmd *cobra.Command, name string, usage string) { + cmd.Flags().Uint32(name, uint32(0), usage+color.Red.Sprint("[required]")) + cmd.MarkFlagRequired(name) + err := viper.BindPFlag(BSFLAG2VIPER[name], cmd.Flags().Lookup(name)) + if err != nil { + cobra.CheckErr(err) + } +} + +func AddBsUint64RequiredFlag(cmd *cobra.Command, name string, usage string) { + cmd.Flags().Uint64(name, uint64(0), usage+color.Red.Sprint("[required]")) + cmd.MarkFlagRequired(name) + err := viper.BindPFlag(BSFLAG2VIPER[name], cmd.Flags().Lookup(name)) + if err != nil { + cobra.CheckErr(err) + } +} + +func AddBsDurationRequiredFlag(cmd *cobra.Command, name string, usage string) { + cmd.Flags().Duration(name, 1*time.Second, usage+color.Red.Sprint("[required]")) + cmd.MarkFlagRequired(name) + err := viper.BindPFlag(BSFLAG2VIPER[name], cmd.Flags().Lookup(name)) + if err != nil { + cobra.CheckErr(err) + } +} + // add flag option // bs mds[option] func AddBsMdsFlagOption(cmd *cobra.Command) { AddBsStringOptionFlag(cmd, CURVEBS_MDSADDR, "mds address, should be like 127.0.0.1:6700,127.0.0.1:6701,127.0.0.1:6702") } + func AddBsMdsDummyFlagOption(cmd *cobra.Command) { AddBsStringOptionFlag(cmd, CURVEBS_MDSDUMMYADDR, "mds dummy address, should be like 127.0.0.1:6700,127.0.0.1:6701,127.0.0.1:6702") } +// snapshot clone +func AddBsSnapshotCloneFlagOption(cmd *cobra.Command) { + AddBsStringOptionFlag(cmd, CURVEBS_SNAPSHOTADDR, "snapshot clone address, should be like 127.0.0.1:5550,127.0.0.1:5551,127.0.0.1:5552") +} + +func AddBsSnapshotCloneDummyFlagOption(cmd *cobra.Command) { + AddBsStringOptionFlag(cmd, CURVEBS_SNAPSHOTDUMMYADDR, "snapshot clone dummy address, should be like 127.0.0.1:8100,127.0.0.1:8101,127.0.0.1:8102") +} + // user func AddBsUserOptionFlag(cmd *cobra.Command) { AddBsStringOptionFlag(cmd, CURVEBS_USER, "user name") @@ -170,53 +411,141 @@ func AddBsPasswordOptionFlag(cmd *cobra.Command) { AddBsStringOptionFlag(cmd, CURVEBS_PASSWORD, "user password") } -// dir -func AddBsDirOptionFlag(cmd *cobra.Command) { - AddBsStringOptionFlag(cmd, CURVEBS_DIR, "directory path") -} - // etcd func AddBsEtcdAddrFlag(cmd *cobra.Command) { AddBsStringOptionFlag(cmd, CURVEBS_ETCDADDR, "etcd address, should be like 127.0.0.1:8700,127.0.0.1:8701,127.0.0.1:8702") } +func AddBsSizeOptionFlag(cmd *cobra.Command) { + AddBsUint64OptionFlag(cmd, CURVEBS_SIZE, "size, unit is GiB") +} + +func AddBsStripeUnitOptionFlag(cmd *cobra.Command) { + AddBsStringOptionFlag(cmd, CURVEBS_STRIPE_UNIT, "stripe volume uint, just like: 32KiB") +} + +func AddBsStripeCountOptionFlag(cmd *cobra.Command) { + AddBsUint64OptionFlag(cmd, CURVEBS_STRIPE_COUNT, "stripe volume count") +} + +func AddBsBurstOptionFlag(cmd *cobra.Command) { + AddBsUint64OptionFlag(cmd, CURVEBS_BURST, "burst") +} + +func AddBsBurstLengthOptionFlag(cmd *cobra.Command) { + AddBsUint64OptionFlag(cmd, CURVEBS_BURST_LENGTH, "burst length") +} + +func 
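The new AddBs*RequiredFlag helpers all follow the same three steps: register the flag, mark it required so cobra rejects an invocation without it, and bind it into viper. The pattern stand-alone (the red "[required]" suffix from gookit/color is dropped here for brevity):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

// addBoolRequired follows the AddBsBoolRequireFlag pattern: register,
// mark required, bind to viper.
func addBoolRequired(cmd *cobra.Command, name, viperKey, usage string) {
	cmd.Flags().Bool(name, false, usage+"[required]")
	cmd.MarkFlagRequired(name)
	if err := viper.BindPFlag(viperKey, cmd.Flags().Lookup(name)); err != nil {
		cobra.CheckErr(err)
	}
}

func main() {
	cmd := &cobra.Command{
		Use:  "availflag",
		RunE: func(cmd *cobra.Command, args []string) error { return nil },
	}
	addBoolRequired(cmd, "availflag", "curvebs.availflag", "copysets available flag")

	cmd.SetArgs([]string{}) // no --availflag given
	if err := cmd.Execute(); err != nil {
		fmt.Println("rejected:", err) // required flag(s) "availflag" not set
	}
}
```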
AddBsPathOptionFlag(cmd *cobra.Command) { + AddBsStringOptionFlag(cmd, CURVEBS_PATH, "file or directory path") +} + +func AddBsCheckTimeOptionFlag(cmd *cobra.Command) { + AddBsDurationOptionFlag(cmd, CURVEBS_CHECK_TIME, "check time") +} + +func AddBsChunkServerIdOptionFlag(cmd *cobra.Command) { + AddBsStringOptionFlag(cmd, CURVEBS_CHUNKSERVER_ID, "chunkserver id") +} + +// marigin +func AddBsMarginOptionFlag(cmd *cobra.Command) { + AddUint64OptionFlag(cmd, CURVEBS_MARGIN, "the maximum gap between peers") +} + +func GetBsMargin(cmd *cobra.Command) uint64 { + return GetFlagUint64(cmd, CURVEBS_MARGIN) +} + +// scan-state +func AddBsScanOptionFlag(cmd *cobra.Command) { + AddBsBoolOptionFlag(cmd, CURVEBS_SCAN, "enable/disable scan for logical pool") +} + // add flag required // add path[required] func AddBsPathRequiredFlag(cmd *cobra.Command) { AddBsStringRequiredFlag(cmd, CURVEBS_PATH, "file path") } -func AddBsUsernameRequiredFlag(cmd *cobra.Command) { - AddBsStringRequiredFlag(cmd, CURVEBS_USER, "username") +func AddBsLogicalPoolIdRequiredFlag(cmd *cobra.Command) { + AddBsUint32RequiredFlag(cmd, CURVEBS_LOGIC_POOL_ID, "logical pool id") } -func AddBsFilenameRequiredFlag(cmd *cobra.Command) { - AddBsStringRequiredFlag(cmd, CURVEBS_FILENAME, "the full path of file") +func AddBsCopysetIdRequiredFlag(cmd *cobra.Command) { + AddBsUint32RequiredFlag(cmd, CURVEBS_COPYSET_ID, "copyset id") } -func AddBSLogicalPoolIdFlag(cmd *cobra.Command) { - AddBSUint32RequiredFlag(cmd, CURVEBS_LOGIC_POOL_ID, "logical pool id") +func AddBsCopysetIdSliceRequiredFlag(cmd *cobra.Command) { + AddBsStringSliceRequiredFlag(cmd, CURVEBS_COPYSET_ID, "copyset id") } -func AddBSCopysetIdFlag(cmd *cobra.Command) { - AddBSUint32RequiredFlag(cmd, CURVEBS_COPYSET_ID, "copyset id") +func AddBsLogicalPoolIdSliceRequiredFlag(cmd *cobra.Command) { + AddBsStringSliceRequiredFlag(cmd, CURVEBS_LOGIC_POOL_ID, "logical pool id") } -func AddBSPeersConfFlag(cmd *cobra.Command) { +func AddBsPeersConfFlag(cmd *cobra.Command) { AddBsStringSliceRequiredFlag(cmd, CURVEBS_PEERS_ADDRESS, "peers info.") } func AddBsForceDeleteOptionFlag(cmd *cobra.Command) { - AddBsBoolOptionFlag(cmd, CURVEBS_FORCEDELETE, "whether to force delete the file") + AddBsBoolOptionFlag(cmd, CURVEBS_FORCE, "whether to force delete the file") } -func AddBSUint32RequiredFlag(cmd *cobra.Command, name string, usage string) { - cmd.Flags().Uint32(name, uint32(0), usage+color.Red.Sprint("[required]")) - cmd.MarkFlagRequired(name) - err := viper.BindPFlag(BSFLAG2VIPER[name], cmd.Flags().Lookup(name)) - if err != nil { - cobra.CheckErr(err) - } +func AddBsOffsetRequiredFlag(cmd *cobra.Command) { + AddBsUint64RequiredFlag(cmd, CURVEBS_OFFSET, "offset") +} + +func AddBsSizeRequiredFlag(cmd *cobra.Command) { + AddBsUint64RequiredFlag(cmd, CURVEBS_SIZE, "size, uint is GiB") +} + +func AddBsFileTypeRequiredFlag(cmd *cobra.Command) { + AddBsStringRequiredFlag(cmd, CURVEBS_TYPE, "file type, file or dir") +} + +func AddBsThrottleTypeRequiredFlag(cmd *cobra.Command) { + AddBsStringRequiredFlag(cmd, CURVEBS_TYPE, fmt.Sprintf("throttle type, %s", BsAvailableValueStr(CURVEBS_TYPE))) +} + +func AddBsLimitRequiredFlag(cmd *cobra.Command) { + AddBsUint64RequiredFlag(cmd, CURVEBS_LIMIT, "limit") +} + +func AddBsOpRequiredFlag(cmd *cobra.Command) { + AddBsStringRequiredFlag(cmd, CURVEBS_OP, fmt.Sprintf("check operator name, %s", BsAvailableValueStr(CURVEBS_OP))) +} + +func AddBsChunkServerIdFlag(cmd *cobra.Command) { + AddBsUint32RequiredFlag(cmd, CURVEBS_CHUNKSERVER_ID, "chunkserver id") +} + 
+func AddBsCheckCSAliveOptionFlag(cmd *cobra.Command) { + AddBsBoolOptionFlag(cmd, CURVEBS_CHECK_CSALIVE, "check chunkserver alive") +} + +func AddBsCheckHealthOptionFlag(cmd *cobra.Command) { + AddBsBoolOptionFlag(cmd, CURVEBS_CHECK_HEALTH, "check chunkserver health") +} + +func AddBsCSOfflineOptionFlag(cmd *cobra.Command) { + AddBsBoolOptionFlag(cmd, CURVEBS_CS_OFFLINE, "offline") +} + +func AddBsCSUnhealthyOptionFlag(cmd *cobra.Command) { + AddBsBoolOptionFlag(cmd, CURVEBS_CS_UNHEALTHY, "unhealthy") +} + +func AddBsAvailFlagRequireFlag(cmd *cobra.Command) { + AddBsBoolRequireFlag(cmd, CURVEBS_AVAILFLAG, "copysets available flag") +} + +func AddBsChunkIdSliceRequiredFlag(cmd *cobra.Command) { + AddBsStringSliceRequiredFlag(cmd, CURVEBS_CHUNK_ID, "chunk ids") +} + +func AddBsChunkServerAddressSliceRequiredFlag(cmd *cobra.Command) { + AddBsStringSliceRequiredFlag(cmd, CURVEBS_CHUNKSERVER_ADDRESS, "chunk server address") } // get stingslice flag @@ -242,7 +571,7 @@ func GetBsFlagString(cmd *cobra.Command, flagName string) string { } // GetBsFlagUint32 get uint32 flag -func GetBsFlagUint32(cmd *cobra.Command, flagName string) (uint32, error) { +func GetBsFlagUint32(cmd *cobra.Command, flagName string) uint32 { var value string if cmd.Flag(flagName).Changed { value = cmd.Flag(flagName).Value.String() @@ -251,10 +580,31 @@ func GetBsFlagUint32(cmd *cobra.Command, flagName string) (uint32, error) { } val, err := strconv.ParseUint(value, 10, 32) if err != nil { - return 0, err + return 0 } - return uint32(val), nil + return uint32(val) +} + +// get uint64 flag +func GetBsFlagUint64(cmd *cobra.Command, flagName string) uint64 { + var value uint64 + if cmd.Flag(flagName).Changed { + value, _ = cmd.Flags().GetUint64(flagName) + } else { + value = viper.GetUint64(BSFLAG2VIPER[flagName]) + } + return value +} + +func GetBsFlagInt64(cmd *cobra.Command, flagName string) int64 { + var value int64 + if cmd.Flag(flagName).Changed { + value, _ = cmd.Flags().GetInt64(flagName) + } else { + value = viper.GetInt64(BSFLAG2VIPER[flagName]) + } + return value } // get mdsaddr @@ -265,9 +615,14 @@ func GetBsAddrSlice(cmd *cobra.Command, addrType string) ([]string, *cmderror.Cm } else { addrsStr = viper.GetString(BSFLAG2VIPER[addrType]) } + addrslice := strings.Split(addrsStr, ",") + for i, addr := range addrslice { + addrslice[i] = strings.TrimSpace(addr) + } + for _, addr := range addrslice { - if !cobrautil.IsValidAddr(addr) { + if !IsValidAddr(addr) { err := cmderror.ErrGetAddr() err.Format(addrType, addr) return addrslice, err @@ -279,6 +634,7 @@ func GetBsAddrSlice(cmd *cobra.Command, addrType string) ([]string, *cmderror.Cm func GetBsEtcdAddrSlice(cmd *cobra.Command) ([]string, *cmderror.CmdError) { return GetBsAddrSlice(cmd, CURVEBS_ETCDADDR) } + func GetBsMdsAddrSlice(cmd *cobra.Command) ([]string, *cmderror.CmdError) { return GetBsAddrSlice(cmd, CURVEBS_MDSADDR) } @@ -287,6 +643,14 @@ func GetBsMdsDummyAddrSlice(cmd *cobra.Command) ([]string, *cmderror.CmdError) { return GetBsAddrSlice(cmd, CURVEBS_MDSDUMMYADDR) } +func GetBsSnapshotAddrSlice(cmd *cobra.Command) ([]string, *cmderror.CmdError) { + return GetBsAddrSlice(cmd, CURVEBS_SNAPSHOTADDR) +} + +func GetBsSnapshotDummyAddrSlice(cmd *cobra.Command) ([]string, *cmderror.CmdError) { + return GetBsAddrSlice(cmd, CURVEBS_SNAPSHOTDUMMYADDR) +} + func GetBsFlagBool(cmd *cobra.Command, flagName string) bool { var value bool if cmd.Flag(flagName).Changed { @@ -320,3 +684,39 @@ func GetBsFlagInt32(cmd *cobra.Command, flagName string) int32 { } return value } 
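// Illustrative wiring only: a minimal sketch of how the Add*/Get* helpers above
// are meant to be paired inside a cobra command. The command type (exampleCommand)
// and its Cmd field are hypothetical; only the helper functions themselves come
// from this file.
//
//	func (c *exampleCommand) AddFlags() {
//		AddBsMdsFlagOption(c.Cmd)    // optional --mdsaddr, falls back to the viper config value
//		AddBsPathRequiredFlag(c.Cmd) // required --path
//	}
//
//	func (c *exampleCommand) Init(cmd *cobra.Command, args []string) error {
//		addrs, addrErr := GetBsMdsAddrSlice(c.Cmd) // comma-separated list, each entry checked by IsValidAddr
//		// handle addrErr according to the cmderror conventions used elsewhere in tools-v2
//		path := GetBsFlagString(c.Cmd, CURVEBS_PATH)
//		_, _ = addrs, path // a real command would build its RPC request here
//		return nil
//	}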
+ +// flag for clean recycle bin +func AddBsRecyclePrefixOptionFlag(cmd *cobra.Command) { + AddBsStringOptionFlag(cmd, CURVEBS_RECYCLE_PREFIX, "recycle prefix (default \"\")") +} + +func AddBsExpireTimeOptionFlag(cmd *cobra.Command) { + AddBsStringOptionFlag(cmd, CURVEBS_EXPIRED_TIME, "expire time (default 0s)") +} + +func GetBsRecyclePrefix(cmd *cobra.Command) string { + return GetBsFlagString(cmd, CURVEBS_RECYCLE_PREFIX) +} + +func GetBsExpireTime(cmd *cobra.Command) time.Duration { + return GetBsFlagDuration(cmd, CURVEBS_EXPIRED_TIME) +} + +func GetBsChunkServerId(cmd *cobra.Command) []uint32 { + chunkserveridStr := GetBsFlagString(cmd, CURVEBS_CHUNKSERVER_ID) + if chunkserveridStr == "" || chunkserveridStr == "*" { + return []uint32{} + } + chunkserveridStrSlice := strings.Split(chunkserveridStr, ",") + var chunkserveridSlice []uint32 + for _, id := range chunkserveridStrSlice { + idUint, err := strconv.ParseUint(id, 10, 32) + if err != nil { + parseError := cmderror.ErrParse() + parseError.Format("chunkserver id", id) + cobra.CheckErr(parseError.ToError()) + } + chunkserveridSlice = append(chunkserveridSlice, uint32(idUint)) + } + return chunkserveridSlice +} diff --git a/tools-v2/pkg/config/config.go b/tools-v2/pkg/config/config.go index 60a30a77f3..b72b219723 100644 --- a/tools-v2/pkg/config/config.go +++ b/tools-v2/pkg/config/config.go @@ -23,6 +23,8 @@ package config import ( "os" + "regexp" + "strings" "time" "github.com/spf13/cobra" @@ -165,3 +167,51 @@ func AlignFlagsValue(caller *cobra.Command, callee *cobra.Command, flagNames []s } } } + +type stringSlice struct { + value []string + change bool +} + +func (s *stringSlice) String() string { + return strings.Join(s.value, ",") +} + +func (s *stringSlice) Set(value string) error { + s.value = strings.Split(value, ",") + s.change = true + return nil +} + +func (s *stringSlice) Type() string { + return "stringSlice" +} + +func ResetStringSliceFlag(flag *pflag.Flag, value string) { + flag.Changed = false + flag.Value = &stringSlice{ + value: strings.Split(value, ","), + change: true, + } + flag.Changed = true +} + +func GetFlagChanged(cmd *cobra.Command, flagName string) bool { + flag := cmd.Flag(flagName) + if flag != nil { + return flag.Changed + } + return false +} + +const ( + IP_PORT_REGEX = "((\\d|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5]):([0-9]|[1-9]\\d{1,3}|[1-5]\\d{4}|6[0-4]\\d{4}|65[0-4]\\d{2}|655[0-2]\\d|6553[0-5]))|(\\d|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5])" +) + +func IsValidAddr(addr string) bool { + matched, err := regexp.MatchString(IP_PORT_REGEX, addr) + if err != nil || !matched { + return false + } + return true +} diff --git a/tools-v2/pkg/config/curve.yaml b/tools-v2/pkg/config/curve.yaml index 96ccc637db..5c8d31c088 100644 --- a/tools-v2/pkg/config/curve.yaml +++ b/tools-v2/pkg/config/curve.yaml @@ -8,7 +8,7 @@ global: curvefs: mdsAddr: 127.0.0.1:6700,127.0.0.1:6701,127.0.0.1:6702 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__ mdsDummyAddr: 127.0.0.1:7700,127.0.0.1:7701,127.0.0.1:7702 # __CURVEADM_TEMPLATE__ ${cluster_mds_dummy_addr} __CURVEADM_TEMPLATE__ - etcdAddr: 127.0.0.1:23790,127.0.0.1:23791, 127.0.0.1:23792 # __CURVEADM_TEMPLATE__ ${cluster_etcd_addr} __CURVEADM_TEMPLATE__ + etcdAddr: 127.0.0.1:23790,127.0.0.1:23791,127.0.0.1:23792 # 
__CURVEADM_TEMPLATE__ ${cluster_etcd_addr} __CURVEADM_TEMPLATE__ s3: ak: ak sk: sk @@ -20,6 +20,8 @@ curvebs: mdsAddr: 127.0.0.1:6700,127.0.0.1:6701,127.0.0.1:6702 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__ mdsDummyAddr: 127.0.0.1:7700,127.0.0.1:7701,127.0.0.1:7702 # __CURVEADM_TEMPLATE__ ${cluster_mds_dummy_addr} __CURVEADM_TEMPLATE__ etcdAddr: 127.0.0.1:23790,127.0.0.1:23791,127.0.0.1:23792 # __CURVEADM_TEMPLATE__ ${cluster_etcd_addr} __CURVEADM_TEMPLATE__ + snapshotAddr: 127.0.0.1:5550,127.0.0.1:5551,127.0.0.1:5552 # __CURVEADM_TEMPLATE__ ${cluster_snapshot_addr} __CURVEADM_TEMPLATE__ + snapshotDummyAddr: 127.0.0.1:8100,127.0.0.1:8101,127.0.0.1:8102 # __CURVEADM_TEMPLATE__ ${cluster_snapshot_dummy_addr} __CURVEADM_TEMPLATE__ root: user: root password: root_password diff --git a/tools-v2/pkg/config/fs.go b/tools-v2/pkg/config/fs.go index 3e6404834c..304400db1c 100644 --- a/tools-v2/pkg/config/fs.go +++ b/tools-v2/pkg/config/fs.go @@ -27,7 +27,6 @@ import ( "github.com/gookit/color" cmderror "github.com/opencurve/curve/tools-v2/internal/error" - cobrautil "github.com/opencurve/curve/tools-v2/internal/utils" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -93,6 +92,9 @@ const ( CURVEFS_DAEMON = "daemon" VIPER_CURVEFS_DAEMON = "curvefs.daemon" CURVEFS_DEFAULT_DAEMON = false + CURVEFS_STORAGE = "storage" + VIPER_CURVEFS_STORAGE = "curvefs.storage" + CURVEFS_DEFAULT_STORAGE = "disk" // S3 CURVEFS_S3_AK = "s3.ak" @@ -168,7 +170,8 @@ var ( CURVEFS_SERVERS: VIPER_CURVEFS_SERVERS, CURVEFS_FILELIST: VIPER_CURVEFS_FILELIST, CURVEFS_INTERVAL: VIPER_CURVEFS_INTERVAL, - CURVEFS_DAEMON: VIPER_CURVEFS_DAEMON, + CURVEFS_DAEMON: VIPER_CURVEFS_DAEMON, + CURVEFS_STORAGE: VIPER_CURVEFS_STORAGE, // S3 CURVEFS_S3_AK: VIPER_CURVEFS_S3_AK, @@ -197,7 +200,8 @@ var ( CURVEFS_MARGIN: CURVEFS_DEFAULT_MARGIN, CURVEFS_SERVERS: CURVEFS_DEFAULT_SERVERS, CURVEFS_INTERVAL: CURVEFS_DEFAULT_INTERVAL, - CURVEFS_DAEMON: CURVEFS_DEFAULT_DAEMON, + CURVEFS_DAEMON: CURVEFS_DEFAULT_DAEMON, + CURVEFS_STORAGE: CURVEFS_DEFAULT_STORAGE, // S3 CURVEFS_S3_AK: CURVEFS_DEFAULT_S3_AK, @@ -370,7 +374,7 @@ func GetAddrSlice(cmd *cobra.Command, addrType string) ([]string, *cmderror.CmdE } addrslice := strings.Split(addrsStr, ",") for _, addr := range addrslice { - if !cobrautil.IsValidAddr(addr) { + if !IsValidAddr(addr) { err := cmderror.ErrGetAddr() err.Format(addrType, addr) return addrslice, err @@ -758,6 +762,15 @@ func GetDaemonFlag(cmd *cobra.Command) bool { return GetFlagBool(cmd, CURVEFS_DAEMON) } +// storage [option] +func AddStorageOptionFlag(cmd *cobra.Command) { + AddStringOptionFlag(cmd, CURVEFS_STORAGE, "warmup storage type, can be: disk/mem") +} + +func GetStorageFlag(cmd *cobra.Command) string { + return GetFlagString(cmd, CURVEFS_STORAGE) +} + /* required */ // copysetid [required] diff --git a/tools-v2/pkg/config/template.yaml b/tools-v2/pkg/config/template.yaml deleted file mode 100644 index b183c1c48b..0000000000 --- a/tools-v2/pkg/config/template.yaml +++ /dev/null @@ -1,26 +0,0 @@ -global: - httpTimeout: 500ms - rpcTimeout: 500ms - rpcRetryTimes: 1 - maxChannelSize: 4 - showError: false - -curvefs: - mdsAddr: 127.0.0.1:6700,127.0.0.1:6701,127.0.0.1:6702 - mdsDummyAddr: 127.0.0.1:7700,127.0.0.1:7701,127.0.0.1:7702 - etcdAddr: 127.0.0.1:23790,127.0.0.1:23791, 127.0.0.1:23792 - interval: 1s - s3: - ak: ak - sk: sk - endpoint: http://localhost:9000 - bucketname: bucketname - blocksize: 4 mib - chunksize: 64 mib -curvebs: - mdsAddr: 127.0.0.1:6700,127.0.0.1:6701,127.0.0.1:6702 - mdsDummyAddr: 
127.0.0.1:7700,127.0.0.1:7701,127.0.0.1:7702 - etcdAddr: 127.0.0.1:23790,127.0.0.1:23791, 127.0.0.1:23792 - root: - user: root - password: root_password diff --git a/tools-v2/pkg/daemon/daemon.go b/tools-v2/pkg/daemon/daemon.go new file mode 100644 index 0000000000..0f53540b38 --- /dev/null +++ b/tools-v2/pkg/daemon/daemon.go @@ -0,0 +1,13 @@ +package daemon + +import "fmt" + +func Execute() { + tasks := GetTasks() + for _, t := range tasks { + err := t.Run() + if err != nil { + fmt.Println(err.Error()) + } + } +} diff --git a/tools-v2/pkg/daemon/task.go b/tools-v2/pkg/daemon/task.go new file mode 100644 index 0000000000..5d11d2f900 --- /dev/null +++ b/tools-v2/pkg/daemon/task.go @@ -0,0 +1,97 @@ +package daemon + +import ( + "bytes" + "encoding/json" + "fmt" + "io/fs" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" +) + +type Task struct { + ID int `json:"ID"` + Path string `json:"Path"` + Args []string `json:"Args"` + Env []string `json:"Env"` + Dir string `json:"Dir"` + OutputPath string `json:"OutputPath"` + InputPath string `json:"InputPath"` +} + +func NewTask(str []byte) *Task { + task := Task{} + json.Unmarshal(str, &task) + return &task +} + +func (task *Task) Run() error { + cmd := exec.Command(task.Path, task.Args...) + if task.InputPath != "" { + inputData, err := ioutil.ReadFile(task.InputPath) + if err != nil { + return err + } + cmd.Stdin = strings.NewReader(string(inputData)) + } + var out bytes.Buffer + defer func() { + if task.OutputPath != "" { + ioutil.WriteFile(task.OutputPath, out.Bytes(), 0644) + } + }() + cmd.Stdout = &out + cmd.Env = append(cmd.Env, task.Env...) + err := cmd.Run() + fmt.Printf("cmd:\n%+v\nout:\n%s\n-----------\n", *task, out.String()) + return err +} + +func getFileList(path string) []string { + var fileList []string + fi, err := os.Stat(path) + if err != nil || !fi.IsDir() { + return fileList + } + filepath.Walk(path, func(path string, info fs.FileInfo, err error) error { + if !info.IsDir() { + fileList = append(fileList, path) + } + return nil + }) + + return fileList +} + +const ( + WORK_DIRECTORY = "/curve/init.d/" +) + +func GetTasks() []*Task { + fileList := getFileList(WORK_DIRECTORY) + fmt.Println("fileList:", fileList) + var tasks []*Task + for _, file := range fileList { + fileData, err := ioutil.ReadFile(file) + if err == nil { + task := NewTask(fileData) + tasks = append(tasks, task) + } + } + sort.Slice(tasks, func(i, j int) bool { + return tasks[i].ID < tasks[j].ID + }) + return tasks +} + +func (task *Task)Write(path string) { + b, err := json.Marshal(task) + if err != nil { + return + } + ioutil.WriteFile(path, b, 0644) +} diff --git a/tools-v2/pkg/output/output.go b/tools-v2/pkg/output/output.go index 43b22f3eaa..8fafb69204 100644 --- a/tools-v2/pkg/output/output.go +++ b/tools-v2/pkg/output/output.go @@ -58,7 +58,7 @@ func FinalCmdOutputPlain(finalCmd *basecmd.FinalCurveCmd) error { func FinalCmdOutput(finalCmd *basecmd.FinalCurveCmd, funcs basecmd.FinalCurveCmdFunc) error { - format := finalCmd.Cmd.Flag("format").Value.String() + format := finalCmd.Cmd.Flag(config.FORMAT).Value.String() var err error switch format { case config.FORMAT_JSON: diff --git a/tools/curvefsTool.cpp b/tools/curvefsTool.cpp index bb2a218c01..869b73e36b 100644 --- a/tools/curvefsTool.cpp +++ b/tools/curvefsTool.cpp @@ -22,6 +22,12 @@ #include "tools/curvefsTool.h" +#include + +#include "src/common/namespace_define.h" + +using ::curve::common::kDefaultPoolsetName; + DEFINE_string(mds_addr, "127.0.0.1:6666", "mds ip and port list, 
separated by \",\""); @@ -64,6 +70,9 @@ const char kScatterWidth[] = "scatterwidth"; const char kAllocStatus[] = "allocstatus"; const char kAllocStatusAllow[] = "allow"; const char kAllocStatusDeny[] = "deny"; +const char kPoolsets[] = "poolsets"; +const char kPoolsetName[] = "poolset"; + using ::curve::common::SplitString; @@ -86,18 +95,17 @@ void UpdateFlagsFromConf(curve::common::Configuration* conf) { } int CurvefsTools::Init() { - std::string confPath = FLAGS_confPath.c_str(); curve::common::Configuration conf; - conf.SetConfigPath(confPath); + conf.SetConfigPath(FLAGS_confPath); UpdateFlagsFromConf(&conf); SplitString(FLAGS_mds_addr, ",", &mdsAddressStr_); - if (mdsAddressStr_.size() <= 0) { - LOG(ERROR) << "no avaliable mds address."; + if (mdsAddressStr_.empty()) { + LOG(ERROR) << "no available mds address."; return kRetCodeCommonErr; } - for (auto addr : mdsAddressStr_) { - butil::EndPoint endpt; + butil::EndPoint endpt; + for (const auto& addr : mdsAddressStr_) { if (butil::str2endpoint(addr.c_str(), &endpt) < 0) { LOG(ERROR) << "Invalid sub mds ip:port provided: " << addr; return kRetCodeCommonErr; @@ -109,7 +117,7 @@ int CurvefsTools::Init() { int CurvefsTools::TryAnotherMdsAddress() { if (mdsAddressStr_.size() == 0) { - LOG(ERROR) << "no avaliable mds address."; + LOG(ERROR) << "no available mds address."; return kRetCodeCommonErr; } mdsAddressIndex_ = (mdsAddressIndex_ + 1) % mdsAddressStr_.size(); @@ -256,6 +264,10 @@ int CurvefsTools::HandleBuildCluster() { if (ret < 0) { return DealFailedRet(ret, "read cluster map"); } + ret = InitPoolsetData(); + if (ret < 0) { + return DealFailedRet(ret, "init poolset data"); + } ret = InitServerData(); if (ret < 0) { return DealFailedRet(ret, "init server data"); @@ -276,6 +288,14 @@ int CurvefsTools::HandleBuildCluster() { if (ret < 0) { return DealFailedRet(ret, "clear physicalpool"); } + ret = ClearPoolset(); + if (ret < 0) { + return DealFailedRet(ret, "clear poolset"); + } + ret = CreatePoolset(); + if (ret < 0) { + return DealFailedRet(ret, "create Poolset"); + } ret = CreatePhysicalPool(); if (ret < 0) { return DealFailedRet(ret, "create physicalpool"); @@ -291,9 +311,9 @@ int CurvefsTools::HandleBuildCluster() { return ret; } + int CurvefsTools::ReadClusterMap() { - std::ifstream fin; - fin.open(FLAGS_cluster_map.c_str(), std::ios::in); + std::ifstream fin(FLAGS_cluster_map); if (fin.is_open()) { Json::CharReaderBuilder reader; JSONCPP_STRING errs; @@ -311,6 +331,33 @@ int CurvefsTools::ReadClusterMap() { } return 0; } +int CurvefsTools::InitPoolsetData() { + if (clusterMap_[kPoolsets].isNull()) { + return 0; + } + + for (const auto& poolset : clusterMap_[kPoolsets]) { + CurvePoolsetData poolsetData; + if (!poolset[kName].isString()) { + LOG(ERROR) <<"poolset name must be string" << poolset[kName]; + return -1; + } + poolsetData.name = poolset[kName].asString(); + + if (!poolset[kType].isString()) { + LOG(ERROR) << "poolset type must be string"; + return -1; + } + poolsetData.type = poolset[kType].asString(); + if (poolsetData.type.empty()) { + LOG(ERROR) << "poolset type must not empty"; + return -1; + } + + poolsetDatas.emplace_back(std::move(poolsetData)); + } + return 0; +} int CurvefsTools::InitServerData() { if (clusterMap_[kServers].isNull()) { @@ -349,12 +396,24 @@ int CurvefsTools::InitServerData() { return -1; } serverData.zoneName = server[kZone].asString(); + if (!server[kPhysicalPool].isString()) { LOG(ERROR) << "server physicalpool must be string"; return -1; } serverData.physicalPoolName = 
server[kPhysicalPool].asString(); - serverDatas.emplace_back(serverData); + + if (!server.isMember(kPoolsetName)) { + serverData.poolsetName = kDefaultPoolsetName; + } else if (server[kPoolsetName].isString()) { + serverData.poolsetName = server[kPoolsetName].asString(); + } else { + LOG(ERROR) << "server poolsetName must be string, poolsetName is " + << server[kPoolsetName]; + return -1; + } + + serverDatas.emplace_back(std::move(serverData)); } return 0; } @@ -420,6 +479,37 @@ int CurvefsTools::InitLogicalPoolData() { return 0; } +int CurvefsTools::ListPoolset(std::list* poolsetInfos) { + TopologyService_Stub stub(&channel_); + ListPoolsetRequest request; + ListPoolsetResponse response; + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "ListPoolset send request: " << request.DebugString(); + + stub.ListPoolset(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) { + return kRetCodeRedirectMds; + } + if (response.statuscode() != kTopoErrCodeSuccess) { + LOG(ERROR) << "ListPoolset Rpc response fail. " + << "Message is :" + << response.DebugString(); + return response.statuscode(); + } else { + LOG(INFO) << "Received ListPoolset Rpc response success, " + << response.DebugString(); + } + + for (int i = 0; i < response.poolsetinfos_size(); i++) { + poolsetInfos->push_back(response.poolsetinfos(i)); + } + return 0; +} + int CurvefsTools::ListPhysicalPool( std::list *physicalPoolInfos) { TopologyService_Stub stub(&channel_); @@ -459,6 +549,43 @@ int CurvefsTools::ListPhysicalPool( return 0; } +int CurvefsTools::ListPhysicalPoolsInPoolset(PoolsetIdType poolsetid, + std::list *physicalPoolInfos) { + TopologyService_Stub stub(&channel_); + ListPhysicalPoolsInPoolsetRequest request; + ListPhysicalPoolResponse response; + request.add_poolsetid(poolsetid); + + brpc::Controller cntl; + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "ListPhysicalPoolsInPoolset, send request: " + << request.DebugString(); + + stub.ListPhysicalPoolsInPoolset(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) { + return kRetCodeRedirectMds; + } + if (response.statuscode() != kTopoErrCodeSuccess) { + LOG(ERROR) << "ListPhysicalPoolsInPoolset Rpc response fail. 
" + << "Message is :" + << response.DebugString() + << " , poolsetid = " + << poolsetid; + return response.statuscode(); + } else { + LOG(INFO) << "Received ListPhyPoolsInPoolset Rpc resp success," + << response.DebugString(); + } + + for (int i = 0; i < response.physicalpoolinfos_size(); i++) { + physicalPoolInfos->push_back(response.physicalpoolinfos(i)); + } + return 0; +} + int CurvefsTools::AddListPoolZone(PoolIdType poolid, std::list *zoneInfos) { TopologyService_Stub stub(&channel_); @@ -533,42 +660,96 @@ int CurvefsTools::AddListZoneServer(ZoneIdType zoneid, } int CurvefsTools::ScanCluster() { + // get all poolsets and compare + // De-duplication + for (const auto& poolset : poolsetDatas) { + if (std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), + [poolset](const CurvePoolsetData& data) { + return data.name == poolset.name; + }) != poolsetToAdd.end()) { + continue; + } + poolsetToAdd.push_back(poolset); + } + + std::list poolsetInfos; + int ret = ListPoolset(&poolsetInfos); + if (ret < 0) { + return ret; + } + + for (auto it = poolsetInfos.begin(); it != poolsetInfos.end();) { + if (it->poolsetname() == kDefaultPoolsetName) { + ++it; + continue; + } + + auto ix = std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), + [it](const CurvePoolsetData& data) { + return data.name == it->poolsetname(); + }); + if (ix != poolsetToAdd.end()) { + poolsetToAdd.erase(ix); + it++; + } else { + poolsetToDel.push_back(it->poolsetid()); + it = poolsetInfos.erase(it); + } + } + // get all phsicalpool and compare // De-duplication for (auto server : serverDatas) { if (std::find_if(physicalPoolToAdd.begin(), physicalPoolToAdd.end(), [server](CurvePhysicalPoolData& data) { - return data.physicalPoolName == - server.physicalPoolName; + return data.physicalPoolName == server.physicalPoolName; }) != physicalPoolToAdd.end()) { continue; } CurvePhysicalPoolData poolData; poolData.physicalPoolName = server.physicalPoolName; + poolData.poolsetName = server.poolsetName.empty() ? 
kDefaultPoolsetName + : server.poolsetName; physicalPoolToAdd.push_back(poolData); } std::list physicalPoolInfos; - int ret = ListPhysicalPool(&physicalPoolInfos); - if (ret < 0) { - return ret; + for (auto poolsetid : poolsetToDel) { + ret = ListPhysicalPoolsInPoolset(poolsetid, &physicalPoolInfos); + if (ret < 0) { + return ret; + } } - for (auto it = physicalPoolInfos.begin(); - it != physicalPoolInfos.end();) { - auto ix = std::find_if(physicalPoolToAdd.begin(), - physicalPoolToAdd.end(), - [it] (CurvePhysicalPoolData& data) { - return data.physicalPoolName == it->physicalpoolname(); + for (auto phyPoolinfo : physicalPoolInfos) { + physicalPoolToDel.push_back(phyPoolinfo.physicalpoolid()); + } + + physicalPoolInfos.clear(); + + for (auto it = poolsetInfos.begin(); it != poolsetInfos.end(); it++) { + PoolsetIdType poolsetid = it->poolsetid(); + ret = ListPhysicalPoolsInPoolset(poolsetid, &physicalPoolInfos); + if (ret < 0) { + return ret; + } + } + + for (auto it = physicalPoolInfos.begin(); it != physicalPoolInfos.end();) { + auto ix = std::find_if( + physicalPoolToAdd.begin(), physicalPoolToAdd.end(), + [it](const CurvePhysicalPoolData& data) { + return (data.poolsetName == it->poolsetname()) && + (data.physicalPoolName == it->physicalpoolname()); }); - if (ix != physicalPoolToAdd.end()) { - physicalPoolToAdd.erase(ix); - it++; - } else { - physicalPoolToDel.push_back(it->physicalpoolid()); - it = physicalPoolInfos.erase(it); - } + if (ix != physicalPoolToAdd.end()) { + physicalPoolToAdd.erase(ix); + it++; + } else { + physicalPoolToDel.push_back(it->physicalpoolid()); + it = physicalPoolInfos.erase(it); + } } // get zone and compare @@ -692,12 +873,57 @@ int CurvefsTools::ScanCluster() { return 0; } +int CurvefsTools::CreatePoolset() { + TopologyService_Stub stub(&channel_); + for (const auto& it : poolsetToAdd) { + if (it.name == kDefaultPoolsetName) { + continue; + } + + PoolsetRequest request; + request.set_poolsetname(it.name); + request.set_type(it.type); + request.set_desc(""); + + PoolsetResponse response; + + brpc::Controller cntl; + cntl.set_max_retry(0); + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "CreatePoolset, send request: " + << request.DebugString(); + + stub.CreatePoolset(&cntl, &request, &response, nullptr); + + if (cntl.Failed()) { + LOG(WARNING) << "send rpc get cntl Failed, error context:" + << cntl.ErrorText(); + return kRetCodeRedirectMds; + } + if (response.statuscode() != kTopoErrCodeSuccess) { + LOG(ERROR) << "CreatePoolset Rpc response fail. 
" + << "Message is :" + << response.DebugString() + << " , poolsetName =" + << it.name; + return response.statuscode(); + } else { + LOG(INFO) << "Received CreatePoolset response success, " + << response.DebugString(); + } + } + return 0; +} + int CurvefsTools::CreatePhysicalPool() { TopologyService_Stub stub(&channel_); for (auto it : physicalPoolToAdd) { PhysicalPoolRequest request; request.set_physicalpoolname(it.physicalPoolName); request.set_desc(""); + request.set_poolsetname(it.poolsetName); PhysicalPoolResponse response; @@ -787,6 +1013,7 @@ int CurvefsTools::CreateServer() { request.set_externalport(it.externalPort); request.set_zonename(it.zoneName); request.set_physicalpoolname(it.physicalPoolName); + request.set_poolsetname(it.poolsetName); request.set_desc(""); ServerRegistResponse response; @@ -873,6 +1100,46 @@ int CurvefsTools::ClearPhysicalPool() { return 0; } +int CurvefsTools::ClearPoolset() { + TopologyService_Stub stub(&channel_); + for (const auto& it : poolsetToDel) { + PoolsetRequest request; + request.set_poolsetid(it); + + PoolsetResponse response; + + brpc::Controller cntl; + cntl.set_max_retry(0); + cntl.set_timeout_ms(FLAGS_rpcTimeOutMs); + cntl.set_log_id(1); + + LOG(INFO) << "DeletePoolset, send request: " << request.DebugString(); + + stub.DeletePoolset(&cntl, &request, &response, nullptr); + + if (cntl.ErrorCode() == EHOSTDOWN || + cntl.ErrorCode() == brpc::ELOGOFF) { + return kRetCodeRedirectMds; + } else if (cntl.Failed()) { + LOG(ERROR) << "DeletePoolset, errcode = " << response.statuscode() + << ", error content:" << cntl.ErrorText() + << " , PoolsetId = " << it; + return kRetCodeCommonErr; + } else if (response.statuscode() != kTopoErrCodeSuccess && + response.statuscode() != + kTopoErrCodeCannotDeleteDefaultPoolset) { + LOG(ERROR) << "DeletePoolset Rpc response fail. " + << "Message is :" << response.DebugString() + << " , PoolsetId = " << it; + return response.statuscode(); + } else { + LOG(INFO) << "Received DeletePoolset Rpc success, " + << response.DebugString(); + } + } + return 0; +} + int CurvefsTools::ClearZone() { TopologyService_Stub stub(&channel_); for (auto it : zoneToDel) { @@ -901,10 +1168,7 @@ int CurvefsTools::ClearZone() { << " , zoneId = " << it; return kRetCodeCommonErr; - } else { - break; - } - if (response.statuscode() != kTopoErrCodeSuccess) { + } else if (response.statuscode() != kTopoErrCodeSuccess) { LOG(ERROR) << "DeleteZone Rpc response fail. 
" << "Message is :" << response.DebugString() @@ -1012,6 +1276,39 @@ int CurvefsTools::SetChunkServer() { return 0; } +int CurvefsTools::ScanPoolset() { + for (const auto& poolset : poolsetDatas) { + if (std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), + [poolset](CurvePoolsetData& data) { + return data.name == poolset.name; + }) != poolsetToAdd.end()) { + continue; + } + // CurvePoolsetData poolsetData; + // poolsetData.name = poolset.; + poolsetToAdd.push_back(poolset); + } + std::list poolsetInfos; + int ret = ListPoolset(&poolsetInfos); + if (ret < 0) { + return ret; + } + for (auto it = poolsetInfos.begin(); it != poolsetInfos.end();) { + auto ix = std::find_if(poolsetToAdd.begin(), poolsetToAdd.end(), + [it](CurvePoolsetData& data) { + return data.name == it->poolsetname(); + }); + if (ix != poolsetToAdd.end()) { + poolsetToAdd.erase(ix); + it++; + } else { + poolsetToDel.push_back(static_cast(it->poolsetid())); + it = poolsetInfos.erase(it); + } + } + return 0; +} + int CurvefsTools::SetLogicalPool() { SetLogicalPoolRequest request; request.set_logicalpoolid(FLAGS_logicalpool_id); @@ -1113,5 +1410,3 @@ int main(int argc, char **argv) { return ret; } - - diff --git a/tools/curvefsTool.h b/tools/curvefsTool.h index 0963b94677..d060c1305e 100644 --- a/tools/curvefsTool.h +++ b/tools/curvefsTool.h @@ -55,6 +55,7 @@ struct CurveServerData { uint32_t externalPort; std::string zoneName; std::string physicalPoolName; + std::string poolsetName; }; struct CurveLogicalPoolData { @@ -76,6 +77,12 @@ struct CurveZoneData { struct CurvePhysicalPoolData { std::string physicalPoolName; + std::string poolsetName; +}; + +struct CurvePoolsetData { + std::string name; + std::string type; }; class CurvefsTools { @@ -102,7 +109,11 @@ class CurvefsTools { int ReadClusterMap(); int InitServerData(); int InitLogicalPoolData(); + int InitPoolsetData(); int ScanCluster(); + int ScanPoolset(); + + int CreatePoolset(); int ScanLogicalPool(); int CreatePhysicalPool(); int CreateZone(); @@ -111,10 +122,13 @@ class CurvefsTools { int ClearPhysicalPool(); int ClearZone(); int ClearServer(); + int ClearPoolset(); int DealFailedRet(int ret, std::string operation); - int ListPhysicalPool( + int ListPoolset(std::list *poolsetInfos); + int ListPhysicalPool(std::list *physicalPoolInfos); + int ListPhysicalPoolsInPoolset(PoolsetIdType poolsetid, std::list *physicalPoolInfos); int ListLogicalPool(const std::string& phyPoolName, @@ -129,10 +143,15 @@ class CurvefsTools { private: std::list serverDatas; std::list lgPoolDatas; + + // poolsets from cluster map + std::list poolsetDatas; std::list physicalPoolToAdd; std::list zoneToAdd; std::list serverToAdd; + std::list poolsetToAdd; + std::list poolsetToDel; std::list physicalPoolToDel; std::list zoneToDel; std::list serverToDel; diff --git a/tools/topo_example.json b/tools/topo_example.json index 6a37d29fb2..f930981f4c 100644 --- a/tools/topo_example.json +++ b/tools/topo_example.json @@ -1,4 +1,14 @@ { + "poolsets": [ + { + "name": "SSD", + "type": "SSD" + }, + { + "name": "HDD", + "type": "HDD" + } + ], "servers": [{ "name": "server1", "internalip": "127.0.0.1", @@ -6,7 +16,8 @@ "externalip": "127.0.0.1", "externalport": 8200, "zone": "zone1", - "physicalpool": "pool1" + "physicalpool": "pool1", + "poolset": "SSD" }, { "name": "server2", "internalip": "127.0.0.1", @@ -14,7 +25,8 @@ "externalip": "127.0.0.1", "externalport": 8201, "zone": "zone2", - "physicalpool": "pool1" + "physicalpool": "pool1", + "poolset": "SSD" }, { "name": "server3", "internalip": "127.0.0.1", 
@@ -22,7 +34,8 @@ "externalip": "127.0.0.1", "externalport": 8202, "zone": "zone3", - "physicalpool": "pool1" + "physicalpool": "pool1", + "poolset": "SSD" }], "logicalpools": [{ "name": "logicalPool1", diff --git a/util/basic.sh b/util/basic.sh new file mode 100644 index 0000000000..272b25f43d --- /dev/null +++ b/util/basic.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +############################ BASIC FUNCTIONS +msg() { + printf '%b' "$1" >&2 +} + +success() { + msg "\33[32m[✔]\33[0m ${1}${2}" +} + +die() { + msg "\33[31m[✘]\33[0m ${1}${2}" + exit 1 +} diff --git a/util/build.sh b/util/build.sh index 62a702ef1d..32dbed489a 100644 --- a/util/build.sh +++ b/util/build.sh @@ -217,7 +217,7 @@ build_target() { # build tools-v2 g_toolsv2_root="tools-v2" if [ $g_release -eq 1 ] - then + then (cd ${g_toolsv2_root} && make build version=${curve_version}) else (cd ${g_toolsv2_root} && make debug version=${curve_version}) diff --git a/util/check.sh b/util/check.sh new file mode 100644 index 0000000000..d6d059eb9d --- /dev/null +++ b/util/check.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +# Copyright (C) 2023 Jingli Chen (Wine93), NetEase Inc. + +############################ GLOBAL VARIABLES +g_type="$1" + +############################ FUNCTIONS +precheck() { + if [ -z "$(which xq)" ]; then + die "xq not found, please install it.\n" + fi + + if [ -z "$(which cpplint)" ]; then + die "cpplint not found, please install it.\n" + fi + + if [[ ${g_type} != "bs" && ${g_type} != "fs" ]]; then + die "please specify storage type: bs or fs\n" + fi +} + +get_files() { + if [ "$g_type" = "bs" ]; then + find src test -name '*.h' -or -name '*.cpp' + elif [ "$g_type" = "fs" ]; then + find curvefs/src curvefs/test -name '*.h' -or -name '*.cpp' + else + die "please specify storage type: bs or fs\n" + fi +} + +run_check() { + cpplint \ + --linelength=80 \ + --counting=detailed \ + --output=junit \ + --filter=-build/c++11 \ + --quiet $( get_files ) 2>&1 \ + | xq +} + +############################ MAIN() +main() { + source "util/basic.sh" + precheck + run_check +} + +main "$@" diff --git a/util/image.sh b/util/image.sh index f3b71ce98b..8f5d5b728a 100644 --- a/util/image.sh +++ b/util/image.sh @@ -74,9 +74,12 @@ mkdir -p $prefix $prefix/conf install_pkg $1 $prefix install_pkg $1 $prefix etcd install_pkg $1 $prefix monitor -copy_file ./thirdparties/memcache/libmemcached-1.1.2/build-libmemcached/src/libmemcached/libmemcached.so $docker_prefix -copy_file ./thirdparties/memcache/libmemcached-1.1.2/build-libmemcached/src/libmemcached/libmemcached.so.11 $docker_prefix -copy_file ./thirdparties/memcache/libmemcached-1.1.2/build-libmemcached/src/libhashkit/libhashkit.so.2 $docker_prefix + +if [ "$1" == "fs" ];then + copy_file ./thirdparties/memcache/libmemcached-1.1.2/build-libmemcached/src/libmemcached/libmemcached.so $docker_prefix + copy_file ./thirdparties/memcache/libmemcached-1.1.2/build-libmemcached/src/libmemcached/libmemcached.so.11 $docker_prefix + copy_file ./thirdparties/memcache/libmemcached-1.1.2/build-libmemcached/src/libhashkit/libhashkit.so.2 $docker_prefix +fi if [ "$1" == "bs" ]; then paths=`ls conf/* nebd/etc/nebd/*` diff --git a/util/install.sh b/util/install.sh index 9c012b685d..bb67318928 100644 --- a/util/install.sh +++ b/util/install.sh @@ -122,7 +122,7 @@ create_project_dir() { } copy_file() { - cp -f "$1" "$2" + cp -rf "$1" "$2" if [ $? 
-eq 0 ]; then success "copy file $1 to $2 success\n" else @@ -306,12 +306,7 @@ install_monitor() { else local dst="curvefs/monitor" fi - mkdir -p $project_prefix - mkdir -p "$project_prefix/prometheus" - mkdir -p "$project_prefix/data" - copy_file "$dst/target_json.py" "$project_prefix" - copy_file "$dst/target.ini" "$project_prefix" - + copy_file $dst $g_prefix success "install $project_name success\n" } @@ -323,6 +318,7 @@ install_tools-v2() { mkdir -p $project_prefix/conf copy_file "$project_name/sbin/curve" "$project_prefix/sbin" copy_file "$project_name/pkg/config/curve.yaml" "$g_prefix/conf" + copy_file "$project_name/sbin/daemon" "$project_prefix/sbin" } main() { diff --git a/util/playground.sh b/util/playground.sh new file mode 100644 index 0000000000..e964bb0be2 --- /dev/null +++ b/util/playground.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash + +# Copyright (C) 2023 Jingli Chen (Wine93), NetEase Inc. + +# see also: https://github.com/Burnett01/rsync-deployments/issues/21 + +############################ GLOBAL VARIABLES +g_obm_cfg=".obm.cfg" +g_worker_dir="/curve" +g_container_name="curve-build-playground.master" +g_container_image="opencurvedocker/curve-base:build-debian9" +g_init_script=$(cat << EOF +useradd -m -s /bin/bash -N -u $UID $USER +echo "${USER} ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers +chmod 0440 /etc/sudoers +chmod g+w /etc/passwd +echo 'alias ls="ls --color"' >> /home/${USER}/.bashrc +EOF +) +g_install_script=$(cat << EOF +apt-get -y install rsync golang jq vim python3-pop >/dev/null +curl -sSL https://bit.ly/install-xq | sudo bash >/dev/null 2>&1 +pip3 install cpplint >/dev/null 2>/dev/null +EOF +) + +############################ BASIC FUNCTIONS +parse_cfg() { + if [ ! -f "${g_obm_cfg}" ]; then + die "${g_obm_cfg} not found\n" + fi + g_container_name=$(cat < "${g_obm_cfg}" | grep -oP '(?<=container_name: ).*') + g_container_image=$(cat < "${g_obm_cfg}" | grep -oP '(?<=container_image: ).*') +} + +create_container() { + id=$(docker ps --all --format "{{.ID}}" --filter name=${g_container_name}) + if [ -n "${id}" ]; then + return + fi + + docker run -v "$(pwd)":${g_worker_dir} \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -dt \ + --env "UID=$(id -u)" \ + --env "USER=${USER}" \ + --hostname "playground" \ + --name "${g_container_name}" \ + --workdir ${g_worker_dir} \ + "${g_container_image}" + docker exec "${g_container_name}" bash -c "${g_init_script}" + docker exec "${g_container_name}" bash -c "${g_install_script}" + success "create ${g_container_name} (${g_container_image}) success :)" +} + +enter_container() { + docker exec \ + -u "$(id -u):$(id -g)" \ + -it \ + --env "TERM=xterm-256color" \ + "${g_container_name}" /bin/bash +} + +############################ MAIN() +main() { + source "util/basic.sh" + parse_cfg + create_container + enter_container +} + +main "$@" diff --git a/util/test.sh b/util/test.sh new file mode 100644 index 0000000000..c94fd1a6bb --- /dev/null +++ b/util/test.sh @@ -0,0 +1,146 @@ +#!/usr/bin/env bash + +# Copyright (C) 2023 Jingli Chen (Wine93), NetEase Inc. 
+ +############################ GLOBAL VARIABLES +g_type="" +g_only="" +g_exclude="NOTHING" +g_sequence=$(date +"%Y-%m-%d_%H:%M:%S") +g_prefix=".test/${g_sequence}" +g_summary_log="${g_prefix}/summary.log" +g_pass_footer="__PASSED__" +g_fail_footer="__FAILED__" +g_total=0 +g_failures=0 +g_successes=0 +g_failed_cases=() +g_running_cases=() + +############################ FUNCTIONS +parse_option() { + g_type="$1" + g_only="$2" + + if [ "$g_type" != "fs" ]; then + die "now we only support run CurveFS tests.\n" + fi +} + +# curvefs/test/common:curvefs-common-test +# curvefs/test/client/volume:curvefs_client_volume_test +# ... +get_files() { + make list stor=fs 2>/dev/null \ + | grep -E "${g_only}" \ + | grep -v "${g_exclude}" \ + | grep -v "curvefs/test/tools:" \ + | grep -Eo 'curvefs/test/(.+)' \ + | sed 's/:/\//g' +} + +run_test() { + binary="$1" + output="$2" + if [ ! -f "${binary}" ]; then + echo "${g_fail_footer}" >> "${output}" + return + fi + + $binary >"${output}" 2>&1 + if [ $? -eq 0 ]; then + echo "${g_pass_footer}" >> "${output}" + else + echo "${g_fail_footer}" >> "${output}" + fi +} + +run_tests() { + mkdir -p "${g_prefix}" + files=$(get_files) + arr=(${files}) + g_total=${#arr[@]} + for filepath in ${files}; do + output_dir="${g_prefix}/$(dirname ${filepath})" + mkdir -p "${output_dir}" + output_file="${output_dir}/$(basename ${filepath})" + run_test "$(pwd)/bazel-bin/${filepath}" "${output_file}" & + done +} + +cout() { + msg "\33[32m${1}\33[0m$2" +} + +error() { + msg "\33[31m${1}\33[0m${2}" +} + +warm() { + msg "\33[33m${1}\33[0m${2}" +} + +display_progress() { + while true; do + nrun=0 + g_failures=0 + g_successes=0 + g_failed_cases=() + g_running_cases=() + for output_file in $(find "${g_prefix}" -type f); do + footer=$(tail -n 1 "${output_file}" 2>/dev/null) + if [ "${footer}" = "${g_pass_footer}" ]; then + g_successes=$((g_successes+1)) + nrun=$((nrun+1)) + elif [ "${footer}" = "${g_fail_footer}" ]; then + g_failures=$((g_failures+1)) + g_failed_cases+=(${output_file}) + nrun=$((nrun+1)) + else + g_running_cases+=(${output_file}) + fi + done + + warm "testing/(${g_sequence}): " "(${g_failures}/${g_successes}/${nrun}/${g_total})\n" + + if [ ${nrun} -eq ${g_total} ]; then + break + else + sleep 3 + fi + done +} + +_summary() { + cout "\n" + cout "[==========]\n" + cout "[ TOTAL ]" ": ${g_total} tests\n" + cout "[ PASSED ]" ": ${g_successes} tests\n" + cout "[ FAILED ]" ": ${g_failures} tests\n" + cout "\n" + for case in "${g_failed_cases[@]}"; do + error "FAILED: " "${case}\n" + done + + for case in "${g_running_cases[@]}"; do + warm "RUNNING: " "${case}\n" + done +} + +summary() { + _summary 2>&1 | tee "${g_summary_log}" + exit 0 +} + +############################ MAIN() +main() { + trap 'summary' SIGINT SIGTERM + + source "util/basic.sh" + parse_option "$@" + run_tests + display_progress + summary +} + +main "$@"
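# Illustrative usage: a minimal sketch, assuming the new scripts are invoked from
# the repository root (both source util/basic.sh by a relative path) and that the
# test binaries already exist under bazel-bin/:
#
#   bash util/check.sh bs       # cpplint over src/ and test/, JUnit output piped through xq
#   bash util/check.sh fs       # same check for curvefs/src/ and curvefs/test/
#   bash util/test.sh fs        # run every CurveFS test target listed by `make list stor=fs`
#   bash util/test.sh fs client # only targets whose path matches "client" (bs is not supported yet)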