diff --git a/conf/chunkserver.conf.example b/conf/chunkserver.conf.example
index b8a339e877..23c8416b71 100644
--- a/conf/chunkserver.conf.example
+++ b/conf/chunkserver.conf.example
@@ -164,6 +164,24 @@ chunkfilepool.cpmeta_file_size=4096
 # chunkfilepool get chunk最大重试次数
 chunkfilepool.retry_times=5
+#
+# WAL file pool
+#
+# 是否开启从walfilepool获取segment,一般是true
+walfilepool.enable_get_segment_from_pool=true
+# walpool目录
+walfilepool.file_pool_dir=./0/
+# walpool meta文件路径
+walfilepool.meta_path=./walfilepool.meta
+# walpool segment大小
+walfilepool.segment_size=8388608
+# WAL metapage大小
+walfilepool.metapage_size=4096
+# WAL filepool 元数据文件大小
+walfilepool.meta_file_size=4096
+# WAL filepool get segment最大重试次数
+walfilepool.retry_times=5
+
 
 #
 # trash settings
 #
diff --git a/curve-ansible/roles/format_chunkserver/defaults/main.yml b/curve-ansible/roles/format_chunkserver/defaults/main.yml
index e2976e29c9..bfb315016f 100644
--- a/curve-ansible/roles/format_chunkserver/defaults/main.yml
+++ b/curve-ansible/roles/format_chunkserver/defaults/main.yml
@@ -16,3 +16,4 @@
 #
 chunk_alloc_percent: 80
+wal_segment_alloc_percent: 10
diff --git a/curve-ansible/roles/generate_config/defaults/main.yml b/curve-ansible/roles/generate_config/defaults/main.yml
index 5825145750..90d9d83e9d 100644
--- a/curve-ansible/roles/generate_config/defaults/main.yml
+++ b/curve-ansible/roles/generate_config/defaults/main.yml
@@ -118,10 +118,15 @@ chunkserver_wconcurrentapply_size: 10
 chunkserver_wconcurrentapply_queuedepth: 1
 chunkserver_rconcurrentapply_size: 5
 chunkserver_rconcurrentapply_queuedepth: 1
-chunkserver_chunkfilepool_enable_get_chunk_from_pool: true
 chunkserver_chunkfilepool_chunk_file_pool_dir: ./0/
 chunkserver_chunkfilepool_cpmeta_file_size: 4096
 chunkserver_chunkfilepool_retry_times: 5
+chunkserver_walfilepool_file_pool_dir: ./0/
+chunkserver_walfilepool_meta_path: ./walfilepool.meta
+chunkserver_walfilepool_segment_size: 8388608
+chunkserver_walfilepool_metapage_size: 4096
+chunkserver_walfilepool_meta_file_size: 4096
+chunkserver_walfilepool_retry_times: 5
 chunkserver_trash_expire_after_sec: 300
 chunkserver_trash_scan_period_sec: 120
 chunkserver_common_log_dir: ./runlog/
diff --git a/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2 b/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2
index 616fa09f53..ea738a6794 100644
--- a/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2
+++ b/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2
@@ -161,7 +161,7 @@ rconcurrentapply.queuedepth={{ chunkserver_rconcurrentapply_queuedepth }}
 # Chunkfile pool
 #
 # 是否开启从chunkfilepool获取chunk,一般是true
-chunkfilepool.enable_get_chunk_from_pool={{ chunkserver_chunkfilepool_enable_get_chunk_from_pool }}
+chunkfilepool.enable_get_chunk_from_pool={{ chunkserver_format_disk }}
 # chunkfilepool目录
 chunkfilepool.chunk_file_pool_dir={{ chunkserver_chunkfilepool_chunk_file_pool_dir }}
 # chunkfilepool meta文件路径
@@ -169,7 +169,25 @@ chunkfilepool.chunk_file_pool_dir={{ chunkserver_chunkfilepool_chunk_file_pool_d
 # chunkfilepool meta文件大小
 chunkfilepool.cpmeta_file_size={{ chunkserver_chunkfilepool_cpmeta_file_size }}
 # chunkfilepool get chunk最大重试次数
-chunkfilepool.retry_times={{ chunkserver_chunkfilepool_retry_times }}
+chunkfilepool.retry_times=5
+
+#
+# WAL file pool
+#
+# 是否开启从walfilepool获取segment,一般是true
+walfilepool.enable_get_segment_from_pool={{ chunkserver_format_disk }}
+# walpool目录
+walfilepool.file_pool_dir={{ chunkserver_walfilepool_file_pool_dir }}
+# walpool meta文件路径
+walfilepool.meta_path={{ chunkserver_walfilepool_meta_path }}
+# walpool segment大小
+walfilepool.segment_size={{ chunkserver_walfilepool_segment_size }}
+# WAL metapage大小
+walfilepool.metapage_size={{ chunkserver_walfilepool_metapage_size }}
+# WAL filepool 元数据文件大小
+walfilepool.meta_file_size={{ chunkserver_walfilepool_meta_file_size }}
+# WAL filepool get segment最大重试次数
+walfilepool.retry_times={{ chunkserver_walfilepool_retry_times }}
 
 #
 # trash settings
diff --git a/curve-ansible/roles/install_package/tasks/include/install_curve-chunkserver.yml b/curve-ansible/roles/install_package/tasks/include/install_curve-chunkserver.yml
index 9fa059af07..da7a6962f5 100644
--- a/curve-ansible/roles/install_package/tasks/include/install_curve-chunkserver.yml
+++ b/curve-ansible/roles/install_package/tasks/include/install_curve-chunkserver.yml
@@ -49,7 +49,7 @@
 
 - name: generate chunkserver_ctl.sh
   vars:
-    enable_chunkfilepool: "{{ chunkserver_format_disk }}"
+    enable_FilePool: "{{ chunkserver_format_disk }}"
     jemalloc_path: "{{ lib_install_prefix }}/lib/libjemalloc.so.1"
   template: src=chunkserver_ctl.sh.j2 dest={{ deploy_dir }}/chunkserver_ctl.sh mode=0755
 
diff --git a/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2 b/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2
index ebb5be83d6..05da13c92b 100644
--- a/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2
+++ b/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2
@@ -13,6 +13,7 @@ function help() {
     echo " restart : restart chunkserver"
     echo " status : show the online status of chunkserver"
     echo " deploy : prepare the chunkserver running environment"
+    echo " deploy-wal-pool : prepare the wal pool"
     echo " format : format the chunkfile pool"
     echo "USAGE:"
     echo " start all chunkservers : ./chunkserver_ctl.sh start all"
@@ -26,13 +27,16 @@ function help() {
     echo " record uuid meta in all disks : ./chunkserver_ctl.sh record-meta"
     echo " deploy all disk : ./chunkserver_ctl.sh deploy all"
     echo " deploy one disk : ./chunkserver_ctl.sh deploy /dev/sd{id} /data/chunkserver{id}"
-    echo " format by percent : ./chunkserver_ctl.sh format -allocatepercent=80 -filesystem_path=/data/chunkserver{id} "
-    echo " -chunkfilepool_dir=/data/chunkserver{id}/chunkfilepool/"
-    echo " -chunkfilepool_metapath=/data/chunkserver{id}/chunkfilepool.meta"
-    echo " format by chunk numbers : ./chunkserver_ctl.sh format -allocateByPercent=false -preallocateNum=100"
-    echo " -filesystem_path=/data/chunkserver{id} "
-    echo " -chunkfilepool_dir=/data/chunkserver{id}/chunkfilepool/"
-    echo " -chunkfilepool_metapath==/data/chunkserver{id}/chunkfilepool.meta"
+    echo " deploy all wal pool : ./chunkserver_ctl.sh deploy-wal-pool all"
+    echo " deploy one wal pool : ./chunkserver_ctl.sh deploy-wal-pool {\$chunkserverId}"
+    echo " format by percent : ./chunkserver_ctl.sh format -allocatePercent=80 -fileSystemPath=/data/chunkserver{id} "
+    echo " -filePoolDir=/data/chunkserver{id}/filepool/"
+    echo " -filePoolMetaPath=/data/chunkserver{id}/filepool.meta"
+    echo " format by chunk numbers : ./chunkserver_ctl.sh format -allocateByPercent=false -preAllocateNum=100"
+    echo " -fileSystemPath=/data/chunkserver{id} "
+    echo " -filePoolDir=/data/chunkserver{id}/filepool/"
+    echo " -filePoolMetaPath=/data/chunkserver{id}/filepool.meta"
+
     echo "OPSTIONS:"
     echo " [-c|--confPath path] chunkserver conf path need for start command, default:/etc/curve/chunkserver.conf"
     echo " [-nc|--noConfirm] if specified, deploy no need to confirm"
@@ -146,9
+150,10 @@ function start_one() { -bthread_concurrency=18 -raft_max_segment_size=8388608 \ -raft_max_install_snapshot_tasks_num=1 -raft_sync=true \ -conf=${confPath} \ - -enableChunkfilepool={{ enable_chunkfilepool }} \ -chunkFilePoolDir=${dataDir}/chunkserver$1 \ -chunkFilePoolMetaPath=${dataDir}/chunkserver$1/chunkfilepool.meta \ + -walFilePoolDir=${dataDir}/chunkserver$1 \ + -walFilePoolMetaPath=${dataDir}/chunkserver$1/walfilepool.meta \ -chunkServerIp=$internal_ip \ -enableExternalServer=$enableExternalServer \ -chunkServerExternalIp=$external_ip \ @@ -156,10 +161,12 @@ function start_one() { -chunkServerMetaUri=local://${dataDir}/chunkserver$1/chunkserver.dat \ -chunkServerStoreUri=local://${dataDir}/chunkserver$1/ \ -copySetUri=local://${dataDir}/chunkserver$1/copysets \ - -raftSnapshotUri=curve:///data/chunkserver$1/copysets \ + -raftSnapshotUri=curve://${dataDir}/chunkserver$1/copysets \ + -raftLogUri=curve://${dataDir}/chunkserver$1/copysets \ -recycleUri=local://${dataDir}/chunkserver$1/recycler \ -raft_sync_segments=true \ -graceful_quit_on_sigterm=true \ + -raft_use_fsync_rather_than_fdatasync=false \ -log_dir=${dataDir}/log/chunkserver$1 > /dev/null 2>&1 & } @@ -314,6 +321,22 @@ function recordmeta() { meta_record; } +function deploy-wal-pool() { + if [ $# -lt 1 ] + then + help + return 1 + fi + if [ "$1" = "all" ] + then + walfile_pool_prep + return $? + fi + deploy_one_walfile_pool $1 + wait +} + + function main() { if [ $# -lt 1 ] then @@ -350,6 +373,11 @@ function main() { shift recordmeta ;; + "deploy-wal-pool") + shift + deploy-wal-pool $@ + ;; + *) help ;; diff --git a/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2 b/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2 index afbcde57e1..ad81182b92 100644 --- a/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2 +++ b/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2 @@ -161,14 +161,34 @@ function chunkfile_pool_prep { ret=`lsblk|grep chunkserver|wc -l` for i in `seq 0 $((${ret}-1))` do - curve-format -allocatepercent={{ chunk_alloc_percent }} \ - -chunkfilepool_dir=$dataDir/chunkserver$i/chunkfilepool \ - -chunkfilepool_metapath=$dataDir/chunkserver$i/chunkfilepool.meta \ - -filesystem_path=$dataDir/chunkserver$i/chunkfilepool & + curve-format -allocatePercent={{ chunk_alloc_percent }} \ + -filePoolDir=/data/chunkserver$i/chunkfilepool \ + -filePoolMetaPath=/data/chunkserver$i/chunkfilepool.meta \ + -fileSize={{ chunk_size }} \ + -fileSystemPath=/data/chunkserver$i/chunkfilepool & done wait } +function deploy_one_walfile_pool { + curve-format -allocatePercent={{ wal_segment_alloc_percent }} \ + -filePoolDir=/data/chunkserver$1/walfilepool \ + -filePoolMetaPath=/data/chunkserver$1/walfilepool.meta \ + -fileSize={{ chunkserver_walfilepool_segment_size }} \ + -fileSystemPath=/data/chunkserver$1/walfilepool & +} + + +# format walfile pool +function walfile_pool_prep { + ret=`lsblk|grep chunkserver|wc -l` + for i in `seq 0 $((${ret}-1))` + do + deploy_one_walfile_pool $i + done + wait +} + function usage { echo "HELP: this tool will prepare the chunkserver running env." 
echo " you can deploy all the disks by setting all" @@ -187,6 +207,7 @@ function deploy_all { fstab_record; meta_record; chunkfile_pool_prep; + walfile_pool_prep; } function deploy_one { @@ -244,10 +265,15 @@ function deploy_one { echo "uuid=$uuid" > $dirname/disk.meta echo "uuidmd5=$uuidmd5" >> $dirname/disk.meta #格式化chunkfile pool - curve-format -allocatepercent=80 \ - -chunkfilepool_dir=$dirname/chunkfilepool \ - -chunkfilepool_metapath=$dirname/chunkfilepool.meta \ - -filesystem_path=$dirname/chunkfilepool & + curve-format -allocatePercent=80 \ + -filePoolDir=$dirname/chunkfilepool \ + -filePoolMetaPath=$dirname/chunkfilepool.meta \ + -fileSystemPath=$dirname/chunkfilepool & + # prepare walfile pool + curve-format -allocatePercent=10 \ + -filePoolDir=$dirname/walfilepool \ + -filePoolMetaPath=$dirname/walfilepool.meta \ + -fileSystemPath=$dirname/walfilepool & wait exit } diff --git a/curve-ansible/server.ini b/curve-ansible/server.ini index 081c7367fb..6b00495143 100644 --- a/curve-ansible/server.ini +++ b/curve-ansible/server.ini @@ -68,6 +68,7 @@ defined_copysets_status="Copysets are healthy" chunkserver_base_port=8200 chunkserver_format_disk=false chunk_alloc_percent=80 +wal_segment_alloc_percent=10 # 每台机器上的chunkserver的数量 chunkserver_num=3 chunkserver_need_sudo=true @@ -89,6 +90,8 @@ client_chunkserver_op_max_retry=3 client_chunkserver_max_stable_timeout_times=64 client_turn_off_health_check=false disable_snapshot_clone=true +chunk_size=16777216 +chunkserver_walfilepool_segment_size=8388608 [snapshotclone_nginx:vars] snapshotcloneserver_nginx_dir=/etc/curve/nginx diff --git a/curve-chunkserver/home/nbs/chunkserver_ctl.sh b/curve-chunkserver/home/nbs/chunkserver_ctl.sh index 6086fdf8a6..ee86e6fc8a 100755 --- a/curve-chunkserver/home/nbs/chunkserver_ctl.sh +++ b/curve-chunkserver/home/nbs/chunkserver_ctl.sh @@ -29,6 +29,7 @@ function help() { echo " restart : restart chunkserver" echo " status : show the online status of chunkserver" echo " deploy : prepare the chunkserver running environment" + echo " deploy-wal-pool : prepare the wal pool" echo " format : format the chunkfile pool" echo "USAGE:" echo " start all chunkservers : ./chunkserver_ctl.sh start all" @@ -42,13 +43,17 @@ function help() { echo " record uuid meta in all disks : ./chunkserver_ctl.sh record-meta" echo " deploy all disk : ./chunkserver_ctl.sh deploy all" echo " deploy one disk : ./chunkserver_ctl.sh deploy /dev/sd{id} /data/chunkserver{id}" - echo " format by percent : ./chunkserver_ctl.sh format -allocatepercent=80 -filesystem_path=/data/chunkserver{id} " - echo " -chunkfilepool_dir=/data/chunkserver{id}/chunkfilepool/" - echo " -chunkfilepool_metapath=/data/chunkserver{id}/chunkfilepool.meta" - echo " format by chunk numbers : ./chunkserver_ctl.sh format -allocateByPercent=false -preallocateNum=100" - echo " -filesystem_path=/data/chunkserver{id} " - echo " -chunkfilepool_dir=/data/chunkserver{id}/chunkfilepool/" - echo " -chunkfilepool_metapath==/data/chunkserver{id}/chunkfilepool.meta" + echo " deploy all wal pool : ./chunkserver_ctl.sh deploy-wal-pool all" + echo " deploy one wal pool : ./chunkserver_ctl.sh deploy-wal-pool {\$chunkserverId}" + echo " deploy all chunk pool : ./chunkserver_ctl.sh deploy-chunk-pool all" + echo " deploy one chunk pool : ./chunkserver_ctl.sh deploy-chunk-pool {\$chunkserverId}" + echo " format by percent : ./chunkserver_ctl.sh format -allocatePercent=80 -fileSystemPath=/data/chunkserver{id} " + echo " -filePoolDir=/data/chunkserver{id}/filepool/" + echo " 
-filePoolMetaPath=/data/chunkserver{id}/filepool.meta" + echo " format by chunk numbers : ./chunkserver_ctl.sh format -allocateByPercent=false -preAllocateNum=100" + echo " -fileSystemPath=/data/chunkserver{id} " + echo " -filePoolDir=/data/chunkserver{id}/filepool/" + echo " -filePoolMetaPath==/data/chunkserver{id}/filepool.meta" echo "OPSTIONS:" echo " [-c|--confPath path] chunkserver conf path need for start command, default:/etc/curve/chunkserver.conf" } @@ -163,6 +168,8 @@ function start_one() { -conf=${confPath} \ -chunkFilePoolDir=${DATA_DIR}/chunkserver$1 \ -chunkFilePoolMetaPath=${DATA_DIR}/chunkserver$1/chunkfilepool.meta \ + -walFilePoolDir=${DATA_DIR}/chunkserver$1 \ + -walFilePoolMetaPath=${DATA_DIR}/chunkserver$1/walfilepool.meta \ -chunkServerIp=$internal_ip \ -enableExternalServer=$enableExternalServer \ -chunkServerExternalIp=$external_ip \ @@ -171,11 +178,13 @@ function start_one() { -chunkServerStoreUri=local:///data/chunkserver$1/ \ -copySetUri=local:///data/chunkserver$1/copysets \ -raftSnapshotUri=curve:///data/chunkserver$1/copysets \ + -raftLogUri=curve:///data/chunkserver$1/copysets \ -recycleUri=local:///data/chunkserver$1/recycler \ -raft_sync_segments=true \ -graceful_quit_on_sigterm=true \ -raft_sync_meta=true \ -raft_sync_segments=true \ + -raft_use_fsync_rather_than_fdatasync=false \ -log_dir=${DATA_DIR}/log/chunkserver$1 > /dev/null 2>&1 & } @@ -327,6 +336,21 @@ function recordmeta() { meta_record; } +function deploy-wal-pool() { + if [ $# -lt 1 ] + then + help + return 1 + fi + if [ "$1" = "all" ] + then + walfile_pool_prep + return $? + fi + deploy_one_walfile_pool $1 + wait +} + function main() { if [ $# -lt 1 ] then @@ -363,6 +387,10 @@ function main() { shift recordmeta ;; + "deploy-wal-pool") + shift + deploy-wal-pool $@ + ;; *) help ;; diff --git a/curve-chunkserver/home/nbs/chunkserver_deploy.sh b/curve-chunkserver/home/nbs/chunkserver_deploy.sh index 2c06c87c32..db143fa189 100644 --- a/curve-chunkserver/home/nbs/chunkserver_deploy.sh +++ b/curve-chunkserver/home/nbs/chunkserver_deploy.sh @@ -171,14 +171,32 @@ function chunkfile_pool_prep { ret=`lsblk|grep chunkserver|wc -l` for i in `seq 0 $((${ret}-1))` do - curve-format -allocatepercent=80 \ - -chunkfilepool_dir=/data/chunkserver$i/chunkfilepool \ - -chunkfilepool_metapath=/data/chunkserver$i/chunkfilepool.meta \ - -filesystem_path=/data/chunkserver$i/chunkfilepool & + curve-format -allocatePercent=80 \ + -filePoolDir=/data/chunkserver$i/chunkfilepool \ + -filePoolMetaPath=/data/chunkserver$i/chunkfilepool.meta \ + -fileSystemPath=/data/chunkserver$i/chunkfilepool & done wait } +function deploy_one_walfile_pool { + curve-format -allocatePercent=10 \ + -filePoolDir=/data/chunkserver$1/walfilepool \ + -filePoolMetaPath=/data/chunkserver$1/walfilepool.meta \ + -fileSize=8388608 \ + -fileSystemPath=/data/chunkserver$1/walfilepool & +} + +# format walfile pool +function walfile_pool_prep { + ret=`lsblk|grep chunkserver|wc -l` + for i in `seq 0 $((${ret}-1))` + do + deploy_one_walfile_pool $i + done + wait +} + function usage { echo "HELP: this tool will prepare the chunkserver running env." 
echo " you can deploy all the disks by setting all" @@ -198,6 +216,7 @@ function deploy_all { fstab_record; meta_record; chunkfile_pool_prep; + walfile_pool_prep; } function deploy_one { @@ -273,10 +292,15 @@ function deploy_one { echo "uuid=$uuid" > $dirname/disk.meta echo "uuidmd5=$uuidmd5" >> $dirname/disk.meta #格式化chunkfile pool - curve-format -allocatepercent=80 \ - -chunkfilepool_dir=$dirname/chunkfilepool \ - -chunkfilepool_metapath=$dirname/chunkfilepool.meta \ - -filesystem_path=$dirname/chunkfilepool & + curve-format -allocatePercent=80 \ + -filePoolDir=$dirname/chunkfilepool \ + -filePoolMetaPath=$dirname/chunkfilepool.meta \ + -fileSystemPath=$dirname/chunkfilepool & + # prepare walfile pool + curve-format -allocatePercent=10 \ + -filePoolDir=$dirname/walfilepool \ + -filePoolMetaPath=$dirname/walfilepool.meta \ + -fileSystemPath=$dirname/walfilepool & wait exit } diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.0 b/deploy/local/chunkserver/conf/chunkserver.conf.0 index 2c7789ab74..0fe4e855d7 100644 --- a/deploy/local/chunkserver/conf/chunkserver.conf.0 +++ b/deploy/local/chunkserver/conf/chunkserver.conf.0 @@ -128,6 +128,17 @@ chunkfilepool.meta_path=./0/chunkfilepool.meta chunkfilepool.cpmeta_file_size=4096 chunkfilepool.retry_times=5 +# +# WAL file pool +# +walfilepool.enable_get_segment_from_pool=false +walfilepool.file_pool_dir=./0/walfilepool/ +walfilepool.meta_path=./0/walfilepool.meta +walfilepool.segment_size=8388608 +walfilepool.metapage_size=4096 +walfilepool.meta_file_size=4096 +walfilepool.retry_times=5 + # # trash settings # diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.1 b/deploy/local/chunkserver/conf/chunkserver.conf.1 index a59a953838..8f8cc9e523 100644 --- a/deploy/local/chunkserver/conf/chunkserver.conf.1 +++ b/deploy/local/chunkserver/conf/chunkserver.conf.1 @@ -125,6 +125,17 @@ chunkfilepool.meta_path=./1/chunkfilepool.meta chunkfilepool.cpmeta_file_size=4096 chunkfilepool.retry_times=5 +# +# WAL file pool +# +walfilepool.enable_get_segment_from_pool=false +walfilepool.file_pool_dir=./1/walfilepool/ +walfilepool.meta_path=./1/walfilepool.meta +walfilepool.segment_size=8388608 +walfilepool.metapage_size=4096 +walfilepool.meta_file_size=4096 +walfilepool.retry_times=5 + # # trash settings # diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.2 b/deploy/local/chunkserver/conf/chunkserver.conf.2 index 7fdc60f244..e285376482 100644 --- a/deploy/local/chunkserver/conf/chunkserver.conf.2 +++ b/deploy/local/chunkserver/conf/chunkserver.conf.2 @@ -125,6 +125,17 @@ chunkfilepool.meta_path=./2/chunkfilepool.meta chunkfilepool.cpmeta_file_size=4096 chunkfilepool.retry_times=5 +# +# WAL file pool +# +walfilepool.enable_get_segment_from_pool=false +walfilepool.file_pool_dir=./2/walfilepool/ +walfilepool.meta_path=./2/walfilepool.meta +walfilepool.segment_size=8388608 +walfilepool.metapage_size=4096 +walfilepool.meta_file_size=4096 +walfilepool.retry_times=5 + # # trash settings # diff --git a/deploy/local/chunkserver/start_chunkservers_locally.sh b/deploy/local/chunkserver/start_chunkservers_locally.sh index e72c0d42ef..e39b4127ee 100755 --- a/deploy/local/chunkserver/start_chunkservers_locally.sh +++ b/deploy/local/chunkserver/start_chunkservers_locally.sh @@ -29,10 +29,10 @@ curveformt=bazel-bin/src/tools [ -f ${loghome}/1 ] || mkdir -p ${loghome}/1 [ -f ${loghome}/2 ] || mkdir -p ${loghome}/2 -${curveformt}/curve_format -chunkfilepool_dir=./0/chunkfilepool/ -chunkfilepool_metapath=./0/chunkfilepool.meta -filesystem_path=./0/ 
-allocateByPercent=false -preallocateNum=16 -${curveformt}/curve_format -chunkfilepool_dir=./1/chunkfilepool/ -chunkfilepool_metapath=./1/chunkfilepool.meta -filesystem_path=./1/ -allocateByPercent=false -preallocateNum=16 -${curveformt}/curve_format -chunkfilepool_dir=./2/chunkfilepool/ -chunkfilepool_metapath=./2/chunkfilepool.meta -filesystem_path=./2/ -allocateByPercent=false -preallocateNum=16 +${curveformt}/curve_format -filePoolDir=./0/chunkfilepool/ -filePoolMetaPath=./0/chunkfilepool.meta -fileSystemPath=./0/ -allocateByPercent=false -preAllocateNum=16 +${curveformt}/curve_format -filePoolDir=./1/chunkfilepool/ -filePoolMetaPath=./1/chunkfilepool.meta -fileSystemPath=./1/ -allocateByPercent=false -preAllocateNum=16 +${curveformt}/curve_format -filePoolDir=./2/chunkfilepool/ -filePoolMetaPath=./2/chunkfilepool.meta -fileSystemPath=./2/ -allocateByPercent=false -preAllocateNum=16 -${bin}/chunkserver -bthread_concurrency=18 -raft_max_segment_size=8388608 -raft_max_install_snapshot_tasks_num=5 -raft_sync=true -chunkServerIp=127.0.0.1 -chunkServerPort=8200 -chunkServerStoreUri=local://./0/ -chunkServerMetaUri=local://./0/chunkserver.dat -copySetUri=local://./0/copysets -raftSnapshotUri=curve://./0/copysets -recycleUri=local://./0/recycler -chunkFilePoolDir=./0/chunkfilepool/ -chunkFilePoolMetaPath=./0/chunkfilepool.meta -v 19 -conf=${conf}/chunkserver.conf.0 2>${loghome}/0/chunkserver.err & -${bin}/chunkserver -bthread_concurrency=18 -raft_max_segment_size=8388608 -raft_max_install_snapshot_tasks_num=5 -raft_sync=true -chunkServerIp=127.0.0.1 -chunkServerPort=8201 -chunkServerStoreUri=local://./1/ -chunkServerMetaUri=local://./1/chunkserver.dat -copySetUri=local://./1/copysets -raftSnapshotUri=curve://./1/copysets -recycleUri=local://./1/recycler -chunkFilePoolDir=./1/chunkfilepool/ -chunkFilePoolMetaPath=./1/chunkfilepool.meta -v 19 -conf=${conf}/chunkserver.conf.1 2>${loghome}/1/chunkserver.err & -${bin}/chunkserver -bthread_concurrency=18 -raft_max_segment_size=8388608 -raft_max_install_snapshot_tasks_num=5 -raft_sync=true -chunkServerIp=127.0.0.1 -chunkServerPort=8202 -chunkServerStoreUri=local://./2/ -chunkServerMetaUri=local://./2/chunkserver.dat -copySetUri=local://./2/copysets -raftSnapshotUri=curve://./2/copysets -recycleUri=local://./2/recycler -chunkFilePoolDir=./2/chunkfilepool/ -chunkFilePoolMetaPath=./2/chunkfilepool.meta -v 19 -conf=${conf}/chunkserver.conf.2 2>${loghome}/2/chunkserver.err & +${bin}/chunkserver -bthread_concurrency=18 -raft_max_install_snapshot_tasks_num=5 -raft_sync=true -chunkServerIp=127.0.0.1 -chunkServerPort=8200 -chunkServerStoreUri=local://./0/ -chunkServerMetaUri=local://./0/chunkserver.dat -copySetUri=local://./0/copysets -raftSnapshotUri=curve://./0/copysets -raftLogUri=curve://./0/copysets -recycleUri=local://./0/recycler -chunkFilePoolDir=./0/chunkfilepool/ -chunkFilePoolMetaPath=./0/chunkfilepool.meta -walFilePoolDir=./0/walfilepool/ -walFilePoolMetaPath=./0/walfilepool.meta -v 19 -conf=${conf}/chunkserver.conf.0 2>${loghome}/0/chunkserver.err & +${bin}/chunkserver -bthread_concurrency=18 -raft_max_install_snapshot_tasks_num=5 -raft_sync=true -chunkServerIp=127.0.0.1 -chunkServerPort=8201 -chunkServerStoreUri=local://./1/ -chunkServerMetaUri=local://./1/chunkserver.dat -copySetUri=local://./1/copysets -raftSnapshotUri=curve://./1/copysets -raftLogUri=curve://./1/copysets -recycleUri=local://./1/recycler -chunkFilePoolDir=./1/chunkfilepool/ -chunkFilePoolMetaPath=./1/chunkfilepool.meta -walFilePoolDir=./1/walfilepool/ 
-walFilePoolMetaPath=./1/walfilepool.meta -v 19 -conf=${conf}/chunkserver.conf.1 2>${loghome}/1/chunkserver.err & +${bin}/chunkserver -bthread_concurrency=18 -raft_max_install_snapshot_tasks_num=5 -raft_sync=true -chunkServerIp=127.0.0.1 -chunkServerPort=8202 -chunkServerStoreUri=local://./2/ -chunkServerMetaUri=local://./2/chunkserver.dat -copySetUri=local://./2/copysets -raftSnapshotUri=curve://./2/copysets -raftLogUri=curve://./2/copysets -recycleUri=local://./2/recycler -chunkFilePoolDir=./2/chunkfilepool/ -chunkFilePoolMetaPath=./2/chunkfilepool.meta -walFilePoolDir=./2/walfilepool/ -walFilePoolMetaPath=./2/walfilepool.meta -v 19 -conf=${conf}/chunkserver.conf.2 2>${loghome}/2/chunkserver.err & diff --git a/src/chunkserver/BUILD b/src/chunkserver/BUILD index 8d7a2824a2..f097a21765 100644 --- a/src/chunkserver/BUILD +++ b/src/chunkserver/BUILD @@ -67,6 +67,7 @@ cc_library( "//src/chunkserver/datastore:chunkserver_datastore", "//src/chunkserver/concurrent_apply:chunkserver_concurrent_apply", "//src/chunkserver/raftsnapshot:chunkserver-raft-snapshot", + "//src/chunkserver/raftlog:chunkserver-raft-log", "//src/common:curve_common", "//src/common:curve_s3_adapter", "//src/fs:lfs", @@ -111,6 +112,7 @@ cc_library( "//src/chunkserver/datastore:chunkserver_datastore", "//src/chunkserver/concurrent_apply:chunkserver_concurrent_apply", "//src/chunkserver/raftsnapshot:chunkserver-raft-snapshot", + "//src/chunkserver/raftlog:chunkserver-raft-log", "//src/common:curve_common", "//src/common:curve_s3_adapter", "//src/fs:lfs", @@ -148,6 +150,7 @@ cc_binary( "//src/chunkserver/datastore:chunkserver_datastore", "//src/chunkserver/concurrent_apply:chunkserver_concurrent_apply", "//src/chunkserver/raftsnapshot:chunkserver-raft-snapshot", + "//src/chunkserver/raftlog:chunkserver-raft-log", "//src/common:curve_common", "//src/common:curve_s3_adapter", "//src/fs:lfs", diff --git a/src/chunkserver/chunkserver.cpp b/src/chunkserver/chunkserver.cpp index 535664888b..fb0c8b335a 100644 --- a/src/chunkserver/chunkserver.cpp +++ b/src/chunkserver/chunkserver.cpp @@ -41,6 +41,7 @@ #include "src/chunkserver/raftsnapshot/curve_snapshot_attachment.h" #include "src/chunkserver/raftsnapshot/curve_file_service.h" #include "src/chunkserver/raftsnapshot/curve_snapshot_storage.h" +#include "src/chunkserver/raftlog/curve_segment_log_storage.h" #include "src/common/curve_version.h" using ::curve::fs::LocalFileSystem; @@ -59,6 +60,7 @@ DEFINE_string(chunkServerMetaUri, "local://./0/chunkserver.dat", "chunnkserver meata uri"); DEFINE_string(copySetUri, "local://./0/copysets", "copyset data uri"); DEFINE_string(raftSnapshotUri, "curve://./0/copysets", "raft snapshot uri"); +DEFINE_string(raftLogUri, "curve://./0/copysets", "raft log uri"); DEFINE_string(recycleUri, "local://./0/recycler" , "recycle uri"); DEFINE_string(chunkFilePoolDir, "./0/", "chunk file pool location"); DEFINE_string(chunkFilePoolMetaPath, @@ -67,6 +69,10 @@ DEFINE_string(logPath, "./0/chunkserver.log-", "log file path"); DEFINE_string(mdsListenAddr, "127.0.0.1:6666", "mds listen addr"); DEFINE_bool(enableChunkfilepool, true, "enable chunkfilepool"); DEFINE_uint32(copysetLoadConcurrency, 5, "copyset load concurrency"); +DEFINE_bool(enableWalfilepool, true, "enable WAL filepool"); +DEFINE_string(walFilePoolDir, "./0/", "WAL filepool location"); +DEFINE_string(walFilePoolMetaPath, "./walfilepool.meta", + "WAL filepool meta path"); namespace curve { namespace chunkserver { @@ -74,6 +80,8 @@ namespace chunkserver { int ChunkServer::Run(int argc, char** argv) { 
     gflags::ParseCommandLineFlags(&argc, &argv, true);
+    RegisterCurveSegmentLogStorageOrDie();
+
     // ==========================加载配置项===============================//
     LOG(INFO) << "Loading Configuration.";
     common::Configuration conf;
@@ -120,13 +128,20 @@ int ChunkServer::Run(int argc, char** argv) {
         << "Failed to initialize local filesystem module!";
 
     // 初始化chunk文件池
-    ChunkfilePoolOptions chunkFilePoolOptions;
+    FilePoolOptions chunkFilePoolOptions;
     InitChunkFilePoolOptions(&conf, &chunkFilePoolOptions);
-    std::shared_ptr<ChunkfilePool> chunkfilePool =
-            std::make_shared<ChunkfilePool>(fs);
+    std::shared_ptr<FilePool> chunkfilePool =
+            std::make_shared<FilePool>(fs);
     LOG_IF(FATAL, false == chunkfilePool->Initialize(chunkFilePoolOptions))
         << "Failed to init chunk file pool";
 
+    // Init Wal file pool
+    FilePoolOptions walFilePoolOptions;
+    InitWalFilePoolOptions(&conf, &walFilePoolOptions);
+    kWalFilePool = std::make_shared<FilePool>(fs);
+    LOG_IF(FATAL, false == kWalFilePool->Initialize(walFilePoolOptions))
+        << "Failed to init wal file pool";
+
     // 远端拷贝管理模块选项
     CopyerOptions copyerOptions;
     InitCopyerOptions(&conf, &copyerOptions);
@@ -173,7 +188,7 @@ int ChunkServer::Run(int argc, char** argv) {
     TrashOptions trashOptions;
     InitTrashOptions(&conf, &trashOptions);
     trashOptions.localFileSystem = fs;
-    trashOptions.chunkfilePool = chunkfilePool;
+    trashOptions.chunkFilePool = chunkfilePool;
     trash_ = std::make_shared<Trash>();
     LOG_IF(FATAL, trash_->Init(trashOptions) != 0)
         << "Failed to init Trash";
@@ -182,7 +197,7 @@ int ChunkServer::Run(int argc, char** argv) {
     CopysetNodeOptions copysetNodeOptions;
     InitCopysetNodeOptions(&conf, &copysetNodeOptions);
     copysetNodeOptions.concurrentapply = &concurrentapply;
-    copysetNodeOptions.chunkfilePool = chunkfilePool;
+    copysetNodeOptions.chunkFilePool = chunkfilePool;
     copysetNodeOptions.localFileSystem = fs;
     copysetNodeOptions.trash = trash_;
 
@@ -232,6 +247,7 @@ int ChunkServer::Run(int argc, char** argv) {
     // 监控部分模块的metric指标
     metric->MonitorTrash(trash_.get());
     metric->MonitorChunkFilePool(chunkfilePool.get());
+    metric->MonitorWalFilePool(kWalFilePool.get());
     metric->ExposeConfigMetric(&conf);
 
     // ========================添加rpc服务===============================//
@@ -382,22 +398,22 @@ void ChunkServer::Stop() {
 
 void ChunkServer::InitChunkFilePoolOptions(
-    common::Configuration *conf, ChunkfilePoolOptions *chunkFilePoolOptions) {
+    common::Configuration *conf, FilePoolOptions *chunkFilePoolOptions) {
     LOG_IF(FATAL, !conf->GetUInt32Value("global.chunk_size",
-        &chunkFilePoolOptions->chunkSize));
+        &chunkFilePoolOptions->fileSize));
     LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size",
         &chunkFilePoolOptions->metaPageSize));
     LOG_IF(FATAL, !conf->GetUInt32Value("chunkfilepool.cpmeta_file_size",
-        &chunkFilePoolOptions->cpMetaFileSize));
+        &chunkFilePoolOptions->metaFileSize));
     LOG_IF(FATAL, !conf->GetBoolValue(
         "chunkfilepool.enable_get_chunk_from_pool",
-        &chunkFilePoolOptions->getChunkFromPool));
+        &chunkFilePoolOptions->getFileFromPool));
 
-    if (chunkFilePoolOptions->getChunkFromPool == false) {
+    if (chunkFilePoolOptions->getFileFromPool == false) {
         std::string chunkFilePoolUri;
         LOG_IF(FATAL, !conf->GetStringValue(
             "chunkfilepool.chunk_file_pool_dir", &chunkFilePoolUri));
-        ::memcpy(chunkFilePoolOptions->chunkFilePoolDir,
+        ::memcpy(chunkFilePoolOptions->filePoolDir,
             chunkFilePoolUri.c_str(),
             chunkFilePoolUri.size());
     } else {
@@ -421,6 +437,34 @@ void ChunkServer::InitConcurrentApplyOptions(common::Configuration *conf,
         "wconcurrentapply.queuedepth", &concurrentApplyOptions->wqueuedepth));
 }
 
+void ChunkServer::InitWalFilePoolOptions(
+ common::Configuration *conf, FilePoolOptions *walPoolOptions) { + LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.segment_size", + &walPoolOptions->fileSize)); + LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.metapage_size", + &walPoolOptions->metaPageSize)); + LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.meta_file_size", + &walPoolOptions->metaFileSize)); + LOG_IF(FATAL, !conf->GetBoolValue( + "walfilepool.enable_get_segment_from_pool", + &walPoolOptions->getFileFromPool)); + + if (walPoolOptions->getFileFromPool == false) { + std::string filePoolUri; + LOG_IF(FATAL, !conf->GetStringValue( + "walfilepool.file_pool_dir", &filePoolUri)); + ::memcpy(walPoolOptions->filePoolDir, + filePoolUri.c_str(), + filePoolUri.size()); + } else { + std::string metaUri; + LOG_IF(FATAL, !conf->GetStringValue( + "walfilepool.meta_path", &metaUri)); + ::memcpy( + walPoolOptions->metaPath, metaUri.c_str(), metaUri.size()); + } +} + void ChunkServer::InitCopysetNodeOptions( common::Configuration *conf, CopysetNodeOptions *copysetNodeOptions) { LOG_IF(FATAL, !conf->GetStringValue("global.ip", ©setNodeOptions->ip)); @@ -616,6 +660,13 @@ void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { LOG(FATAL) << "raftSnapshotUri must be set when run chunkserver in command."; } + if (GetCommandLineFlagInfo("raftLogUri", &info) && !info.is_default) { + conf->SetStringValue( + "copyset.raft_log_uri", FLAGS_raftLogUri); + } else { + LOG(FATAL) + << "raftLogUri must be set when run chunkserver in command."; + } if (GetCommandLineFlagInfo("recycleUri", &info) && !info.is_default) { @@ -643,6 +694,24 @@ void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { << "chunkFilePoolMetaPath must be set when run chunkserver in command."; } + if (GetCommandLineFlagInfo("walFilePoolDir", &info) && + !info.is_default) { + conf->SetStringValue( + "walfilepool.file_pool_dir", FLAGS_walFilePoolDir); + } else { + LOG(FATAL) + << "walFilePoolDir must be set when run chunkserver in command."; + } + + if (GetCommandLineFlagInfo("walFilePoolMetaPath", &info) && + !info.is_default) { + conf->SetStringValue( + "walfilepool.meta_path", FLAGS_walFilePoolMetaPath); + } else { + LOG(FATAL) + << "walFilePoolMetaPath must be set when run chunkserver in command."; + } + if (GetCommandLineFlagInfo("mdsListenAddr", &info) && !info.is_default) { conf->SetStringValue("mds.listen.addr", FLAGS_mdsListenAddr); } @@ -661,6 +730,12 @@ void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { FLAGS_enableChunkfilepool); } + if (GetCommandLineFlagInfo("enableWalfilepool", &info) && + !info.is_default) { + conf->SetBoolValue("walfilepool.enable_get_segment_from_pool", + FLAGS_enableWalfilepool); + } + if (GetCommandLineFlagInfo("copysetLoadConcurrency", &info) && !info.is_default) { conf->SetIntValue("copyset.load_concurrency", diff --git a/src/chunkserver/chunkserver.h b/src/chunkserver/chunkserver.h index 19dcf56ba6..ad11d9f101 100644 --- a/src/chunkserver/chunkserver.h +++ b/src/chunkserver/chunkserver.h @@ -57,7 +57,10 @@ class ChunkServer { private: void InitChunkFilePoolOptions(common::Configuration *conf, - ChunkfilePoolOptions *chunkFilePoolOptions); + FilePoolOptions *chunkFilePoolOptions); + + void InitWalFilePoolOptions(common::Configuration *conf, + FilePoolOptions *walPoolOption); void InitConcurrentApplyOptions(common::Configuration *conf, ConcurrentApplyOption *concurrentApplyOption); diff --git a/src/chunkserver/chunkserver_metrics.cpp b/src/chunkserver/chunkserver_metrics.cpp index 
7ff034eb66..d74ab83816 100644 --- a/src/chunkserver/chunkserver_metrics.cpp +++ b/src/chunkserver/chunkserver_metrics.cpp @@ -219,6 +219,7 @@ ChunkServerMetric::ChunkServerMetric() : hasInited_(false) , leaderCount_(nullptr) , chunkLeft_(nullptr) + , walSegmentLeft_(nullptr) , chunkTrashed_(nullptr) , chunkCount_(nullptr) , snapshotCount_(nullptr) @@ -281,6 +282,7 @@ int ChunkServerMetric::Fini() { ioMetrics_.Fini(); leaderCount_ = nullptr; chunkLeft_ = nullptr; + walSegmentLeft_ = nullptr; chunkTrashed_ = nullptr; chunkCount_ = nullptr; snapshotCount_ = nullptr; @@ -369,14 +371,24 @@ void ChunkServerMetric::OnResponse(const LogicPoolID& logicPoolId, ioMetrics_.OnResponse(type, size, latUs, hasError); } -void ChunkServerMetric::MonitorChunkFilePool(ChunkfilePool* chunkfilePool) { +void ChunkServerMetric::MonitorChunkFilePool(FilePool* chunkFilePool) { if (!option_.collectMetric) { return; } std::string chunkLeftPrefix = Prefix() + "_chunkfilepool_left"; chunkLeft_ = std::make_shared>( - chunkLeftPrefix, GetChunkLeftFunc, chunkfilePool); + chunkLeftPrefix, GetChunkLeftFunc, chunkFilePool); +} + +void ChunkServerMetric::MonitorWalFilePool(FilePool* walFilePool) { + if (!option_.collectMetric) { + return; + } + + std::string walSegmentLeftPrefix = Prefix() + "_walfilepool_left"; + walSegmentLeft_ = std::make_shared>( + walSegmentLeftPrefix, GetWalSegmentLeftFunc, walFilePool); } void ChunkServerMetric::MonitorTrash(Trash* trash) { diff --git a/src/chunkserver/chunkserver_metrics.h b/src/chunkserver/chunkserver_metrics.h index 7de0d5354c..8119aee5c9 100644 --- a/src/chunkserver/chunkserver_metrics.h +++ b/src/chunkserver/chunkserver_metrics.h @@ -45,7 +45,7 @@ namespace curve { namespace chunkserver { class CopysetNodeManager; -class ChunkfilePool; +class FilePool; class CSDataStore; class Trash; @@ -422,9 +422,15 @@ class ChunkServerMetric : public Uncopyable { /** * 监视chunk分配池,主要监视池中chunk的数量 - * @param chunkfilePool: ChunkfilePool的对象指针 + * @param chunkFilePool: chunkfilePool的对象指针 */ - void MonitorChunkFilePool(ChunkfilePool* chunkfilePool); + void MonitorChunkFilePool(FilePool* chunkFilePool); + + /** + * 监视wal segment分配池,主要监视池中segment的数量 + * @param walFilePool: walfilePool的对象指针 + */ + void MonitorWalFilePool(FilePool* walFilePool); /** * 监视回收站 @@ -515,8 +521,10 @@ class ChunkServerMetric : public Uncopyable { ChunkServerMetricOptions option_; // leader 的数量 AdderPtr leaderCount_; - // chunkfilepool 中剩余的 chunk 的数量 + // chunkfilepool 中剩余的 chunk 的数量 PassiveStatusPtr chunkLeft_; + // walfilepool 中剩余的 wal segment 的数量 + PassiveStatusPtr walSegmentLeft_; // trash 中的 chunk 的数量 PassiveStatusPtr chunkTrashed_; // chunkserver上的 chunk 的数量 diff --git a/src/chunkserver/config_info.cpp b/src/chunkserver/config_info.cpp index e29d17d55a..ad48981762 100644 --- a/src/chunkserver/config_info.cpp +++ b/src/chunkserver/config_info.cpp @@ -23,8 +23,8 @@ #include "src/chunkserver/config_info.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/datastore/chunkfile_pool.h" #include "src/chunkserver/concurrent_apply/concurrent_apply.h" +#include "src/chunkserver/datastore/file_pool.h" namespace curve { namespace chunkserver { @@ -44,7 +44,7 @@ CopysetNodeOptions::CopysetNodeOptions() maxChunkSize(16 * 1024 * 1024), pageSize(4096), concurrentapply(nullptr), - chunkfilePool(nullptr), + chunkFilePool(nullptr), localFileSystem(nullptr), snapshotThrottle(nullptr) { } diff --git a/src/chunkserver/config_info.h b/src/chunkserver/config_info.h index 5011a95109..db765d5813 100644 --- 
a/src/chunkserver/config_info.h +++ b/src/chunkserver/config_info.h @@ -38,7 +38,8 @@ namespace chunkserver { using curve::fs::LocalFileSystem; using curve::chunkserver::concurrent::ConcurrentApplyModule; -class ChunkfilePool; +class ConcurrentApplyModule; +class FilePool; class CopysetNodeManager; class CloneManager; @@ -94,7 +95,7 @@ struct CopysetNodeOptions { // 并发模块 ConcurrentApplyModule *concurrentapply; // Chunk file池子 - std::shared_ptr chunkfilePool; + std::shared_ptr chunkFilePool; // 文件系统适配层 std::shared_ptr localFileSystem; // 回收站, 心跳模块判断该chunkserver不在copyset配置组时, diff --git a/src/chunkserver/copyset_node.cpp b/src/chunkserver/copyset_node.cpp index 4d3768b048..30ba28397a 100755 --- a/src/chunkserver/copyset_node.cpp +++ b/src/chunkserver/copyset_node.cpp @@ -113,7 +113,7 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { dsOptions.pageSize = options.pageSize; dsOptions.locationLimit = options.locationLimit; dataStore_ = std::make_shared(options.localFileSystem, - options.chunkfilePool, + options.chunkFilePool, dsOptions); CHECK(nullptr != dataStore_); if (false == dataStore_->Initialize()) { @@ -147,7 +147,7 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { nodeOptions_.snapshot_throttle = options.snapshotThrottle; CurveFilesystemAdaptor* cfa = - new CurveFilesystemAdaptor(options.chunkfilePool, + new CurveFilesystemAdaptor(options.chunkFilePool, options.localFileSystem); std::vector filterList; std::string snapshotMeta(BRAFT_SNAPSHOT_META_FILE); diff --git a/src/chunkserver/datastore/chunkserver_chunkfile.cpp b/src/chunkserver/datastore/chunkserver_chunkfile.cpp index 03e9b82432..f96acf73ee 100644 --- a/src/chunkserver/datastore/chunkserver_chunkfile.cpp +++ b/src/chunkserver/datastore/chunkserver_chunkfile.cpp @@ -129,7 +129,7 @@ CSErrorCode ChunkFileMetaPage::decode(const char* buf) { } CSChunkFile::CSChunkFile(std::shared_ptr lfs, - std::shared_ptr chunkfilePool, + std::shared_ptr chunkFilePool, const ChunkOptions& options) : fd_(-1), size_(options.chunkSize), @@ -138,7 +138,7 @@ CSChunkFile::CSChunkFile(std::shared_ptr lfs, baseDir_(options.baseDir), isCloneChunk_(false), snapshot_(nullptr), - chunkfilePool_(chunkfilePool), + chunkFilePool_(chunkFilePool), lfs_(lfs), metric_(options.metric) { CHECK(!baseDir_.empty()) << "Create chunk file failed"; @@ -187,7 +187,7 @@ CSErrorCode CSChunkFile::Open(bool createFile) { char buf[pageSize_]; // NOLINT memset(buf, 0, sizeof(buf)); metaPage_.encode(buf); - int rc = chunkfilePool_->GetChunk(chunkFilePath, buf); + int rc = chunkFilePool_->GetFile(chunkFilePath, buf); // 并发创建文件时,可能前面线程已经创建成功,那么这里会返回-EEXIST // 此时可以继续open已经生成的文件 // 不过当前同一个chunk的操作是串行的,不会出现这个问题 @@ -248,7 +248,7 @@ CSErrorCode CSChunkFile::LoadSnapshot(SequenceNum sn) { options.pageSize = pageSize_; options.metric = metric_; snapshot_ = new(std::nothrow) CSSnapshot(lfs_, - chunkfilePool_, + chunkFilePool_, options); CHECK(snapshot_ != nullptr) << "Failed to new CSSnapshot!" 
<< "ChunkID:" << chunkId_ @@ -320,7 +320,7 @@ CSErrorCode CSChunkFile::Write(SequenceNum sn, options.pageSize = pageSize_; options.metric = metric_; snapshot_ = new(std::nothrow) CSSnapshot(lfs_, - chunkfilePool_, + chunkFilePool_, options); CHECK(snapshot_ != nullptr) << "Failed to new CSSnapshot!"; CSErrorCode errorCode = snapshot_->Open(true); @@ -582,7 +582,7 @@ CSErrorCode CSChunkFile::Delete(SequenceNum sn) { lfs_->Close(fd_); fd_ = -1; } - int ret = chunkfilePool_->RecycleChunk(path()); + int ret = chunkFilePool_->RecycleFile(path()); if (ret < 0) return CSErrorCode::InternalError; diff --git a/src/chunkserver/datastore/chunkserver_chunkfile.h b/src/chunkserver/datastore/chunkserver_chunkfile.h index b440575a40..344e25f040 100644 --- a/src/chunkserver/datastore/chunkserver_chunkfile.h +++ b/src/chunkserver/datastore/chunkserver_chunkfile.h @@ -39,7 +39,7 @@ #include "src/chunkserver/datastore/filename_operator.h" #include "src/chunkserver/datastore/chunkserver_snapshot.h" #include "src/chunkserver/datastore/define.h" -#include "src/chunkserver/datastore/chunkfile_pool.h" +#include "src/chunkserver/datastore/file_pool.h" namespace curve { namespace chunkserver { @@ -50,7 +50,7 @@ using curve::common::WriteLockGuard; using curve::common::ReadLockGuard; using curve::common::BitRange; -class ChunkfilePool; +class FilePool; class CSSnapshot; struct DataStoreMetric; @@ -117,7 +117,7 @@ struct ChunkOptions { class CSChunkFile { public: CSChunkFile(std::shared_ptr lfs, - std::shared_ptr ChunkfilePool, + std::shared_ptr chunkFilePool, const ChunkOptions& options); virtual ~CSChunkFile(); @@ -349,8 +349,8 @@ class CSChunkFile { RWLock rwLock_; // 快照文件指针 CSSnapshot* snapshot_; - // 依赖chunkfilepool创建删除文件 - std::shared_ptr chunkfilePool_; + // 依赖FilePool创建删除文件 + std::shared_ptr chunkFilePool_; // 依赖本地文件系统操作文件 std::shared_ptr lfs_; // datastore内部统计指标 diff --git a/src/chunkserver/datastore/chunkserver_datastore.cpp b/src/chunkserver/datastore/chunkserver_datastore.cpp index f97b614ba5..cc360a4531 100644 --- a/src/chunkserver/datastore/chunkserver_datastore.cpp +++ b/src/chunkserver/datastore/chunkserver_datastore.cpp @@ -35,17 +35,17 @@ namespace curve { namespace chunkserver { CSDataStore::CSDataStore(std::shared_ptr lfs, - std::shared_ptr chunkfilePool, + std::shared_ptr chunkFilePool, const DataStoreOptions& options) : chunkSize_(options.chunkSize), pageSize_(options.pageSize), baseDir_(options.baseDir), locationLimit_(options.locationLimit), - chunkfilePool_(chunkfilePool), + chunkFilePool_(chunkFilePool), lfs_(lfs) { CHECK(!baseDir_.empty()) << "Create datastore failed"; CHECK(lfs_ != nullptr) << "Create datastore failed"; - CHECK(chunkfilePool_ != nullptr) << "Create datastore failed"; + CHECK(chunkFilePool_ != nullptr) << "Create datastore failed"; } CSDataStore::~CSDataStore() { @@ -190,7 +190,7 @@ CSErrorCode CSDataStore::CreateChunkFile(const ChunkOptions & options, return CSErrorCode::InvalidArgError; } auto tempChunkFile = std::make_shared(lfs_, - chunkfilePool_, + chunkFilePool_, options); CSErrorCode errorCode = tempChunkFile->Open(true); if (errorCode != CSErrorCode::Success) { @@ -369,7 +369,7 @@ CSErrorCode CSDataStore::loadChunkFile(ChunkID id) { options.metric = metric_; CSChunkFilePtr chunkFilePtr = std::make_shared(lfs_, - chunkfilePool_, + chunkFilePool_, options); CSErrorCode errorCode = chunkFilePtr->Open(false); if (errorCode != CSErrorCode::Success) diff --git a/src/chunkserver/datastore/chunkserver_datastore.h b/src/chunkserver/datastore/chunkserver_datastore.h index 
e26df7449a..6cb10d1c1d 100644 --- a/src/chunkserver/datastore/chunkserver_datastore.h +++ b/src/chunkserver/datastore/chunkserver_datastore.h @@ -37,7 +37,7 @@ #include "src/common/concurrent/concurrent.h" #include "src/chunkserver/datastore/define.h" #include "src/chunkserver/datastore/chunkserver_chunkfile.h" -#include "src/chunkserver/datastore/chunkfile_pool.h" +#include "src/chunkserver/datastore/file_pool.h" #include "src/fs/local_filesystem.h" namespace curve { @@ -141,7 +141,7 @@ class CSDataStore { CSDataStore() {} CSDataStore(std::shared_ptr lfs, - std::shared_ptr chunkfilePool, + std::shared_ptr chunkFilePool, const DataStoreOptions& options); virtual ~CSDataStore(); /** @@ -294,7 +294,7 @@ class CSDataStore { // 为chunkid->chunkfile的映射 CSMetaCache metaCache_; // chunkfile池,依赖该池子创建回收chunk文件或快照文件 - std::shared_ptr chunkfilePool_; + std::shared_ptr chunkFilePool_; // 本地文件系统 std::shared_ptr lfs_; // datastore的内部统计信息 diff --git a/src/chunkserver/datastore/chunkserver_snapshot.cpp b/src/chunkserver/datastore/chunkserver_snapshot.cpp index 6aa80fc1aa..3aa793a398 100644 --- a/src/chunkserver/datastore/chunkserver_snapshot.cpp +++ b/src/chunkserver/datastore/chunkserver_snapshot.cpp @@ -105,7 +105,7 @@ SnapshotMetaPage& SnapshotMetaPage::operator =( } CSSnapshot::CSSnapshot(std::shared_ptr lfs, - std::shared_ptr chunkfilePool, + std::shared_ptr chunkFilePool, const ChunkOptions& options) : fd_(-1), chunkId_(options.id), @@ -113,7 +113,7 @@ CSSnapshot::CSSnapshot(std::shared_ptr lfs, pageSize_(options.pageSize), baseDir_(options.baseDir), lfs_(lfs), - chunkfilePool_(chunkfilePool), + chunkFilePool_(chunkFilePool), metric_(options.metric) { CHECK(!baseDir_.empty()) << "Create snapshot failed"; CHECK(lfs_ != nullptr) << "Create snapshot failed"; @@ -146,7 +146,7 @@ CSErrorCode CSSnapshot::Open(bool createFile) { char buf[pageSize_]; // NOLINT memset(buf, 0, sizeof(buf)); metaPage_.encode(buf); - int ret = chunkfilePool_->GetChunk(snapshotPath, buf); + int ret = chunkFilePool_->GetFile(snapshotPath, buf); if (ret != 0) { LOG(ERROR) << "Error occured when create snapshot." 
<< " filepath = " << snapshotPath; @@ -192,7 +192,7 @@ CSErrorCode CSSnapshot::Delete() { lfs_->Close(fd_); fd_ = -1; } - int ret = chunkfilePool_->RecycleChunk(path()); + int ret = chunkFilePool_->RecycleFile(path()); if (ret < 0) return CSErrorCode::InternalError; return CSErrorCode::Success; diff --git a/src/chunkserver/datastore/chunkserver_snapshot.h b/src/chunkserver/datastore/chunkserver_snapshot.h index aa9e8d376c..2e745d5b9a 100644 --- a/src/chunkserver/datastore/chunkserver_snapshot.h +++ b/src/chunkserver/datastore/chunkserver_snapshot.h @@ -33,7 +33,7 @@ #include "src/chunkserver/datastore/filename_operator.h" #include "src/chunkserver/datastore/define.h" #include "src/fs/local_filesystem.h" -#include "src/chunkserver/datastore/chunkfile_pool.h" +#include "src/chunkserver/datastore/file_pool.h" namespace curve { namespace chunkserver { @@ -41,7 +41,7 @@ namespace chunkserver { using curve::common::Bitmap; using curve::fs::LocalFileSystem; -class ChunkfilePool; +class FilePool; class CSChunkFile; struct ChunkOptions; struct DataStoreMetric; @@ -79,7 +79,7 @@ struct SnapshotMetaPage { class CSSnapshot { public: CSSnapshot(std::shared_ptr lfs, - std::shared_ptr ChunkfilePool, + std::shared_ptr chunkFilePool, const ChunkOptions& options); virtual ~CSSnapshot(); /** @@ -181,8 +181,8 @@ class CSSnapshot { std::set dirtyPages_; // 依赖本地文件系统操作文件 std::shared_ptr lfs_; - // 依赖chunkfilepool创建删除文件 - std::shared_ptr chunkfilePool_; + // 依赖FilePool创建删除文件 + std::shared_ptr chunkFilePool_; // datastore内部统计指标 std::shared_ptr metric_; }; diff --git a/src/chunkserver/datastore/chunkfile_pool.cpp b/src/chunkserver/datastore/file_pool.cpp similarity index 79% rename from src/chunkserver/datastore/chunkfile_pool.cpp rename to src/chunkserver/datastore/file_pool.cpp index 9e93e3f0c8..34d6da6402 100644 --- a/src/chunkserver/datastore/chunkfile_pool.cpp +++ b/src/chunkserver/datastore/file_pool.cpp @@ -35,44 +35,44 @@ #include "src/common/crc32.h" #include "src/common/configuration.h" #include "src/common/curve_define.h" -#include "src/chunkserver/datastore/chunkfile_pool.h" +#include "src/chunkserver/datastore/file_pool.h" -using curve::common::kChunkFilePoolMaigic; +using curve::common::kFilePoolMaigic; namespace curve { namespace chunkserver { -const char* ChunkfilePoolHelper::kChunkSize = "chunkSize"; -const char* ChunkfilePoolHelper::kMetaPageSize = "metaPageSize"; -const char* ChunkfilePoolHelper::kChunkFilePoolPath = "chunkfilepool_path"; -const char* ChunkfilePoolHelper::kCRC = "crc"; -const uint32_t ChunkfilePoolHelper::kPersistSize = 4096; +const char* FilePoolHelper::kFileSize = "chunkSize"; +const char* FilePoolHelper::kMetaPageSize = "metaPageSize"; +const char* FilePoolHelper::kFilePoolPath = "chunkfilepool_path"; +const char* FilePoolHelper::kCRC = "crc"; +const uint32_t FilePoolHelper::kPersistSize = 4096; -int ChunkfilePoolHelper::PersistEnCodeMetaInfo( +int FilePoolHelper::PersistEnCodeMetaInfo( std::shared_ptr fsptr, uint32_t chunkSize, uint32_t metaPageSize, - const std::string& chunkfilepool_path, + const std::string& filePoolPath, const std::string& persistPath) { Json::Value root; - root[kChunkSize] = chunkSize; + root[kFileSize] = chunkSize; root[kMetaPageSize] = metaPageSize; - root[kChunkFilePoolPath] = chunkfilepool_path; + root[kFilePoolPath] = filePoolPath; - uint32_t crcsize = sizeof(kChunkFilePoolMaigic) + + uint32_t crcsize = sizeof(kFilePoolMaigic) + sizeof(chunkSize) + sizeof(metaPageSize) + - chunkfilepool_path.size(); + filePoolPath.size(); char* crcbuf = new 
char[crcsize]; - ::memcpy(crcbuf, kChunkFilePoolMaigic, - sizeof(kChunkFilePoolMaigic)); - ::memcpy(crcbuf + sizeof(kChunkFilePoolMaigic), + ::memcpy(crcbuf, kFilePoolMaigic, + sizeof(kFilePoolMaigic)); + ::memcpy(crcbuf + sizeof(kFilePoolMaigic), &chunkSize, sizeof(uint32_t)); - ::memcpy(crcbuf + sizeof(uint32_t) + sizeof(kChunkFilePoolMaigic), + ::memcpy(crcbuf + sizeof(uint32_t) + sizeof(kFilePoolMaigic), &metaPageSize, sizeof(uint32_t)); - ::memcpy(crcbuf + 2*sizeof(uint32_t) + sizeof(kChunkFilePoolMaigic), - chunkfilepool_path.c_str(), - chunkfilepool_path.size()); + ::memcpy(crcbuf + 2*sizeof(uint32_t) + sizeof(kFilePoolMaigic), + filePoolPath.c_str(), + filePoolPath.size()); uint32_t crc = ::curve::common::CRC32(crcbuf, crcsize); delete[] crcbuf; @@ -107,7 +107,7 @@ int ChunkfilePoolHelper::PersistEnCodeMetaInfo( return 0; } -int ChunkfilePoolHelper::DecodeMetaInfoFromMetaFile( +int FilePoolHelper::DecodeMetaInfoFromMetaFile( std::shared_ptr fsptr, const std::string& metaFilePath, uint32_t metaFileSize, @@ -141,8 +141,8 @@ int ChunkfilePoolHelper::DecodeMetaInfoFromMetaFile( break; } - if (!value[kChunkSize].isNull()) { - *chunksize = value[kChunkSize].asUInt(); + if (!value[kFileSize].isNull()) { + *chunksize = value[kFileSize].asUInt(); } else { LOG(ERROR) << "chunkfile meta file got error!" << " no chunksize!"; @@ -157,11 +157,11 @@ int ChunkfilePoolHelper::DecodeMetaInfoFromMetaFile( break; } - if (!value[kChunkFilePoolPath].isNull()) { - *chunkfilePath = value[kChunkFilePoolPath].asString(); + if (!value[kFilePoolPath].isNull()) { + *chunkfilePath = value[kFilePoolPath].asString(); } else { LOG(ERROR) << "chunkfile meta file got error!" - << " no chunkfilepool path!"; + << " no FilePool path!"; break; } @@ -182,17 +182,17 @@ int ChunkfilePoolHelper::DecodeMetaInfoFromMetaFile( } uint32_t crcCheckSize = 2*sizeof(uint32_t) + - sizeof(kChunkFilePoolMaigic) + + sizeof(kFilePoolMaigic) + chunkfilePath->size(); std::unique_ptr crcCheckBuf(new char[crcCheckSize]); - ::memcpy(crcCheckBuf.get(), kChunkFilePoolMaigic, sizeof(kChunkFilePoolMaigic)); // NOLINT - ::memcpy(crcCheckBuf.get() + sizeof(kChunkFilePoolMaigic), + ::memcpy(crcCheckBuf.get(), kFilePoolMaigic, sizeof(kFilePoolMaigic)); // NOLINT + ::memcpy(crcCheckBuf.get() + sizeof(kFilePoolMaigic), chunksize, sizeof(uint32_t)); - ::memcpy(crcCheckBuf.get() + sizeof(uint32_t) + sizeof(kChunkFilePoolMaigic), // NOLINT + ::memcpy(crcCheckBuf.get() + sizeof(uint32_t) + sizeof(kFilePoolMaigic), // NOLINT metapagesize, sizeof(uint32_t)); - ::memcpy(crcCheckBuf.get() + 2*sizeof(uint32_t) + sizeof(kChunkFilePoolMaigic), // NOLINT + ::memcpy(crcCheckBuf.get() + 2*sizeof(uint32_t) + sizeof(kFilePoolMaigic), // NOLINT chunkfilePath->c_str(), chunkfilePath->size()); uint32_t crcCalc = ::curve::common::CRC32(crcCheckBuf.get(), crcCheckSize); @@ -205,16 +205,16 @@ int ChunkfilePoolHelper::DecodeMetaInfoFromMetaFile( return 0; } -ChunkfilePool::ChunkfilePool(std::shared_ptr fsptr): - currentmaxfilenum_(0) { +FilePool::FilePool(std::shared_ptr fsptr): + currentmaxfilenum_(0) { CHECK(fsptr != nullptr) << "fs ptr allocate failed!"; fsptr_ = fsptr; tmpChunkvec_.clear(); } -bool ChunkfilePool::Initialize(const ChunkfilePoolOptions& cfopt) { - chunkPoolOpt_ = cfopt; - if (chunkPoolOpt_.getChunkFromPool) { +bool FilePool::Initialize(const FilePoolOptions& cfopt) { + poolOpt_ = cfopt; + if (poolOpt_.getFileFromPool) { if (!CheckValid()) { LOG(ERROR) << "check valid failed!"; return false; @@ -227,7 +227,7 @@ bool ChunkfilePool::Initialize(const 
ChunkfilePoolOptions& cfopt) { return false; } } else { - currentdir_ = chunkPoolOpt_.chunkFilePoolDir; + currentdir_ = poolOpt_.filePoolDir; if (!fsptr_->DirExists(currentdir_.c_str())) { return fsptr_->Mkdir(currentdir_.c_str()) == 0; } @@ -235,36 +235,36 @@ bool ChunkfilePool::Initialize(const ChunkfilePoolOptions& cfopt) { return true; } -bool ChunkfilePool::CheckValid() { +bool FilePool::CheckValid() { uint32_t chunksize = 0; uint32_t metapagesize = 0; - std::string chunkfilePath; + std::string filePath; - int ret = ChunkfilePoolHelper::DecodeMetaInfoFromMetaFile(fsptr_, - chunkPoolOpt_.metaPath, - chunkPoolOpt_.cpMetaFileSize, + int ret = FilePoolHelper::DecodeMetaInfoFromMetaFile(fsptr_, + poolOpt_.metaPath, + poolOpt_.metaFileSize, &chunksize, &metapagesize, - &chunkfilePath); + &filePath); if (ret == -1) { LOG(ERROR) << "Decode meta info from meta file failed!"; return false; } - currentdir_ = chunkfilePath; + currentdir_ = filePath; currentState_.chunkSize = chunksize; currentState_.metaPageSize = metapagesize; return true; } -int ChunkfilePool::GetChunk(const std::string& targetpath, char* metapage) { +int FilePool::GetFile(const std::string& targetpath, char* metapage) { int ret = -1; int retry = 0; - while (retry < chunkPoolOpt_.retryTimes) { + while (retry < poolOpt_.retryTimes) { uint64_t chunkID; std::string srcpath; - if (chunkPoolOpt_.getChunkFromPool) { + if (poolOpt_.getFileFromPool) { std::unique_lock lk(mtx_); if (tmpChunkvec_.empty()) { LOG(ERROR) << "no avaliable chunk!"; @@ -290,7 +290,7 @@ int ChunkfilePool::GetChunk(const std::string& targetpath, char* metapage) { << ", dist path = " << targetpath.c_str(); if (rc) { // 这里使用RENAME_NOREPLACE模式来rename文件当目标文件存在时,不允许被覆盖 - // 也就是说通过chunkfilepool创建文件需要保证目标文件不存在datastore可能存 + // 也就是说通过FilePool创建文件需要保证目标文件不存在datastore可能存 // 在并发创建文件的场景通过rename一来保证文件创建的原子性,二来保证不会覆盖已 // 有文件 ret = fsptr_->Rename(srcpath.c_str(), targetpath.c_str(), @@ -303,7 +303,7 @@ int ChunkfilePool::GetChunk(const std::string& targetpath, char* metapage) { } else if (ret < 0) { LOG(ERROR) << "file rename failed, " << srcpath.c_str(); } else { - LOG(INFO) << "get chunk success! now pool size = " + LOG(INFO) << "get file success! 
now pool size = " << tmpChunkvec_.size(); break; } @@ -316,8 +316,8 @@ int ChunkfilePool::GetChunk(const std::string& targetpath, char* metapage) { return ret; } -int ChunkfilePool::AllocateChunk(const std::string& chunkpath) { - uint64_t chunklen = chunkPoolOpt_.chunkSize + chunkPoolOpt_.metaPageSize; +int FilePool::AllocateChunk(const std::string& chunkpath) { + uint64_t chunklen = poolOpt_.fileSize + poolOpt_.metaPageSize; int ret = fsptr_->Open(chunkpath.c_str(), O_RDWR | O_CREAT); if (ret < 0) { @@ -359,7 +359,7 @@ int ChunkfilePool::AllocateChunk(const std::string& chunkpath) { return ret; } -bool ChunkfilePool::WriteMetaPage(const std::string& sourcepath, char* page) { +bool FilePool::WriteMetaPage(const std::string& sourcepath, char* page) { int fd = -1; int ret = -1; @@ -371,8 +371,8 @@ bool ChunkfilePool::WriteMetaPage(const std::string& sourcepath, char* page) { fd = ret; - ret = fsptr_->Write(fd, page, 0, chunkPoolOpt_.metaPageSize); - if (ret != chunkPoolOpt_.metaPageSize) { + ret = fsptr_->Write(fd, page, 0, poolOpt_.metaPageSize); + if (ret != poolOpt_.metaPageSize) { fsptr_->Close(fd); LOG(ERROR) << "write metapage failed, " << sourcepath.c_str(); return false; @@ -393,8 +393,8 @@ bool ChunkfilePool::WriteMetaPage(const std::string& sourcepath, char* page) { return true; } -int ChunkfilePool::RecycleChunk(const std::string& chunkpath) { - if (!chunkPoolOpt_.getChunkFromPool) { +int FilePool::RecycleFile(const std::string& chunkpath) { + if (!poolOpt_.getFileFromPool) { int ret = fsptr_->Delete(chunkpath.c_str()); if (ret < 0) { LOG(ERROR) << "Recycle chunk failed!"; @@ -402,7 +402,7 @@ int ChunkfilePool::RecycleChunk(const std::string& chunkpath) { } } else { // 检查该待回收的文件大小是否符合要求,不符合就直接删掉 - uint64_t chunklen = chunkPoolOpt_.chunkSize+chunkPoolOpt_.metaPageSize; + uint64_t chunklen = poolOpt_.fileSize+poolOpt_.metaPageSize; int fd = fsptr_->Open(chunkpath.c_str(), O_RDWR); if (fd < 0) { LOG(ERROR) << "file open failed! 
delete file dirctly" @@ -456,26 +456,26 @@ int ChunkfilePool::RecycleChunk(const std::string& chunkpath) { return 0; } -void ChunkfilePool::UnInitialize() { +void FilePool::UnInitialize() { currentdir_ = ""; std::unique_lock lk(mtx_); tmpChunkvec_.clear(); } -bool ChunkfilePool::ScanInternal() { +bool FilePool::ScanInternal() { uint64_t maxnum = 0; std::vector tmpvec; int ret = fsptr_->List(currentdir_.c_str(), &tmpvec); if (ret < 0) { - LOG(ERROR) << "list chunkfile pool dir failed!"; + LOG(ERROR) << "list file pool dir failed!"; return false; } else { - LOG(INFO) << "list chunkfile pool dir done, size = " + LOG(INFO) << "list file pool dir done, size = " << tmpvec.size(); } - uint64_t chunklen = chunkPoolOpt_.chunkSize + chunkPoolOpt_.metaPageSize; + uint64_t chunklen = poolOpt_.fileSize + poolOpt_.metaPageSize; for (auto& iter : tmpvec) { auto it = std::find_if(iter.begin(), iter.end(), [](unsigned char c) { @@ -526,12 +526,12 @@ bool ChunkfilePool::ScanInternal() { return true; } -size_t ChunkfilePool::Size() { +size_t FilePool::Size() { std::unique_lock lk(mtx_); return tmpChunkvec_.size(); } -ChunkFilePoolState_t ChunkfilePool::GetState() { +FilePoolState_t FilePool::GetState() { return currentState_; } diff --git a/src/chunkserver/datastore/chunkfile_pool.h b/src/chunkserver/datastore/file_pool.h similarity index 63% rename from src/chunkserver/datastore/chunkfile_pool.h rename to src/chunkserver/datastore/file_pool.h index 3271426c1c..f75b9247ce 100644 --- a/src/chunkserver/datastore/chunkfile_pool.h +++ b/src/chunkserver/datastore/file_pool.h @@ -20,8 +20,8 @@ * Author: tongguangxun */ -#ifndef SRC_CHUNKSERVER_DATASTORE_CHUNKFILE_POOL_H_ -#define SRC_CHUNKSERVER_DATASTORE_CHUNKFILE_POOL_H_ +#ifndef SRC_CHUNKSERVER_DATASTORE_FILE_POOL_H_ +#define SRC_CHUNKSERVER_DATASTORE_FILE_POOL_H_ #include @@ -40,75 +40,63 @@ using curve::fs::LocalFileSystem; namespace curve { namespace chunkserver { -// chunkfilepool 配置选项 -struct ChunkfilePoolOptions { - // 开关,是否从chunkfile pool取chunk - bool getChunkFromPool; - - // chunkfilepool 文件夹路径,当getChunkFromPool为false的时候,需要设置该选项 - char chunkFilePoolDir[256]; - - // 配置文件的chunk大小 - uint32_t chunkSize; - - // 配置文件中的metapage大小 +struct FilePoolOptions { + bool getFileFromPool; + // it should be set when getFileFromPool=false + char filePoolDir[256]; + uint32_t fileSize; uint32_t metaPageSize; - - // chunkfilepool meta文件地址 char metaPath[256]; - - // cpmetafilesize是chunkfilepool的 metafile长度 - uint32_t cpMetaFileSize; - - // GetChunk重试次数 + uint32_t metaFileSize; + // retry times for get file uint16_t retryTimes; - ChunkfilePoolOptions() { - getChunkFromPool = true; - cpMetaFileSize = 4096; - chunkSize = 0; + FilePoolOptions() { + getFileFromPool = true; + metaFileSize = 4096; + fileSize = 0; metaPageSize = 0; retryTimes = 5; ::memset(metaPath, 0, 256); - ::memset(chunkFilePoolDir, 0, 256); + ::memset(filePoolDir, 0, 256); } - ChunkfilePoolOptions& operator=(const ChunkfilePoolOptions& other) { - getChunkFromPool = other.getChunkFromPool; - cpMetaFileSize = other.cpMetaFileSize; - chunkSize = other.chunkSize; + FilePoolOptions& operator=(const FilePoolOptions& other) { + getFileFromPool = other.getFileFromPool; + metaFileSize = other.metaFileSize; + fileSize = other.fileSize; retryTimes = other.retryTimes; metaPageSize = other.metaPageSize; ::memcpy(metaPath, other.metaPath, 256); - ::memcpy(chunkFilePoolDir, other.chunkFilePoolDir, 256); + ::memcpy(filePoolDir, other.filePoolDir, 256); return *this; } - ChunkfilePoolOptions(const ChunkfilePoolOptions& other) { 
- getChunkFromPool = other.getChunkFromPool; - cpMetaFileSize = other.cpMetaFileSize; - chunkSize = other.chunkSize; + FilePoolOptions(const FilePoolOptions& other) { + getFileFromPool = other.getFileFromPool; + metaFileSize = other.metaFileSize; + fileSize = other.fileSize; retryTimes = other.retryTimes; metaPageSize = other.metaPageSize; ::memcpy(metaPath, other.metaPath, 256); - ::memcpy(chunkFilePoolDir, other.chunkFilePoolDir, 256); + ::memcpy(filePoolDir, other.filePoolDir, 256); } }; -typedef struct ChunkFilePoolState { +typedef struct FilePoolState { // 预分配的chunk还有多少没有被datastore使用 uint64_t preallocatedChunksLeft; // chunksize uint32_t chunkSize; // metapage size uint32_t metaPageSize; -} ChunkFilePoolState_t; +} FilePoolState_t; -class ChunkfilePoolHelper { +class FilePoolHelper { public: - static const char* kChunkSize; + static const char* kFileSize; static const char* kMetaPageSize; - static const char* kChunkFilePoolPath; + static const char* kFilePoolPath; static const char* kCRC; static const uint32_t kPersistSize; @@ -117,14 +105,14 @@ class ChunkfilePoolHelper { * @param[in]: 用于持久化的文件系统 * @param[in]: chunkSize每个chunk的大小 * @param[in]: metaPageSize每个chunkfile的metapage大小 - * @param[in]: chunkfilepool_path是chunk池的路径 + * @param[in]: FilePool_path是chunk池的路径 * @param[in]: persistPathmeta信息要持久化的路径 * @return: 成功0, 否则-1 */ static int PersistEnCodeMetaInfo(std::shared_ptr fsptr, - uint32_t chunkSize, + uint32_t fileSize, uint32_t metaPageSize, - const std::string& chunkfilepool_path, + const std::string& filepoolPath, const std::string& persistPath); /** @@ -134,53 +122,52 @@ class ChunkfilePoolHelper { * @param[in]: meta文件大小 * @param[out]: chunkSize每个chunk的大小 * @param[out]: metaPageSize每个chunkfile的metapage大小 - * @param[out]: chunkfilepool_path是chunk池的路径 + * @param[out]: FilePool_path是chunk池的路径 * @return: 成功0, 否则-1 */ static int DecodeMetaInfoFromMetaFile( std::shared_ptr fsptr, const std::string& metaFilePath, uint32_t metaFileSize, - uint32_t* chunkSize, + uint32_t* fileSize, uint32_t* metaPageSize, - std::string* chunkfilepool_path); + std::string* filepoolPath); }; -class CURVE_CACHELINE_ALIGNMENT ChunkfilePool { +class CURVE_CACHELINE_ALIGNMENT FilePool { public: - // fsptr 本地文件系统. 
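[Annotation, not part of the patch] The FilePoolOptions/FilePool renamed above is what lets a single pool implementation pre-allocate both data chunks and WAL segments. A minimal initialization sketch; the sizes, paths and the lfs variable are illustrative assumptions, not values taken from this patch:

    FilePoolOptions walOpt;
    walOpt.getFileFromPool = true;
    walOpt.fileSize = 8 * 1024 * 1024;   // WAL segment payload size (illustrative)
    walOpt.metaPageSize = 4096;          // per-segment metapage size (illustrative)
    ::snprintf(walOpt.filePoolDir, sizeof(walOpt.filePoolDir), "./walfilepool/");
    ::snprintf(walOpt.metaPath, sizeof(walOpt.metaPath), "./walfilepool.meta");

    // lfs is an already-created std::shared_ptr<LocalFileSystem>
    std::shared_ptr<FilePool> walPool = std::make_shared<FilePool>(lfs);
    if (!walPool->Initialize(walOpt)) {
        LOG(ERROR) << "failed to initialize wal file pool";
    }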
- explicit ChunkfilePool(std::shared_ptr fsptr); - virtual ~ChunkfilePool() = default; + explicit FilePool(std::shared_ptr fsptr); + virtual ~FilePool() = default; /** * 初始化函数 * @param: cfop是配置选项 */ - virtual bool Initialize(const ChunkfilePoolOptions& cfop); + virtual bool Initialize(const FilePoolOptions& cfop); /** * datastore通过GetChunk接口获取新的chunk,GetChunk内部会将metapage原子赋值后返回。 * @param: chunkpath是新的chunkfile路径 * @param: metapage是新的chunk的metapage信息 */ - virtual int GetChunk(const std::string& chunkpath, char* metapage); + virtual int GetFile(const std::string& chunkpath, char* metapage); /** * datastore删除chunk直接回收,不真正删除 * @param: chunkpath是需要回收的chunk路径 */ - virtual int RecycleChunk(const std::string& chunkpath); + virtual int RecycleFile(const std::string& chunkpath); /** * 获取当前chunkfile pool大小 */ virtual size_t Size(); /** - * 获取chunkfilePool的分配状态 + * 获取FilePool的分配状态 */ - virtual ChunkFilePoolState_t GetState(); + virtual FilePoolState_t GetState(); /** - * 获取当前chunkfilepool的option配置信息 + * 获取当前FilePool的option配置信息 */ - virtual ChunkfilePoolOptions GetChunkFilePoolOpt() { - return chunkPoolOpt_; + virtual FilePoolOptions GetFilePoolOpt() { + return poolOpt_; } /** * 析构,释放资源 @@ -191,6 +178,7 @@ class CURVE_CACHELINE_ALIGNMENT ChunkfilePool { * 测试使用 */ virtual void SetLocalFileSystem(std::shared_ptr fs) { + CHECK(fs != nullptr) << "fs ptr allocate failed!"; fsptr_ = fs; } @@ -207,7 +195,7 @@ class CURVE_CACHELINE_ALIGNMENT ChunkfilePool { */ bool WriteMetaPage(const std::string& sourcepath, char* page); /** - * 直接分配chunk,不从chunkfilepool获取 + * 直接分配chunk,不从FilePool获取 * @param: chunkpath为datastore中chunk文件的路径 * @return: 成功返回0,否则返回小于0 */ @@ -217,7 +205,7 @@ class CURVE_CACHELINE_ALIGNMENT ChunkfilePool { // 保护tmpChunkvec_ std::mutex mtx_; - // 当前chunkfilepool的预分配文件,文件夹路径 + // 当前FilePool的预分配文件,文件夹路径 std::string currentdir_; // chunkserver端封装的底层文件系统接口,提供操作文件的基本接口 @@ -229,13 +217,13 @@ class CURVE_CACHELINE_ALIGNMENT ChunkfilePool { // 当前最大的文件名数字格式 std::atomic currentmaxfilenum_; - // chunkfilepool配置选项 - ChunkfilePoolOptions chunkPoolOpt_; + // FilePool配置选项 + FilePoolOptions poolOpt_; - // chunkfilepool分配状态 - ChunkFilePoolState_t currentState_; + // FilePool分配状态 + FilePoolState_t currentState_; }; } // namespace chunkserver } // namespace curve -#endif // SRC_CHUNKSERVER_DATASTORE_CHUNKFILE_POOL_H_ +#endif // SRC_CHUNKSERVER_DATASTORE_FILE_POOL_H_ diff --git a/src/chunkserver/passive_getfn.cpp b/src/chunkserver/passive_getfn.cpp index ef3fe23bd3..e04914d58f 100644 --- a/src/chunkserver/passive_getfn.cpp +++ b/src/chunkserver/passive_getfn.cpp @@ -29,15 +29,25 @@ namespace curve { namespace chunkserver { uint32_t GetChunkLeftFunc(void* arg) { - ChunkfilePool* chunkfilePool = reinterpret_cast(arg); + FilePool* chunkFilePool = reinterpret_cast(arg); uint32_t chunkLeft = 0; - if (chunkfilePool != nullptr) { - ChunkFilePoolState poolState = chunkfilePool->GetState(); + if (chunkFilePool != nullptr) { + FilePoolState poolState = chunkFilePool->GetState(); chunkLeft = poolState.preallocatedChunksLeft; } return chunkLeft; } +uint32_t GetWalSegmentLeftFunc(void* arg) { + FilePool* walFilePool = reinterpret_cast(arg); + uint32_t segmentLeft = 0; + if (walFilePool != nullptr) { + FilePoolState poolState = walFilePool->GetState(); + segmentLeft = poolState.preallocatedChunksLeft; + } + return segmentLeft; +} + uint32_t GetDatastoreChunkCountFunc(void* arg) { CSDataStore* dataStore = reinterpret_cast(arg); uint32_t chunkCount = 0; diff --git a/src/chunkserver/passive_getfn.h b/src/chunkserver/passive_getfn.h 
index 689a32f282..c8b4805c46 100644 --- a/src/chunkserver/passive_getfn.h +++ b/src/chunkserver/passive_getfn.h @@ -25,7 +25,7 @@ #include "src/chunkserver/trash.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/datastore/chunkfile_pool.h" +#include "src/chunkserver/datastore/file_pool.h" namespace curve { namespace chunkserver { @@ -65,6 +65,11 @@ namespace chunkserver { * @param arg: chunkfilepool的对象指针 */ uint32_t GetChunkLeftFunc(void* arg); + /** + * 获取walfilepool中剩余chunk的数量 + * @param arg: walfilepool的对象指针 + */ + uint32_t GetWalSegmentLeftFunc(void* arg); /** * 获取trash中chunk的数量 * @param arg: trash的对象指针 diff --git a/src/chunkserver/raftlog/BUILD b/src/chunkserver/raftlog/BUILD new file mode 100644 index 0000000000..0c2620bcd4 --- /dev/null +++ b/src/chunkserver/raftlog/BUILD @@ -0,0 +1,54 @@ +# +# Copyright (c) 2020 NetEase Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +COPTS = [ + "-DGFLAGS=gflags", + "-DOS_LINUX", + "-DSNAPPY", + "-DHAVE_SSE42", + "-fno-omit-frame-pointer", + "-momit-leaf-frame-pointer", + "-msse4.2", + "-pthread", + "-Wsign-compare", + "-Wno-unused-parameter", + "-Wno-unused-variable", + "-Woverloaded-virtual", + "-Wnon-virtual-dtor", + "-Wno-missing-field-initializers", + "-std=c++11", +] + +cc_library( + name = "chunkserver-raft-log", + srcs = glob( + ["*.cpp"], + ), + hdrs = glob([ + "*.h", + ]), + copts = COPTS, + visibility = ["//visibility:public"], + deps = [ + "//external:braft", + "//external:bthread", + "//external:butil", + "//external:gflags", + "//external:glog", + "//external:protobuf", + "//src/chunkserver/datastore:chunkserver_datastore", + ], +) diff --git a/src/chunkserver/raftlog/curve_segment.cpp b/src/chunkserver/raftlog/curve_segment.cpp new file mode 100644 index 0000000000..b1f276f96b --- /dev/null +++ b/src/chunkserver/raftlog/curve_segment.cpp @@ -0,0 +1,831 @@ +/* + * Copyright (c) 2020 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: curve + * Created Date: 2020-09-02 + * Author: charisu + */ + +// Copyright (c) 2015 Baidu.com, Inc. All Rights Reserved +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
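[Annotation, not part of the patch] GetWalSegmentLeftFunc above mirrors GetChunkLeftFunc: it is a plain getter meant to be handed to a passive metric together with the WAL FilePool pointer, so the number of unused pre-allocated segments can be exported. One plausible wiring, assuming bvar::PassiveStatus as the metric type (an assumption; the actual metric registration is outside this hunk):

    // walPool is the FilePool backing the WAL segments
    bvar::PassiveStatus<uint32_t> walSegmentLeft(
        "chunkserver_wal_segment_left",   // metric name, illustrative
        GetWalSegmentLeftFunc,            // getter declared in passive_getfn.h
        walPool.get());                   // forwarded as the void* argument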
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Authors: Wang,Yao(wangyao02@baidu.com) +// Zhangyi Chen(chenzhangyi01@baidu.com) +// Xiong,Kai(xiongkai@baidu.com) + +#include +#include +#include +#include +#include +#include "src/chunkserver/raftlog/curve_segment.h" +#include "src/chunkserver/raftlog/define.h" + +namespace curve { +namespace chunkserver { + +DEFINE_bool(raftSyncSegments, true, "call fsync when a segment is closed"); +DEFINE_bool(enableWalDirectWrite, true, "enable wal direct write or not"); + +std::shared_ptr kWalFilePool = nullptr; + +int CurveSegment::create() { + if (!_is_open) { + CHECK(false) << "Create on a closed segment at first_index=" + << _first_index << " in " << _path; + return -1; + } + + std::string path(_path); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, _first_index); + char* metaPage = new char[_meta_page_size]; + memset(metaPage, 0, sizeof(metaPage)); + memcpy(metaPage, &_meta.bytes, sizeof(_meta.bytes)); + int res = kWalFilePool->GetFile(path, metaPage); + delete metaPage; + if (res != 0) { + LOG(ERROR) << "Get segment from chunk file pool fail!"; + return -1; + } + _fd = ::open(path.c_str(), O_RDWR|O_NOATIME, 0644); + if (_fd >= 0) { + butil::make_close_on_exec(_fd); + } + res = ::lseek(_fd, _meta_page_size, SEEK_SET); + if (res != _meta_page_size) { + LOG(ERROR) << "lseek fail!"; + return -1; + } + LOG_IF(INFO, _fd >= 0) << "Created new segment `" << path + << "' with fd=" << _fd; + if (FLAGS_raftSyncSegments) { + _direct_fd = ::open(path.c_str(), O_RDWR|O_NOATIME|O_DIRECT, 0644); + LOG_IF(FATAL, _direct_fd < 0) << "failed to open file with O_DIRECT"; + butil::make_close_on_exec(_direct_fd); + } + _meta.bytes += _meta_page_size; + _update_meta_page(); + return _fd >= 0 ? 
0 : -1; +} + +struct CurveSegment::EntryHeader { + int64_t term; + int type; + int checksum_type; + uint32_t data_len; + uint32_t data_real_len; + uint32_t data_checksum; +}; + +std::ostream& operator<<(std::ostream& os, + const CurveSegment::EntryHeader& h) { + os << "{term=" << h.term << ", type=" << h.type << ", data_len=" + << h.data_len << ", data_real_len=" << h.data_real_len + << ", checksum_type=" << h.checksum_type << ", data_checksum=" + << h.data_checksum << '}'; + return os; +} + +int ftruncate_uninterrupted(int fd, off_t length) { + int rc = 0; + do { + rc = ftruncate(fd, length); + } while (rc == -1 && errno == EINTR); + return rc; +} + +int CurveSegment::load(braft::ConfigurationManager* configuration_manager) { + int ret = 0; + + std::string path(_path); + if (_is_open) { + if (_from_pool) { + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, + _first_index); + } else { + butil::string_appendf(&path, "/" BRAFT_SEGMENT_OPEN_PATTERN, + _first_index); + } + } else { + if (_from_pool) { + butil::string_appendf(&path, "/" CURVE_SEGMENT_CLOSED_PATTERN, + _first_index, _last_index.load()); + } else { + butil::string_appendf(&path, "/" BRAFT_SEGMENT_CLOSED_PATTERN, + _first_index, _last_index.load()); + } + } + _fd = ::open(path.c_str(), O_RDWR|O_NOATIME); + if (_fd < 0) { + LOG(ERROR) << "Fail to open " << path << ", " << berror(); + return -1; + } + butil::make_close_on_exec(_fd); + if (FLAGS_raftSyncSegments) { + _direct_fd = ::open(path.c_str(), O_RDWR|O_NOATIME|O_DIRECT); + LOG_IF(FATAL, _direct_fd < 0) << "failed to open file with O_DIRECT"; + butil::make_close_on_exec(_direct_fd); + } + + // get file size + struct stat st_buf; + if (fstat(_fd, &st_buf) != 0) { + LOG(ERROR) << "Fail to get the stat of " << path << ", " << berror(); + ::close(_fd); + _fd = -1; + return -1; + } + + // load meta page + if (_from_pool && _load_meta() != 0) { + LOG(ERROR) << "Load wal meta page fail"; + return -1; + } + + // load entry index + int64_t load_size = _from_pool ? _meta.bytes : st_buf.st_size; + int64_t entry_off = _from_pool ? _meta_page_size : 0; + int64_t actual_last_index = _first_index - 1; + for (int64_t i = _first_index; entry_off < load_size; i++) { + EntryHeader header; + size_t header_size = _from_pool ? 
+ kEntryHeaderSize : kBraftEntryHeaderSize; + const int rc = _load_entry(entry_off, &header, NULL, header_size); + if (rc > 0) { + // The last log was not completely written, + // which should be truncated + break; + } + if (rc < 0) { + ret = rc; + break; + } + // rc == 0 + const int64_t skip_len = header_size + header.data_len; + if (entry_off + skip_len > load_size) { + // The last log was not completely written and it should be + // truncated + break; + } + if (header.type == braft::ENTRY_TYPE_CONFIGURATION) { + butil::IOBuf data; + // Header will be parsed again but it's fine as configuration + // changing is rare + if (_load_entry(entry_off, NULL, &data, + header_size + header.data_real_len) != 0) { + break; + } + scoped_refptr entry = new braft::LogEntry(); + entry->id.index = i; + entry->id.term = header.term; + butil::Status status = parse_configuration_meta(data, entry); + if (status.ok()) { + braft::ConfigurationEntry conf_entry(*entry); + configuration_manager->add(conf_entry); + } else { + LOG(ERROR) << "fail to parse configuration meta, path: " + << _path << " entry_off " << entry_off; + ret = -1; + break; + } + } + _offset_and_term.push_back(std::make_pair(entry_off, header.term)); + ++actual_last_index; + entry_off += skip_len; + } + + const int64_t last_index = _last_index.load(butil::memory_order_relaxed); + if (ret == 0 && !_is_open) { + if (actual_last_index < last_index) { + LOG(ERROR) << "data lost in a full segment, path: " << _path + << " first_index: " << _first_index << " expect_last_index: " + << last_index << " actual_last_index: " << actual_last_index; + ret = -1; + } else if (actual_last_index > last_index) { + // FIXME(zhengpengfei): should we ignore garbage entries silently + LOG(ERROR) << "found garbage in a full segment, path: " << _path + << " first_index: " << _first_index << " expect_last_index: " + << last_index << " actual_last_index: " << actual_last_index; + ret = -1; + } + } + + if (ret != 0) { + return ret; + } + + if (_is_open) { + _last_index = actual_last_index; + } + + // truncate last uncompleted entry + if (!_from_pool && entry_off != st_buf.st_size) { + LOG(INFO) << "truncate last uncompleted write entry, path: " << _path + << " first_index: " << _first_index << " old_size: " + << st_buf.st_size << " new_size: " << entry_off; + ret = ftruncate_uninterrupted(_fd, entry_off); + } + + // seek to end, for opening segment + ::lseek(_fd, entry_off, SEEK_SET); + + _meta.bytes = entry_off; + return ret; +} + +int CurveSegment::_load_meta() { + char* metaPage = new char[_meta_page_size]; + int res = ::pread(_fd, metaPage, _meta_page_size, 0); + if (res != _meta_page_size) { + delete metaPage; + return -1; + } + memcpy(&_meta.bytes, metaPage, sizeof(_meta.bytes)); + delete metaPage; + LOG(INFO) << "loaded bytes: " << _meta.bytes; + return 0; +} + +inline bool verify_checksum(int checksum_type, + const char* data, size_t len, uint32_t value) { + switch (checksum_type) { + case CHECKSUM_MURMURHASH32: + return (value == braft::murmurhash32(data, len)); + case CHECKSUM_CRC32: + return (value == braft::crc32(data, len)); + default: + LOG(ERROR) << "Unknown checksum_type=" << checksum_type; + return false; + } +} + +inline bool verify_checksum(int checksum_type, + const butil::IOBuf& data, uint32_t value) { + switch (checksum_type) { + case CHECKSUM_MURMURHASH32: + return (value == braft::murmurhash32(data)); + case CHECKSUM_CRC32: + return (value == braft::crc32(data)); + default: + LOG(ERROR) << "Unknown checksum_type=" << checksum_type; + return false; + 
} +} + +inline uint32_t get_checksum(int checksum_type, const char* data, size_t len) { + switch (checksum_type) { + case CHECKSUM_MURMURHASH32: + return braft::murmurhash32(data, len); + case CHECKSUM_CRC32: + return braft::crc32(data, len); + default: + CHECK(false) << "Unknown checksum_type=" << checksum_type; + abort(); + return 0; + } +} + +inline uint32_t get_checksum(int checksum_type, const butil::IOBuf& data) { + switch (checksum_type) { + case CHECKSUM_MURMURHASH32: + return braft::murmurhash32(data); + case CHECKSUM_CRC32: + return braft::crc32(data); + default: + CHECK(false) << "Unknown checksum_type=" << checksum_type; + abort(); + return 0; + } +} + +std::string CurveSegment::file_name() { + if (!_is_open) { + if (_from_pool) { + return butil::string_printf(CURVE_SEGMENT_CLOSED_PATTERN, + _first_index, + _last_index.load()); + } else { + return butil::string_printf(BRAFT_SEGMENT_CLOSED_PATTERN, + _first_index, + _last_index.load()); + } + } else { + if (_from_pool) { + return butil::string_printf(CURVE_SEGMENT_OPEN_PATTERN, + _first_index); + } else { + return butil::string_printf(BRAFT_SEGMENT_OPEN_PATTERN, + _first_index); + } + } +} + +int CurveSegment::_load_entry(off_t offset, EntryHeader* head, + butil::IOBuf* data, size_t size_hint) const { + size_t header_size = _from_pool ? kEntryHeaderSize : kBraftEntryHeaderSize; + butil::IOPortal buf; + size_t to_read = std::max(size_hint, header_size); + const ssize_t n = braft::file_pread(&buf, _fd, offset, to_read); + if (n != (ssize_t)to_read) { + return n < 0 ? -1 : 1; + } + + char* header_buf = new char[header_size]; + const char *p = (const char *)buf.fetch(header_buf, header_size); + int64_t term = 0; + uint32_t meta_field; + uint32_t data_len = 0; + uint32_t data_real_len = 0; + uint32_t data_checksum = 0; + uint32_t header_checksum = 0; + butil::RawUnpacker un_packer(p); + un_packer.unpack64((uint64_t&)term) + .unpack32(meta_field) + .unpack32(data_len); + if (_from_pool) { + un_packer.unpack32(data_real_len); + } else { + data_real_len = data_len; + } + un_packer.unpack32(data_checksum) + .unpack32(header_checksum); + EntryHeader tmp; + tmp.term = term; + tmp.type = meta_field >> 24; + tmp.checksum_type = (meta_field << 8) >> 24; + tmp.data_len = data_len; + tmp.data_real_len = data_real_len; + tmp.data_checksum = data_checksum; + if (!verify_checksum(tmp.checksum_type, + p, header_size - 4, header_checksum)) { + LOG(ERROR) << "Found corrupted header at offset=" << offset + << ", header=" << tmp << ", path: " << _path; + return -1; + } + if (head != NULL) { + *head = tmp; + } + delete header_buf; + if (data != NULL) { + if (buf.length() < header_size + data_real_len) { + const size_t to_read = header_size + data_real_len - buf.length(); + const ssize_t n = braft::file_pread(&buf, _fd, + offset + buf.length(), to_read); + if (n != (ssize_t)to_read) { + return n < 0 ? 
-1 : 1; + } + } else if (buf.length() > header_size + data_real_len) { + buf.pop_back(buf.length() - header_size - data_real_len); + } + CHECK_EQ(buf.length(), header_size + data_real_len); + buf.pop_front(header_size); + if (!verify_checksum(tmp.checksum_type, buf, tmp.data_checksum)) { + LOG(ERROR) << "Found corrupted data at offset=" + << offset + header_size + << " header=" << tmp + << " path: " << _path; + return -1; + } + data->swap(buf); + } + return 0; +} + +int CurveSegment::append(const braft::LogEntry* entry) { + if (BAIDU_UNLIKELY(!entry || !_is_open)) { + return EINVAL; + } else if (entry->id.index != + _last_index.load(butil::memory_order_consume) + 1) { + CHECK(false) << "entry->index=" << entry->id.index + << " _last_index=" << _last_index + << " _first_index=" << _first_index; + return ERANGE; + } + butil::IOBuf data; + switch (entry->type) { + case braft::ENTRY_TYPE_DATA: + data.append(entry->data); + break; + case braft::ENTRY_TYPE_NO_OP: + break; + case braft::ENTRY_TYPE_CONFIGURATION: + { + butil::Status status = serialize_configuration_meta(entry, data); + if (!status.ok()) { + LOG(ERROR) << "Fail to serialize ConfigurationPBMeta, path: " + << _path; + return -1; + } + } + break; + default: + LOG(FATAL) << "unknow entry type: " << entry->type + << ", path: " << _path; + return -1; + } + uint32_t data_check_sum = get_checksum(_checksum_type, data); + uint32_t real_length = data.length(); + size_t to_write = kEntryHeaderSize + data.length(); + uint32_t zero_bytes_num = 0; + // 4KB alignment + if (to_write % _meta_page_size != 0) { + zero_bytes_num = (to_write / _meta_page_size + 1) * + _meta_page_size - to_write; + } + data.resize(data.length() + zero_bytes_num); + to_write = kEntryHeaderSize + data.length(); + CHECK_LE(data.length(), 1ul << 56ul); + char* write_buf = nullptr; + if (FLAGS_enableWalDirectWrite) { + int ret = posix_memalign(reinterpret_cast(&write_buf), + _meta_page_size, to_write); + LOG_IF(FATAL, ret < 0 || write_buf == nullptr) + << "posix_memalign WAL write buffer failed " << strerror(ret); + } else { + write_buf = new char[kEntryHeaderSize]; + } + + const uint32_t meta_field = (entry->type << 24) | (_checksum_type << 16); + butil::RawPacker packer(write_buf); + packer.pack64(entry->id.term) + .pack32(meta_field) + .pack32((uint32_t)data.length()) + .pack32(real_length) + .pack32(data_check_sum); + packer.pack32(get_checksum( + _checksum_type, write_buf, kEntryHeaderSize - 4)); + if (FLAGS_enableWalDirectWrite) { + data.copy_to(write_buf + kEntryHeaderSize, real_length); + int ret = ::pwrite(_direct_fd, write_buf, to_write, _meta.bytes); + free(write_buf); + if (ret != to_write) { + LOG(ERROR) << "Fail to write directly to fd=" << _direct_fd; + return -1; + } + } else { + butil::IOBuf header; + header.append(write_buf, kEntryHeaderSize); + delete write_buf; + butil::IOBuf* pieces[2] = { &header, &data }; + size_t start = 0; + ssize_t written = 0; + while (written < (ssize_t)to_write) { + const ssize_t n = butil::IOBuf::cut_multiple_into_file_descriptor( + _fd, pieces + start, ARRAY_SIZE(pieces) - start); + if (n < 0) { + LOG(ERROR) << "Fail to write to fd=" << _fd + << ", path: " << _path << berror(); + return -1; + } + written += n; + for (; start < ARRAY_SIZE(pieces) && pieces[start]->empty(); + ++start) {} + } + } + { + BAIDU_SCOPED_LOCK(_mutex); + _offset_and_term.push_back(std::make_pair(_meta.bytes, entry->id.term)); + _last_index.fetch_add(1, butil::memory_order_relaxed); + _meta.bytes += to_write; + } + return _update_meta_page(); +} + +int 
CurveSegment::_update_meta_page() { + char* metaPage = nullptr; + int ret = posix_memalign(reinterpret_cast(&metaPage), + _meta_page_size, _meta_page_size); + LOG_IF(FATAL, ret < 0 || metaPage == nullptr) + << "posix_memalign WAL meta page failed " << strerror(ret); + memset(metaPage, 0, _meta_page_size); + memcpy(metaPage, &_meta.bytes, sizeof(_meta.bytes)); + if (FLAGS_enableWalDirectWrite) { + ret = ::pwrite(_direct_fd, metaPage, _meta_page_size, 0); + } else { + ret = ::pwrite(_fd, metaPage, _meta_page_size, 0); + } + free(metaPage); + if (ret != _meta_page_size) { + LOG(ERROR) << "Fail to write meta page into fd=" + << (FLAGS_enableWalDirectWrite ? _direct_fd : _fd) + << ", path: " << _path << berror(); + return -1; + } + return 0; +} + +braft::LogEntry* CurveSegment::get(const int64_t index) const { + LogMeta meta; + if (_get_meta(index, &meta) != 0) { + return NULL; + } + + bool ok = true; + braft::LogEntry* entry = NULL; + do { + braft::ConfigurationPBMeta configuration_meta; + EntryHeader header; + butil::IOBuf data; + if (_load_entry(meta.offset, &header, &data, + meta.length) != 0) { + ok = false; + break; + } + CHECK_EQ(meta.term, header.term); + entry = new braft::LogEntry(); + entry->AddRef(); + switch (header.type) { + case braft::ENTRY_TYPE_DATA: + entry->data.swap(data); + break; + case braft::ENTRY_TYPE_NO_OP: + CHECK(data.empty()) << "Data of NO_OP must be empty"; + break; + case braft::ENTRY_TYPE_CONFIGURATION: + { + butil::Status status = parse_configuration_meta(data, entry); + if (!status.ok()) { + LOG(WARNING) << "Fail to parse ConfigurationPBMeta, path: " + << _path; + ok = false; + break; + } + } + break; + default: + CHECK(false) << "Unknown entry type, path: " << _path; + break; + } + + if (!ok) { + break; + } + entry->id.index = index; + entry->id.term = header.term; + entry->type = (braft::EntryType)header.type; + } while (0); + + if (!ok && entry != NULL) { + entry->Release(); + entry = NULL; + } + return entry; +} + +int CurveSegment::_get_meta(int64_t index, LogMeta* meta) const { + BAIDU_SCOPED_LOCK(_mutex); + if (index > _last_index.load(butil::memory_order_relaxed) + || index < _first_index) { + // out of range + BRAFT_VLOG << "_last_index=" + << _last_index.load(butil::memory_order_relaxed) + << " _first_index=" << _first_index; + return -1; + } else if (_last_index == _first_index - 1) { + BRAFT_VLOG << "_last_index=" + << _last_index.load(butil::memory_order_relaxed) + << " _first_index=" << _first_index; + // empty + return -1; + } + int64_t meta_index = index - _first_index; + int64_t entry_cursor = _offset_and_term[meta_index].first; + int64_t next_cursor = (index < + _last_index.load(butil::memory_order_relaxed)) + ? 
_offset_and_term[meta_index + 1].first : _meta.bytes; + DCHECK_LT(entry_cursor, next_cursor); + meta->offset = entry_cursor; + meta->term = _offset_and_term[meta_index].second; + meta->length = next_cursor - entry_cursor; + return 0; +} + +int64_t CurveSegment::get_term(const int64_t index) const { + LogMeta meta; + if (_get_meta(index, &meta) != 0) { + return 0; + } + return meta.term; +} + +int CurveSegment::close(bool will_sync) { + CHECK(_is_open); + + std::string old_path(_path); + if (_from_pool) { + butil::string_appendf(&old_path, "/" CURVE_SEGMENT_OPEN_PATTERN, + _first_index); + } else { + butil::string_appendf(&old_path, "/" BRAFT_SEGMENT_OPEN_PATTERN, + _first_index); + } + std::string new_path(_path); + if (_from_pool) { + butil::string_appendf(&new_path, "/" CURVE_SEGMENT_CLOSED_PATTERN, + _first_index, _last_index.load()); + } else { + butil::string_appendf(&new_path, "/" BRAFT_SEGMENT_CLOSED_PATTERN, + _first_index, _last_index.load()); + } + + LOG(INFO) << "close a full segment. Current first_index: " << _first_index + << " last_index: " << _last_index + << " raft_sync_segments: " << FLAGS_raftSyncSegments + << " will_sync: " << will_sync + << " path: " << new_path; + int ret = 0; + if (_last_index > _first_index) { + if (FLAGS_raftSyncSegments && will_sync && + !FLAGS_enableWalDirectWrite) { + ret = braft::raft_fsync(_fd); + } + } + if (ret == 0) { + _is_open = false; + const int rc = ::rename(old_path.c_str(), new_path.c_str()); + LOG_IF(INFO, rc == 0) << "Renamed `" << old_path + << "' to `" << new_path <<'\''; + LOG_IF(ERROR, rc != 0) << "Fail to rename `" << old_path + << "' to `" << new_path <<"\', " + << berror(); + return rc; + } + return ret; +} + +int CurveSegment::sync(bool will_sync) { + if (_last_index > _first_index) { + // CHECK(_is_open); + if (braft::FLAGS_raft_sync && will_sync && FLAGS_enableWalDirectWrite) { + return braft::raft_fsync(_fd); + } else { + return 0; + } + } else { + return 0; + } +} + +static void* run_unlink(void* arg) { + std::string* file_path = (std::string*) arg; + butil::Timer timer; + timer.start(); + int ret = ::unlink(file_path->c_str()); + timer.stop(); + BRAFT_VLOG << "unlink " << *file_path << " ret " << ret + << " time: " << timer.u_elapsed(); + delete file_path; + + return NULL; +} + +int CurveSegment::unlink() { + int ret = 0; + do { + std::string path(_path); + if (_from_pool) { + if (_is_open) { + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, + _first_index); + } else { + butil::string_appendf(&path, "/" CURVE_SEGMENT_CLOSED_PATTERN, + _first_index, _last_index.load()); + } + int res = kWalFilePool->RecycleFile(path); + if (res != 0) { + LOG(ERROR) << "Return segment to chunk file pool fail!"; + return -1; + } + return 0; + } + + if (_is_open) { + butil::string_appendf(&path, "/" BRAFT_SEGMENT_OPEN_PATTERN, + _first_index); + } else { + butil::string_appendf(&path, "/" BRAFT_SEGMENT_CLOSED_PATTERN, + _first_index, _last_index.load()); + } + std::string tmp_path(path); + tmp_path.append(".tmp"); + ret = ::rename(path.c_str(), tmp_path.c_str()); + if (ret != 0) { + PLOG(ERROR) << "Fail to rename " << path << " to " << tmp_path; + break; + } + + // start bthread to unlink + std::string* file_path = new std::string(tmp_path); + bthread_t tid; + if (bthread_start_background(&tid, &BTHREAD_ATTR_NORMAL, + run_unlink, file_path) != 0) { + run_unlink(file_path); + } + + LOG(INFO) << "Unlinked segment `" << path << '\''; + } while (0); + + return ret; +} + +int CurveSegment::truncate(const int64_t last_index_kept) { + 
int64_t truncate_size = 0; + int64_t first_truncate_in_offset = 0; + std::unique_lock lck(_mutex); + if (last_index_kept >= _last_index) { + return 0; + } + first_truncate_in_offset = last_index_kept + 1 - _first_index; + truncate_size = _offset_and_term[first_truncate_in_offset].first; + BRAFT_VLOG << "Truncating " << _path << " first_index: " << _first_index + << " last_index from " << _last_index << " to " << last_index_kept + << " truncate size to " << truncate_size; + lck.unlock(); + + // Truncate on a full segment need to rename back to inprogess segment + // again, because the node may crash before truncate. + if (!_is_open) { + std::string old_path(_path); + if (_from_pool) { + butil::string_appendf(&old_path, "/" CURVE_SEGMENT_CLOSED_PATTERN, + _first_index, _last_index.load()); + } else { + butil::string_appendf(&old_path, "/" BRAFT_SEGMENT_CLOSED_PATTERN, + _first_index, _last_index.load()); + } + std::string new_path(_path); + if (_from_pool) { + butil::string_appendf(&new_path, "/" CURVE_SEGMENT_OPEN_PATTERN, + _first_index); + } else { + butil::string_appendf(&new_path, "/" BRAFT_SEGMENT_OPEN_PATTERN, + _first_index); + } + int ret = ::rename(old_path.c_str(), new_path.c_str()); + LOG_IF(INFO, ret == 0) << "Renamed `" << old_path << "' to `" + << new_path << '\''; + LOG_IF(ERROR, ret != 0) << "Fail to rename `" << old_path << "' to `" + << new_path << "', " << berror(); + if (ret != 0) { + return ret; + } + _is_open = true; + } + + if (_from_pool) { + _meta.bytes = truncate_size; + int ret = _update_meta_page(); + if (ret < 0) { + return ret; + } + } else { + // truncate fd + int ret = ftruncate_uninterrupted(_fd, truncate_size); + if (ret < 0) { + return ret; + } + } + + // seek fd + off_t ret_off = ::lseek(_fd, truncate_size, SEEK_SET); + if (ret_off < 0) { + PLOG(ERROR) << "Fail to lseek fd=" << _fd << " to size=" + << truncate_size << " path: " << _path; + return -1; + } + + lck.lock(); + // update memory var + _offset_and_term.resize(first_truncate_in_offset); + _last_index.store(last_index_kept, butil::memory_order_relaxed); + _meta.bytes = truncate_size; + return 0; +} + +} // namespace chunkserver +} // namespace curve diff --git a/src/chunkserver/raftlog/curve_segment.h b/src/chunkserver/raftlog/curve_segment.h new file mode 100644 index 0000000000..5f28bf6016 --- /dev/null +++ b/src/chunkserver/raftlog/curve_segment.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2020 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: curve + * Created Date: 2020-09-02 + * Author: charisu + */ + +// Copyright (c) 2015 Baidu.com, Inc. All Rights Reserved +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Authors: Wang,Yao(wangyao02@baidu.com) +// Zhangyi Chen(chenzhangyi01@baidu.com) +// Xiong,Kai(xiongkai@baidu.com) + +#ifndef SRC_CHUNKSERVER_RAFTLOG_CURVE_SEGMENT_H_ +#define SRC_CHUNKSERVER_RAFTLOG_CURVE_SEGMENT_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "src/chunkserver/datastore/file_pool.h" + +namespace curve { +namespace chunkserver { + +DECLARE_bool(enableWalDirectWrite); + +extern std::shared_ptr kWalFilePool; + +struct CurveSegmentMeta { + CurveSegmentMeta() : bytes(0) {} + int64_t bytes; +}; + +class BAIDU_CACHELINE_ALIGNMENT CurveSegment + : public butil::RefCountedThreadSafe { + public: + CurveSegment(const std::string& path, const int64_t first_index, + int checksum_type, bool from_pool) + : _path(path), _meta(CurveSegmentMeta()), + _fd(-1), _is_open(true), + _first_index(first_index), _last_index(first_index - 1), + _checksum_type(checksum_type), + _from_pool(from_pool) { + if (_from_pool) { + _meta_page_size = kWalFilePool->GetFilePoolOpt().metaPageSize; + } + } + CurveSegment(const std::string& path, const int64_t first_index, + const int64_t last_index, int checksum_type, bool from_pool) + : _path(path), _meta(CurveSegmentMeta()), + _fd(-1), _is_open(false), + _first_index(first_index), _last_index(last_index), + _checksum_type(checksum_type), + _from_pool(from_pool) { + if (_from_pool) { + _meta_page_size = kWalFilePool->GetFilePoolOpt().metaPageSize; + } + } + + struct EntryHeader; + + // create open segment + int create(); + + // load open or closed segment + // open fd, load index, truncate uncompleted entry + int load(braft::ConfigurationManager* configuration_manager); + + // serialize entry, and append to open segment + int append(const braft::LogEntry* entry); + + // get entry by index + braft::LogEntry* get(const int64_t index) const; + + // get entry's term by index + int64_t get_term(const int64_t index) const; + + // close open segment + int close(bool will_sync = true); + + // sync open segment + int sync(bool will_sync); + + // unlink segment + int unlink(); + + // truncate segment to last_index_kept + int truncate(const int64_t last_index_kept); + + bool is_open() const { + return _is_open; + } + + int64_t bytes() const { + return _meta.bytes; + } + + int64_t first_index() const { + return _first_index; + } + + int64_t last_index() const { + return _last_index.load(butil::memory_order_consume); + } + + std::string file_name(); + + bool from_pool() { + return _from_pool; + } + private: + friend class butil::RefCountedThreadSafe; + ~CurveSegment() { + if (_fd >= 0) { + ::close(_fd); + _fd = -1; + } + } + + struct LogMeta { + off_t offset; + size_t length; + int64_t term; + }; + + int _load_entry(off_t offset, EntryHeader *head, butil::IOBuf *body, + size_t size_hint) const; + + int _get_meta(int64_t index, LogMeta* meta) const; + + int _load_meta(); + + int _update_meta_page(); + + std::string _path; + CurveSegmentMeta _meta; + mutable braft::raft_mutex_t _mutex; + int _fd; + int _direct_fd; + bool _is_open; + const int64_t _first_index; + butil::atomic 
_last_index; + int _checksum_type; + std::vector > _offset_and_term; + bool _from_pool; + uint32_t _meta_page_size; +}; + +} // namespace chunkserver +} // namespace curve + +#endif // SRC_CHUNKSERVER_RAFTLOG_CURVE_SEGMENT_H_ diff --git a/src/chunkserver/raftlog/curve_segment_log_storage.cpp b/src/chunkserver/raftlog/curve_segment_log_storage.cpp new file mode 100644 index 0000000000..127e8eac50 --- /dev/null +++ b/src/chunkserver/raftlog/curve_segment_log_storage.cpp @@ -0,0 +1,710 @@ +/* + * Copyright (c) 2020 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: curve + * Created Date: 2020-09-03 + * Author: charisu + */ + +// Copyright (c) 2015 Baidu.com, Inc. All Rights Reserved +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
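[Annotation, not part of the patch] CurveSegment::append earlier in this patch pads every record so that the total write (entry header plus payload) is a multiple of the metapage size, which keeps pwrite offsets and lengths aligned for O_DIRECT. The rounding rule as a standalone sketch, with entry_header_size standing for kEntryHeaderSize from raftlog/define.h:

    // Total bytes written for one WAL entry: header + payload,
    // rounded up to the next multiple of the metapage size.
    inline size_t AlignedWalWriteSize(size_t data_len,
                                      size_t meta_page_size,
                                      size_t entry_header_size) {
        size_t to_write = entry_header_size + data_len;
        if (to_write % meta_page_size != 0) {
            to_write = (to_write / meta_page_size + 1) * meta_page_size;
        }
        return to_write;
    }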
+ +// Authors: Wang,Yao(wangyao02@baidu.com) +// Zhangyi Chen(chenzhangyi01@baidu.com) +// Xiong,Kai(xiongkai@baidu.com) + +#include +#include +#include "src/chunkserver/raftlog/curve_segment_log_storage.h" +#include "src/chunkserver/raftlog/define.h" + +namespace curve { +namespace chunkserver { + +void RegisterCurveSegmentLogStorageOrDie() { + static CurveSegmentLogStorage logStorage; + braft::log_storage_extension()->RegisterOrDie( + "curve", &logStorage); +} + +int CurveSegmentLogStorage::init( + braft::ConfigurationManager* configuration_manager) { + butil::FilePath dir_path(_path); + butil::File::Error e; + if (!butil::CreateDirectoryAndGetError( + dir_path, &e, braft::FLAGS_raft_create_parent_directories)) { + LOG(ERROR) << "Fail to create " << dir_path.value() << " : " << e; + return -1; + } + + if (butil::crc32c::IsFastCrc32Supported()) { + _checksum_type = CHECKSUM_CRC32; + LOG_ONCE(INFO) + << "Use crc32c as the checksum type of appending entries"; + } else { + _checksum_type = CHECKSUM_MURMURHASH32; + LOG_ONCE(INFO) + << "Use murmurhash32 as the checksum type of appending entries"; + } + + int ret = 0; + bool is_empty = false; + do { + ret = load_meta(); + if (ret != 0 && errno == ENOENT) { + LOG(WARNING) << _path << " is empty"; + is_empty = true; + } else if (ret != 0) { + break; + } + + ret = list_segments(is_empty); + if (ret != 0) { + break; + } + + ret = load_segments(configuration_manager); + if (ret != 0) { + break; + } + } while (0); + + if (is_empty) { + _first_log_index.store(1); + _last_log_index.store(0); + ret = save_meta(1); + } + return ret; +} + +int CurveSegmentLogStorage::load_meta() { + butil::Timer timer; + timer.start(); + + std::string meta_path(_path); + meta_path.append("/" BRAFT_SEGMENT_META_FILE); + + braft::ProtoBufFile pb_file(meta_path); + braft::LogPBMeta meta; + if (0 != pb_file.load(&meta)) { + PLOG_IF(ERROR, errno != ENOENT) + << "Fail to load meta from " << meta_path; + return -1; + } + + _first_log_index.store(meta.first_log_index()); + + timer.stop(); + LOG(INFO) << "log load_meta " << meta_path + << " first_log_index: " << meta.first_log_index() + << " time: " << timer.u_elapsed(); + return 0; +} + +int CurveSegmentLogStorage::list_segments(bool is_empty) { + butil::DirReaderPosix dir_reader(_path.c_str()); + if (!dir_reader.IsValid()) { + LOG(WARNING) << "directory reader failed, maybe NOEXIST or PERMISSION." 
+ << " path: " << _path; + return -1; + } + + // restore segment meta + while (dir_reader.Next()) { + // unlink unneed segments and unfinished unlinked segments + if ((is_empty && 0 == strncmp(dir_reader.name(), + "log_", strlen("log_"))) || + (0 == strncmp(dir_reader.name() + + (strlen(dir_reader.name()) - strlen(".tmp")), + ".tmp", strlen(".tmp")))) { + std::string segment_path(_path); + segment_path.append("/"); + segment_path.append(dir_reader.name()); + ::unlink(segment_path.c_str()); + + LOG(WARNING) << "unlink unused segment, path: " << segment_path; + + continue; + } + + // Is braft log pattern + int match = 0; + int64_t first_index = 0; + int64_t last_index = 0; + match = sscanf(dir_reader.name(), BRAFT_SEGMENT_CLOSED_PATTERN, + &first_index, &last_index); + if (match == 2) { + LOG(INFO) << "restore closed segment, path: " << _path + << " first_index: " << first_index + << " last_index: " << last_index; + CurveSegment* segment = new CurveSegment(_path, first_index, + last_index, _checksum_type, false); + _segments[first_index] = segment; + continue; + } + + match = sscanf(dir_reader.name(), BRAFT_SEGMENT_OPEN_PATTERN, + &first_index); + if (match == 1) { + BRAFT_VLOG << "restore open segment, path: " << _path + << " first_index: " << first_index; + if (!_open_segment) { + _open_segment = + new CurveSegment(_path, first_index, _checksum_type, false); + continue; + } else { + LOG(WARNING) << "open segment conflict, path: " << _path + << " first_index: " << first_index; + return -1; + } + } + + // Is curve log pattern + match = sscanf(dir_reader.name(), CURVE_SEGMENT_CLOSED_PATTERN, + &first_index, &last_index); + if (match == 2) { + LOG(INFO) << "restore closed segment, path: " << _path + << " first_index: " << first_index + << " last_index: " << last_index; + CurveSegment* segment = new CurveSegment(_path, first_index, + last_index, _checksum_type, true); + _segments[first_index] = segment; + continue; + } + + match = sscanf(dir_reader.name(), CURVE_SEGMENT_OPEN_PATTERN, + &first_index); + if (match == 1) { + BRAFT_VLOG << "restore open segment, path: " << _path + << " first_index: " << first_index; + if (!_open_segment) { + _open_segment = + new CurveSegment(_path, first_index, _checksum_type, true); + continue; + } else { + LOG(WARNING) << "open segment conflict, path: " << _path + << " first_index: " << first_index; + return -1; + } + } + } + + // check segment + int64_t last_log_index = -1; + SegmentMap::iterator it; + for (it = _segments.begin(); it != _segments.end(); ) { + CurveSegment* segment = it->second.get(); + if (segment->first_index() > segment->last_index()) { + LOG(WARNING) << "closed segment is bad, path: " << _path + << " first_index: " << segment->first_index() + << " last_index: " << segment->last_index(); + return -1; + } else if (last_log_index != -1 && + segment->first_index() != last_log_index + 1) { + LOG(WARNING) << "closed segment not in order, path: " << _path + << " first_index: " << segment->first_index() + << " last_log_index: " << last_log_index; + return -1; + } else if (last_log_index == -1 && + _first_log_index.load(butil::memory_order_acquire) + < segment->first_index()) { + LOG(WARNING) << "closed segment has hole, path: " << _path + << " first_log_index: " + << _first_log_index.load(butil::memory_order_relaxed) + << " first_index: " << segment->first_index() + << " last_index: " << segment->last_index(); + return -1; + } else if (last_log_index == -1 && + _first_log_index > segment->last_index()) { + LOG(WARNING) << "closed segment need 
discard, path: " << _path + << " first_log_index: " + << _first_log_index.load(butil::memory_order_relaxed) + << " first_index: " << segment->first_index() + << " last_index: " << segment->last_index(); + segment->unlink(); + _segments.erase(it++); + continue; + } + + last_log_index = segment->last_index(); + ++it; + } + if (_open_segment) { + if (last_log_index == -1 && + _first_log_index.load(butil::memory_order_relaxed) < + _open_segment->first_index()) { + LOG(WARNING) << "open segment has hole, path: " << _path + << " first_log_index: " + << _first_log_index.load(butil::memory_order_relaxed) + << " first_index: " << _open_segment->first_index(); + } else if (last_log_index != -1 && + _open_segment->first_index() != last_log_index + 1) { + LOG(WARNING) << "open segment has hole, path: " << _path + << " first_log_index: " + << _first_log_index.load(butil::memory_order_relaxed) + << " first_index: " << _open_segment->first_index(); + } + CHECK_LE(last_log_index, _open_segment->last_index()); + } + + return 0; +} + +int CurveSegmentLogStorage::load_segments( + braft::ConfigurationManager* configuration_manager) { + int ret = 0; + + // closed segments + SegmentMap::iterator it; + for (it = _segments.begin(); it != _segments.end(); ++it) { + CurveSegment* segment = it->second.get(); + LOG(INFO) << "load closed segment, path: " << _path + << " first_index: " << segment->first_index() + << " last_index: " << segment->last_index(); + ret = segment->load(configuration_manager); + if (ret != 0) { + return ret; + } + _last_log_index.store(segment->last_index(), + butil::memory_order_release); + } + + // open segment + if (_open_segment) { + LOG(INFO) << "load open segment, path: " << _path + << " first_index: " << _open_segment->first_index(); + ret = _open_segment->load(configuration_manager); + if (ret != 0) { + return ret; + } + if (_first_log_index.load() > _open_segment->last_index()) { + LOG(WARNING) << "open segment need discard, path: " << _path + << " first_log_index: " << _first_log_index.load() + << " first_index: " << _open_segment->first_index() + << " last_index: " << _open_segment->last_index(); + _open_segment->unlink(); + _open_segment = NULL; + } else { + _last_log_index.store(_open_segment->last_index(), + butil::memory_order_release); + } + if (!_open_segment->from_pool()) { + LOG(INFO) << "Loaded a braft open segment, close it directly"; + _segments[_open_segment->first_index()] = _open_segment; + if (_open_segment->close() == 0) { + _open_segment = new CurveSegment(_path, last_log_index() + 1, + _checksum_type, true); + if (_open_segment->create() != 0) { + _open_segment = NULL; + LOG(ERROR) << "Create new segment fail"; + return -1; + } + } + } + } + if (_last_log_index == 0) { + _last_log_index = _first_log_index - 1; + } + return 0; +} + +int64_t CurveSegmentLogStorage::last_log_index() { + return _last_log_index.load(butil::memory_order_acquire); +} + +braft::LogEntry* CurveSegmentLogStorage::get_entry(const int64_t index) { + scoped_refptr ptr; + if (get_segment(index, &ptr) != 0) { + return NULL; + } + return ptr->get(index); +} + +int CurveSegmentLogStorage::get_segment(int64_t index, + scoped_refptr* ptr) { + BAIDU_SCOPED_LOCK(_mutex); + int64_t first_index = first_log_index(); + int64_t last_index = last_log_index(); + if (first_index == last_index + 1) { + return -1; + } + if (index < first_index || index > last_index + 1) { + LOG_IF(WARNING, index > last_index) << "Attempted to access entry " + << index << " outside of log, " + << " first_log_index: " << 
first_index + << " last_log_index: " << last_index; + return -1; + } else if (index == last_index + 1) { + return -1; + } + + if (_open_segment && index >= _open_segment->first_index()) { + *ptr = _open_segment; + CHECK(ptr->get() != NULL); + } else { + CHECK(!_segments.empty()); + SegmentMap::iterator it = _segments.upper_bound(index); + SegmentMap::iterator saved_it = it; + --it; + CHECK(it != saved_it); + *ptr = it->second; + } + return 0; +} + +int64_t CurveSegmentLogStorage::get_term(const int64_t index) { + scoped_refptr ptr; + if (get_segment(index, &ptr) != 0) { + return 0; + } + return ptr->get_term(index); +} + +int CurveSegmentLogStorage::append_entry(const braft::LogEntry* entry) { + scoped_refptr segment = + open_segment(entry->data.size() + kEntryHeaderSize); + if (NULL == segment) { + return EIO; + } + int ret = segment->append(entry); + if (ret != 0 && ret != EEXIST) { + return ret; + } + if (EEXIST == ret && entry->id.term != get_term(entry->id.index)) { + return EINVAL; + } + _last_log_index.fetch_add(1, butil::memory_order_release); + + return segment->sync(_enable_sync); +} + +int CurveSegmentLogStorage::append_entries( + const std::vector& entries) { + if (entries.empty()) { + return 0; + } + if (_last_log_index.load(butil::memory_order_relaxed) + 1 + != entries.front()->id.index) { + LOG(FATAL) << "There's gap between appending entries and" + << " _last_log_index path: " << _path; + return -1; + } + scoped_refptr last_segment = NULL; + for (size_t i = 0; i < entries.size(); i++) { + braft::LogEntry* entry = entries[i]; + + scoped_refptr segment = + open_segment(entry->data.size() + kEntryHeaderSize); + if (NULL == segment) { + return i; + } + int ret = segment->append(entry); + if (0 != ret) { + return i; + } + _last_log_index.fetch_add(1, butil::memory_order_release); + last_segment = segment; + } + last_segment->sync(_enable_sync); + return entries.size(); +} + +int CurveSegmentLogStorage::truncate_prefix(const int64_t first_index_kept) { + // segment files + if (_first_log_index.load(butil::memory_order_acquire) >= + first_index_kept) { + BRAFT_VLOG << "Nothing is going to happen since _first_log_index=" + << _first_log_index.load(butil::memory_order_relaxed) + << " >= first_index_kept=" + << first_index_kept; + return 0; + } + // NOTE: truncate_prefix is not important, as it has nothing to do with + // consensus. We try to save meta on the disk first to make sure even if + // the deleting fails or the process crashes (which is unlikely to happen). 
+ // The new process would see the latest `first_log_index' + if (save_meta(first_index_kept) != 0) { + PLOG(ERROR) << "Fail to save meta, path: " << _path; + return -1; + } + std::vector > popped; + pop_segments(first_index_kept, &popped); + for (size_t i = 0; i < popped.size(); ++i) { + popped[i]->unlink(); + popped[i] = NULL; + } + return 0; +} + +int CurveSegmentLogStorage::save_meta(const int64_t log_index) { + butil::Timer timer; + timer.start(); + + std::string meta_path(_path); + meta_path.append("/" BRAFT_SEGMENT_META_FILE); + + braft::LogPBMeta meta; + meta.set_first_log_index(log_index); + braft::ProtoBufFile pb_file(meta_path); + int ret = pb_file.save(&meta, braft::raft_sync_meta()); + + timer.stop(); + PLOG_IF(ERROR, ret != 0) << "Fail to save meta to " << meta_path; + LOG(INFO) << "log save_meta " << meta_path << " first_log_index: " + << log_index << " time: " << timer.u_elapsed(); + return ret; +} + +void CurveSegmentLogStorage::pop_segments( + const int64_t first_index_kept, + std::vector >* popped) { + popped->clear(); + popped->reserve(32); + BAIDU_SCOPED_LOCK(_mutex); + _first_log_index.store(first_index_kept, butil::memory_order_release); + for (SegmentMap::iterator it = _segments.begin(); it != _segments.end();) { + scoped_refptr& segment = it->second; + if (segment->last_index() < first_index_kept) { + popped->push_back(segment); + _segments.erase(it++); + } else { + return; + } + } + if (_open_segment) { + if (_open_segment->last_index() < first_index_kept) { + popped->push_back(_open_segment); + _open_segment = NULL; + // _log_storage is empty + _last_log_index.store(first_index_kept - 1); + } else { + CHECK(_open_segment->first_index() <= first_index_kept); + } + } else { + // _log_storage is empty + _last_log_index.store(first_index_kept - 1); + } +} + +void CurveSegmentLogStorage::pop_segments_from_back( + const int64_t last_index_kept, + std::vector >* popped, + scoped_refptr* last_segment) { + popped->clear(); + popped->reserve(32); + *last_segment = NULL; + BAIDU_SCOPED_LOCK(_mutex); + _last_log_index.store(last_index_kept, butil::memory_order_release); + if (_open_segment) { + if (_open_segment->first_index() <= last_index_kept) { + *last_segment = _open_segment; + return; + } + popped->push_back(_open_segment); + _open_segment = NULL; + } + for (SegmentMap::reverse_iterator + it = _segments.rbegin(); it != _segments.rend(); ++it) { + if (it->second->first_index() <= last_index_kept) { + // Not return as we need to maintain _segments at the end of this + // routine + break; + } + popped->push_back(it->second); + // XXX: C++03 not support erase reverse_iterator + } + for (size_t i = 0; i < popped->size(); i++) { + _segments.erase((*popped)[i]->first_index()); + } + if (_segments.rbegin() != _segments.rend()) { + *last_segment = _segments.rbegin()->second; + } else { + // all the logs have been cleared, the we move _first_log_index to the + // next index + _first_log_index.store(last_index_kept + 1, + butil::memory_order_release); + } +} + +int CurveSegmentLogStorage::truncate_suffix(const int64_t last_index_kept) { + // segment files + std::vector > popped; + scoped_refptr last_segment; + pop_segments_from_back(last_index_kept, &popped, &last_segment); + bool truncate_last_segment = false; + int ret = -1; + + if (last_segment) { + if (_first_log_index.load(butil::memory_order_relaxed) <= + _last_log_index.load(butil::memory_order_relaxed)) { + truncate_last_segment = true; + } else { + // trucate_prefix() and truncate_suffix() to discard entire logs + 
BAIDU_SCOPED_LOCK(_mutex); + popped.push_back(last_segment); + _segments.erase(last_segment->first_index()); + if (_open_segment) { + CHECK(_open_segment.get() == last_segment.get()); + _open_segment = NULL; + } + } + } + + // The truncate suffix order is crucial to satisfy log matching + // property of raft log must be truncated from back to front. + for (size_t i = 0; i < popped.size(); ++i) { + ret = popped[i]->unlink(); + if (ret != 0) { + return ret; + } + popped[i] = NULL; + } + if (truncate_last_segment) { + bool closed = !last_segment->is_open(); + ret = last_segment->truncate(last_index_kept); + if (ret == 0 && closed && last_segment->is_open()) { + BAIDU_SCOPED_LOCK(_mutex); + CHECK(!_open_segment); + _open_segment.swap(last_segment); + } + } + + return ret; +} + +int CurveSegmentLogStorage::reset(const int64_t next_log_index) { + if (next_log_index <= 0) { + LOG(ERROR) << "Invalid next_log_index=" << next_log_index + << " path: " << _path; + return EINVAL; + } + std::vector > popped; + std::unique_lock lck(_mutex); + popped.reserve(_segments.size()); + for (SegmentMap::const_iterator + it = _segments.begin(); it != _segments.end(); ++it) { + popped.push_back(it->second); + } + _segments.clear(); + if (_open_segment) { + popped.push_back(_open_segment); + _open_segment = NULL; + } + _first_log_index.store(next_log_index, butil::memory_order_relaxed); + _last_log_index.store(next_log_index - 1, butil::memory_order_relaxed); + lck.unlock(); + // NOTE: see the comments in truncate_prefix + if (save_meta(next_log_index) != 0) { + PLOG(ERROR) << "Fail to save meta, path: " << _path; + return -1; + } + for (size_t i = 0; i < popped.size(); ++i) { + popped[i]->unlink(); + popped[i] = NULL; + } + return 0; +} + +void CurveSegmentLogStorage::list_files(std::vector* seg_files) { + BAIDU_SCOPED_LOCK(_mutex); + seg_files->push_back(BRAFT_SEGMENT_META_FILE); + for (SegmentMap::iterator it = _segments.begin(); + it != _segments.end(); ++it) { + scoped_refptr& segment = it->second; + seg_files->push_back(segment->file_name()); + } + if (_open_segment) { + seg_files->push_back(_open_segment->file_name()); + } +} + +void CurveSegmentLogStorage::sync() { + std::vector > segments; + { + BAIDU_SCOPED_LOCK(_mutex); + for (SegmentMap::iterator it = _segments.begin(); + it != _segments.end(); ++it) { + segments.push_back(it->second); + } + } + + for (size_t i = 0; i < segments.size(); i++) { + segments[i]->sync(true); + } +} + +braft::LogStorage* CurveSegmentLogStorage::new_instance( + const std::string& uri) const { + return new CurveSegmentLogStorage(uri); +} + +scoped_refptr CurveSegmentLogStorage::open_segment( + size_t to_write) { + scoped_refptr prev_open_segment; + { + BAIDU_SCOPED_LOCK(_mutex); + if (!_open_segment) { + _open_segment = new CurveSegment(_path, last_log_index() + 1, + _checksum_type, true); + if (_open_segment->create() != 0) { + _open_segment = NULL; + return NULL; + } + } + uint32_t maxTotalFileSize = kWalFilePool->GetFilePoolOpt().fileSize + + kWalFilePool->GetFilePoolOpt().metaPageSize; + if (_open_segment->bytes() + to_write > maxTotalFileSize) { + _segments[_open_segment->first_index()] = _open_segment; + prev_open_segment.swap(_open_segment); + } + } + do { + if (prev_open_segment) { + if (prev_open_segment->close(_enable_sync) == 0) { + BAIDU_SCOPED_LOCK(_mutex); + _open_segment = new CurveSegment(_path, last_log_index() + 1, + _checksum_type, true); + if (_open_segment->create() == 0) { + // success + break; + } + } + PLOG(ERROR) << "Fail to close old open_segment or 
create new" + << " open_segment path: " << _path; + // Failed, revert former changes + BAIDU_SCOPED_LOCK(_mutex); + _segments.erase(prev_open_segment->first_index()); + _open_segment.swap(prev_open_segment); + return NULL; + } + } while (0); + return _open_segment; +} + +} // namespace chunkserver +} // namespace curve diff --git a/src/chunkserver/raftlog/curve_segment_log_storage.h b/src/chunkserver/raftlog/curve_segment_log_storage.h new file mode 100644 index 0000000000..6efe76c78d --- /dev/null +++ b/src/chunkserver/raftlog/curve_segment_log_storage.h @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2020 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: curve + * Created Date: 2020-09-03 + * Author: charisu + */ + +// Copyright (c) 2015 Baidu.com, Inc. All Rights Reserved +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Authors: Wang,Yao(wangyao02@baidu.com) +// Zhangyi Chen(chenzhangyi01@baidu.com) +// Xiong,Kai(xiongkai@baidu.com) + +#ifndef SRC_CHUNKSERVER_RAFTLOG_CURVE_SEGMENT_LOG_STORAGE_H_ +#define SRC_CHUNKSERVER_RAFTLOG_CURVE_SEGMENT_LOG_STORAGE_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "src/chunkserver/raftlog/curve_segment.h" + +namespace curve { +namespace chunkserver { + +void RegisterCurveSegmentLogStorageOrDie(); + +// LogStorage use segmented append-only file, all data in disk, all index +// in memory. append one log entry, only cause one disk write, every disk +// write will call fsync(). 
+// +// SegmentLog layout: +// log_meta: record start_log +// log_000001-0001000: closed segment +// log_inprogress_0001001: open segment +class CurveSegmentLogStorage : public braft::LogStorage { + public: + typedef std::map > SegmentMap; + + explicit CurveSegmentLogStorage(const std::string& path, + bool enable_sync = true) + : _path(path) + , _first_log_index(1) + , _last_log_index(0) + , _checksum_type(0) + , _enable_sync(enable_sync) + {} + + CurveSegmentLogStorage() + : _first_log_index(1) + , _last_log_index(0) + , _checksum_type(0) + , _enable_sync(true) + {} + + virtual ~CurveSegmentLogStorage() {} + + // init logstorage, check consistency and integrity + virtual int init(braft::ConfigurationManager* configuration_manager); + + // first log index in log + virtual int64_t first_log_index() { + return _first_log_index.load(butil::memory_order_acquire); + } + + // last log index in log + virtual int64_t last_log_index(); + + // get logentry by index + virtual braft::LogEntry* get_entry(const int64_t index); + + // get logentry's term by index + virtual int64_t get_term(const int64_t index); + + // append entry to log + int append_entry(const braft::LogEntry* entry); + + // append entries to log, return success append number + virtual int append_entries(const std::vector& entries); + + // delete logs from storage's head, [1, first_index_kept) will be discarded + virtual int truncate_prefix(const int64_t first_index_kept); + + // delete uncommitted logs from storage's tail, + // (last_index_kept, infinity) will be discarded + virtual int truncate_suffix(const int64_t last_index_kept); + + virtual int reset(const int64_t next_log_index); + + LogStorage* new_instance(const std::string& uri) const; + + SegmentMap& segments() { + return _segments; + } + + void list_files(std::vector* seg_files); + + void sync(); + + private: + scoped_refptr open_segment(size_t to_write); + int save_meta(const int64_t log_index); + int load_meta(); + int list_segments(bool is_empty); + int load_segments(braft::ConfigurationManager* configuration_manager); + int get_segment(int64_t log_index, scoped_refptr* ptr); + void pop_segments( + int64_t first_index_kept, + std::vector >* poped); + void pop_segments_from_back( + const int64_t first_index_kept, + std::vector >* popped, + scoped_refptr* last_segment); + + + std::string _path; + butil::atomic _first_log_index; + butil::atomic _last_log_index; + braft::raft_mutex_t _mutex; + SegmentMap _segments; + scoped_refptr _open_segment; + int _checksum_type; + bool _enable_sync; +}; + +} // namespace chunkserver +} // namespace curve + +#endif // SRC_CHUNKSERVER_RAFTLOG_CURVE_SEGMENT_LOG_STORAGE_H_ diff --git a/src/chunkserver/raftlog/define.h b/src/chunkserver/raftlog/define.h new file mode 100644 index 0000000000..b2ead185eb --- /dev/null +++ b/src/chunkserver/raftlog/define.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2020 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Project: curve + * Created Date: 2020-09-03 + * Author: charisu + */ + +// Copyright (c) 2015 Baidu.com, Inc. All Rights Reserved +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Authors: Wang,Yao(wangyao02@baidu.com) +// Zhangyi Chen(chenzhangyi01@baidu.com) +// Xiong,Kai(xiongkai@baidu.com) + +#ifndef SRC_CHUNKSERVER_RAFTLOG_DEFINE_H_ +#define SRC_CHUNKSERVER_RAFTLOG_DEFINE_H_ +namespace curve { +namespace chunkserver { + +#define CURVE_SEGMENT_OPEN_PATTERN "curve_log_inprogress_%020" PRId64 +#define CURVE_SEGMENT_CLOSED_PATTERN "curve_log_%020" PRId64 "_%020" PRId64 +#define BRAFT_SEGMENT_OPEN_PATTERN "log_inprogress_%020" PRId64 +#define BRAFT_SEGMENT_CLOSED_PATTERN "log_%020" PRId64 "_%020" PRId64 +#define BRAFT_SEGMENT_META_FILE "log_meta" + +// Format of Header, all fields are in network order +// | -------------------- term (64bits) ------------------------- | +// | entry-type (8bits) | checksum_type (8bits) | reserved(16bits) | +// | ------------------ data len (32bits) ----------------------- | +// | --------------- data real len (32bits) --------------------- | +// | data_checksum (32bits) | header checksum (32bits) | + +const size_t kEntryHeaderSize = 28; +const size_t kBraftEntryHeaderSize = 24; + +enum CheckSumType { + CHECKSUM_MURMURHASH32 = 0, + CHECKSUM_CRC32 = 1, +}; + +} // namespace chunkserver +} // namespace curve + +#endif // SRC_CHUNKSERVER_RAFTLOG_DEFINE_H_ diff --git a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp index dd17214dd9..18479b26a6 100644 --- a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp +++ b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp @@ -28,11 +28,11 @@ namespace curve { namespace chunkserver { CurveFilesystemAdaptor::CurveFilesystemAdaptor( - std::shared_ptr chunkfilePool, + std::shared_ptr chunkFilePool, std::shared_ptr lfs) { lfs_ = lfs; - chunkfilePool_ = chunkfilePool; - uint64_t metapageSize = chunkfilePool_->GetChunkFilePoolOpt().metaPageSize; + chunkFilePool_ = chunkFilePool; + uint64_t metapageSize = chunkFilePool->GetFilePoolOpt().metaPageSize; tempMetaPageContent = new (std::nothrow) char[metapageSize]; CHECK(tempMetaPageContent != nullptr); memset(tempMetaPageContent, 0, metapageSize); @@ -79,8 +79,8 @@ braft::FileAdaptor* CurveFilesystemAdaptor::open(const std::string& path, (oflag & O_CREAT) && false == lfs_->FileExists(path)) { // 从chunkfile pool中取出chunk返回 - int rc = chunkfilePool_->GetChunk(path, tempMetaPageContent); - // 如果从chunkfilepool中取失败,返回错误。 + int rc = chunkFilePool_->GetFile(path, tempMetaPageContent); + // 如果从FilePool中取失败,返回错误。 if (rc != 0) { LOG(ERROR) << "get chunk from chunkfile pool failed!"; return NULL; @@ -134,8 +134,8 @@ bool CurveFilesystemAdaptor::delete_file(const std::string& path, if (NeedFilter(path)) { return lfs_->Delete(path) == 0; } else { - // chunkfilePool内部会检查path对应文件合法性,如果不符合就直接删除 - return chunkfilePool_->RecycleChunk(path) == 0; + // chunkfilepool内部会检查path对应文件合法性,如果不符合就直接删除 + 
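+        // hand the file back to the FilePool for reuse instead of unlinking it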
return chunkFilePool_->RecycleFile(path) == 0; } } } @@ -160,7 +160,7 @@ bool CurveFilesystemAdaptor::RecycleDirRecursive( break; } } else { - int ret = chunkfilePool_->RecycleChunk(todeletePath); + int ret = chunkFilePool_->RecycleFile(todeletePath); if (ret < 0) { rc = false; LOG(ERROR) << "recycle " << path + filepath << ", failed!"; @@ -175,8 +175,8 @@ bool CurveFilesystemAdaptor::RecycleDirRecursive( bool CurveFilesystemAdaptor::rename(const std::string& old_path, const std::string& new_path) { if (!NeedFilter(new_path) && lfs_->FileExists(new_path)) { - // chunkfilePool内部会检查path对应文件合法性,如果不符合就直接删除 - chunkfilePool_->RecycleChunk(new_path); + // chunkfilepool内部会检查path对应文件合法性,如果不符合就直接删除 + chunkFilePool_->RecycleFile(new_path); } return lfs_->Rename(old_path, new_path) == 0; } diff --git a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h index 56a1723a40..4e6737b8d4 100644 --- a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h +++ b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h @@ -29,7 +29,7 @@ #include #include -#include "src/chunkserver/datastore/chunkfile_pool.h" +#include "src/chunkserver/datastore/file_pool.h" #include "src/chunkserver/raftsnapshot/curve_file_adaptor.h" /** @@ -44,7 +44,7 @@ */ using curve::fs::LocalFileSystem; -using curve::chunkserver::ChunkfilePool; +using curve::chunkserver::FilePool; namespace curve { namespace chunkserver { @@ -59,10 +59,10 @@ class CurveFilesystemAdaptor : public braft::PosixFileSystemAdaptor { public: /** * 构造函数 - * @param: chunkfilePool用于获取和回收chunk文件 + * @param: chunkfilepool用于获取和回收chunk文件 * @param: lfs用于进行一些文件操作,比如打开或者删除目录 */ - CurveFilesystemAdaptor(std::shared_ptr chunkfilePool, + CurveFilesystemAdaptor(std::shared_ptr filePool, std::shared_ptr lfs); CurveFilesystemAdaptor(); virtual ~CurveFilesystemAdaptor(); @@ -144,9 +144,9 @@ class CurveFilesystemAdaptor : public braft::PosixFileSystemAdaptor { char* tempMetaPageContent; // 我们自己的文件系统,这里文件系统会做一些打开及删除目录操作 std::shared_ptr lfs_; - // 操作chunkfilepool的指针,这个chunkfilePool_与copysetnode的 - // chunkfilePool_应该是全局唯一的,保证操作chunkfilepool的原子性 - std::shared_ptr chunkfilePool_; + // 操作chunkfilepool的指针,这个FilePool_与copysetnode的 + // chunkfilepool_应该是全局唯一的,保证操作chunkfilepool的原子性 + std::shared_ptr chunkFilePool_; // 过滤名单,在当前vector中的文件名,都不从chunkfilepool中取文件 // 回收的时候也直接删除这些文件,不进入chunkfilepool std::vector filterList_; diff --git a/src/chunkserver/trash.cpp b/src/chunkserver/trash.cpp index eaf1d1b198..8edebf0ef7 100644 --- a/src/chunkserver/trash.cpp +++ b/src/chunkserver/trash.cpp @@ -54,7 +54,7 @@ int Trash::Init(TrashOptions options) { expiredAfterSec_ = options.expiredAfterSec; scanPeriodSec_ = options.scanPeriodSec; localFileSystem_ = options.localFileSystem; - chunkfilePool_ = options.chunkfilePool; + chunkFilePool_ = options.chunkFilePool; chunkNum_.store(0); // 读取trash目录下的所有目录 @@ -253,10 +253,10 @@ bool Trash::RecycleIfChunkfile( return true; } - // 是chunkfile, 回收到chunkfilepool中 - if (0 != chunkfilePool_->RecycleChunk(filepath)) { + // 是chunkfile, 回收到FilePool中 + if (0 != chunkFilePool_->RecycleFile(filepath)) { LOG(ERROR) << "Trash failed recycle chunk " << filepath - << " to chunkfilePool"; + << " to FilePool"; return false; } diff --git a/src/chunkserver/trash.h b/src/chunkserver/trash.h index c5f25f4353..0982551bc1 100644 --- a/src/chunkserver/trash.h +++ b/src/chunkserver/trash.h @@ -26,7 +26,7 @@ #include #include #include "src/fs/local_filesystem.h" -#include "src/chunkserver/datastore/chunkfile_pool.h" +#include 
"src/chunkserver/datastore/file_pool.h" #include "src/common/concurrent/concurrent.h" #include "src/common/interruptible_sleeper.h" @@ -45,7 +45,7 @@ struct TrashOptions{ int scanPeriodSec; std::shared_ptr localFileSystem; - std::shared_ptr chunkfilePool; + std::shared_ptr chunkFilePool; }; class Trash { @@ -144,7 +144,7 @@ class Trash { std::shared_ptr localFileSystem_; // chunk池子 - std::shared_ptr chunkfilePool_; + std::shared_ptr chunkFilePool_; // 回收站全路径 std::string trashPath_; diff --git a/src/common/curve_define.h b/src/common/curve_define.h index a5a880fb7e..644c8dfc1e 100644 --- a/src/common/curve_define.h +++ b/src/common/curve_define.h @@ -52,8 +52,8 @@ const uint32_t kKB = 1024; const uint32_t kMB = 1024*kKB; const uint32_t kGB = 1024*kMB; -// maigic number用于chunkfilepool_meta file计算crc -const char kChunkFilePoolMaigic[3] = "01"; +// maigic number用于FilePool_meta file计算crc +const char kFilePoolMaigic[3] = "01"; } // namespace common } // namespace curve diff --git a/src/fs/ext4_filesystem_impl.cpp b/src/fs/ext4_filesystem_impl.cpp index 59e4b6d5e6..efcaa23d70 100644 --- a/src/fs/ext4_filesystem_impl.cpp +++ b/src/fs/ext4_filesystem_impl.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #include "src/common/string_util.h" #include "src/fs/ext4_filesystem_impl.h" diff --git a/src/tools/curve_format_main.cpp b/src/tools/curve_format_main.cpp index 0b75925600..016dd53a0b 100644 --- a/src/tools/curve_format_main.cpp +++ b/src/tools/curve_format_main.cpp @@ -36,7 +36,7 @@ #include "src/fs/local_filesystem.h" #include "src/common/crc32.h" #include "src/common/curve_define.h" -#include "src/chunkserver/datastore/chunkfile_pool.h" +#include "src/chunkserver/datastore/file_pool.h" /** * chunkfile pool预分配工具,提供两种分配方式 @@ -47,37 +47,37 @@ */ DEFINE_bool(allocateByPercent, true, - "allocate chunkfilepool by percent of disk size or by chunk num!"); + "allocate filePool by percent of disk size or by chunk num!"); -DEFINE_uint32(chunksize, +DEFINE_uint32(fileSize, 16 * 1024 * 1024, "chunk size"); -DEFINE_uint32(metapagsize, +DEFINE_uint32(metaPagSize, 4 * 1024, "metapage size for every chunk"); -DEFINE_string(filesystem_path, +DEFINE_string(fileSystemPath, "./", "chunkserver disk path"); -DEFINE_string(chunkfilepool_dir, - "./chunkfilepool/", +DEFINE_string(filePoolDir, + "./filePool/", "chunkfile pool dir"); -DEFINE_string(chunkfilepool_metapath, - "./chunkfilepool.meta", +DEFINE_string(filePoolMetaPath, + "./filePool.meta", "chunkfile pool meta info file path."); // preallocateNum仅在测试的时候使用,测试提前预分配固定数量的chunk // 当设置这个值的时候可以不用设置allocatepercent -DEFINE_uint32(preallocateNum, +DEFINE_uint32(preAllocateNum, 0, "preallocate chunk nums, this is JUST for curve test"); // 在系统初始化的时候,管理员需要预先格式化磁盘,并进行预分配 // 这时候只需要指定allocatepercent,allocatepercent是占整个盘的空间的百分比 -DEFINE_uint32(allocatepercent, +DEFINE_uint32(allocatePercent, 80, "preallocate storage percent of total disk"); @@ -90,7 +90,7 @@ using curve::fs::FileSystemType; using curve::fs::LocalFsFactory; using curve::fs::FileSystemInfo; using curve::fs::LocalFileSystem; -using curve::common::kChunkFilePoolMaigic; +using curve::common::kFilePoolMaigic; class CompareInternal { public: @@ -109,9 +109,9 @@ struct AllocateStruct { uint64_t chunknum; }; -int AllocateChunks(AllocateStruct* allocatestruct) { - char* data = new(std::nothrow)char[FLAGS_chunksize + FLAGS_metapagsize]; - memset(data, 0, FLAGS_chunksize + FLAGS_metapagsize); +int AllocateFiles(AllocateStruct* allocatestruct) { + char* data = new(std::nothrow)char[FLAGS_fileSize + 
FLAGS_metaPagSize]; + memset(data, 0, FLAGS_fileSize + FLAGS_metaPagSize); uint64_t count = 0; while (count < allocatestruct->chunknum) { @@ -123,7 +123,7 @@ int AllocateChunks(AllocateStruct* allocatestruct) { allocatestruct->allocateChunknum->load()); } std::string tmpchunkfilepath - = FLAGS_chunkfilepool_dir + "/" + filename; + = FLAGS_filePoolDir + "/" + filename; int ret = allocatestruct->fsptr->Open(tmpchunkfilepath.c_str(), O_RDWR | O_CREAT); @@ -135,7 +135,7 @@ int AllocateChunks(AllocateStruct* allocatestruct) { int fd = ret; ret = allocatestruct->fsptr->Fallocate(fd, 0, 0, - FLAGS_chunksize+FLAGS_metapagsize); + FLAGS_fileSize + FLAGS_metaPagSize); if (ret < 0) { allocatestruct->fsptr->Close(fd); *allocatestruct->checkwrong = true; @@ -145,7 +145,7 @@ int AllocateChunks(AllocateStruct* allocatestruct) { if (FLAGS_needWriteZero) { ret = allocatestruct->fsptr->Write(fd, data, 0, - FLAGS_chunksize+FLAGS_metapagsize); + FLAGS_fileSize + FLAGS_metaPagSize); if (ret < 0) { allocatestruct->fsptr->Close(fd); *allocatestruct->checkwrong = true; @@ -186,12 +186,12 @@ int main(int argc, char** argv) { std::atomic allocateChunknum_(0); std::vector tmpvec; - if (fsptr->Mkdir(FLAGS_chunkfilepool_dir.c_str()) < 0) { - LOG(ERROR) << "mkdir failed!, " << FLAGS_chunkfilepool_dir.c_str(); + if (fsptr->Mkdir(FLAGS_filePoolDir.c_str()) < 0) { + LOG(ERROR) << "mkdir failed!, " << FLAGS_filePoolDir.c_str(); return -1; } - if (fsptr->List(FLAGS_chunkfilepool_dir.c_str(), &tmpvec) < 0) { - LOG(ERROR) << "list dir failed!, " << FLAGS_chunkfilepool_dir.c_str(); + if (fsptr->List(FLAGS_filePoolDir.c_str(), &tmpvec) < 0) { + LOG(ERROR) << "list dir failed!, " << FLAGS_filePoolDir.c_str(); return -1; } @@ -200,7 +200,7 @@ int main(int argc, char** argv) { allocateChunknum_.store(size + 1); FileSystemInfo finfo; - int r = fsptr->Statfs(FLAGS_filesystem_path, &finfo); + int r = fsptr->Statfs(FLAGS_fileSystemPath, &finfo); if (r != 0) { LOG(ERROR) << "get disk usage info failed!"; return -1; @@ -211,19 +211,19 @@ int main(int argc, char** argv) { << ", total space = " << finfo.total << ", freepercent = " << freepercent; - if (freepercent < FLAGS_allocatepercent && FLAGS_allocateByPercent) { + if (freepercent < FLAGS_allocatePercent && FLAGS_allocateByPercent) { LOG(ERROR) << "disk free space not enough."; return 0; } uint64_t preAllocateChunkNum = 0; - uint64_t preAllocateSize = FLAGS_allocatepercent * finfo.total / 100; + uint64_t preAllocateSize = FLAGS_allocatePercent * finfo.total / 100; if (FLAGS_allocateByPercent) { preAllocateChunkNum = preAllocateSize - / (FLAGS_chunksize + FLAGS_metapagsize); + / (FLAGS_fileSize + FLAGS_metaPagSize); } else { - preAllocateChunkNum = FLAGS_preallocateNum; + preAllocateChunkNum = FLAGS_preAllocateNum; } bool checkwrong = false; @@ -237,8 +237,8 @@ int main(int argc, char** argv) { allocateStruct.mtx = &mtx; allocateStruct.chunknum = threadAllocateNum; - thvec.push_back(std::move(std::thread(AllocateChunks, &allocateStruct))); - thvec.push_back(std::move(std::thread(AllocateChunks, &allocateStruct))); + thvec.push_back(std::move(std::thread(AllocateFiles, &allocateStruct))); + thvec.push_back(std::move(std::thread(AllocateFiles, &allocateStruct))); for (auto& iter : thvec) { iter.join(); @@ -249,12 +249,12 @@ int main(int argc, char** argv) { return -1; } - int ret = curve::chunkserver::ChunkfilePoolHelper::PersistEnCodeMetaInfo( + int ret = curve::chunkserver::FilePoolHelper::PersistEnCodeMetaInfo( fsptr, - FLAGS_chunksize, - FLAGS_metapagsize, - FLAGS_chunkfilepool_dir, 
- FLAGS_chunkfilepool_metapath); + FLAGS_fileSize, + FLAGS_metaPagSize, + FLAGS_filePoolDir, + FLAGS_filePoolMetaPath); if (ret == -1) { LOG(ERROR) << "persist chunkfile pool meta info failed!"; @@ -266,36 +266,36 @@ int main(int argc, char** argv) { uint32_t metapagesize = 0; std::string chunkfilePath; - ret = curve::chunkserver::ChunkfilePoolHelper::DecodeMetaInfoFromMetaFile( + ret = curve::chunkserver::FilePoolHelper::DecodeMetaInfoFromMetaFile( fsptr, - FLAGS_chunkfilepool_metapath, + FLAGS_filePoolMetaPath, 4096, &chunksize, &metapagesize, &chunkfilePath); if (ret == -1) { LOG(ERROR) << "chunkfile pool meta info file got something wrong!"; - fsptr->Delete(FLAGS_chunkfilepool_metapath.c_str()); + fsptr->Delete(FLAGS_filePoolMetaPath.c_str()); return -1; } bool valid = false; do { - if (chunksize != FLAGS_chunksize) { + if (chunksize != FLAGS_fileSize) { LOG(ERROR) << "chunksize meta info persistency wrong!"; break; } - if (metapagesize != FLAGS_metapagsize) { + if (metapagesize != FLAGS_metaPagSize) { LOG(ERROR) << "metapagesize meta info persistency wrong!"; break; } if (strcmp(chunkfilePath.c_str(), - FLAGS_chunkfilepool_dir.c_str()) != 0) { + FLAGS_filePoolDir.c_str()) != 0) { LOG(ERROR) << "meta info persistency failed!" << ", read chunkpath = " << chunkfilePath.c_str() - << ", real chunkpath = " << FLAGS_chunkfilepool_dir.c_str(); + << ", real chunkpath = " << FLAGS_filePoolDir.c_str(); break; } diff --git a/src/tools/metric_name.h b/src/tools/metric_name.h index 814e47f9ee..106bc13ff9 100644 --- a/src/tools/metric_name.h +++ b/src/tools/metric_name.h @@ -102,6 +102,14 @@ inline std::string GetCSLeftChunkName(const std::string& csAddr) { return metricName; } +inline std::string GetCSLeftWalSegmentName(const std::string& csAddr) { + std::string tmpName = kChunkServerMetricPrefix + + csAddr + "_walfilepool_left"; + std::string metricName; + bvar::to_underscored_name(&metricName, tmpName); + return metricName; +} + inline std::string GetOpNumMetricName(const std::string& opName) { std::string tmpName = kSechduleOpMetricpPrefix + opName + "_num"; diff --git a/src/tools/status_tool.cpp b/src/tools/status_tool.cpp index 403b162763..ba2ca8e7a5 100644 --- a/src/tools/status_tool.cpp +++ b/src/tools/status_tool.cpp @@ -31,6 +31,7 @@ DEFINE_bool(checkCSAlive, false, "if true, it will check the online state of " "chunkservers with rpc in chunkserver-list"); DEFINE_bool(listClientInRepo, true, "if true, list-client will list all clients" " include that in repo"); +DEFINE_uint64(walSegmentSize, 8388608, "wal segment size"); DECLARE_string(mdsAddr); DECLARE_string(etcdAddr); DECLARE_string(mdsDummyPort); @@ -691,7 +692,8 @@ int StatusTool::PrintChunkserverStatus(bool checkLeftSize) { uint64_t total = 0; uint64_t online = 0; uint64_t offline = 0; - std::vector leftSize; + std::vector chunkLeftSize; + std::vector walSegmentLeftSize; std::vector offlineCs; // 获取chunkserver的online状态 for (const auto& chunkserver : chunkservers) { @@ -718,7 +720,19 @@ int StatusTool::PrintChunkserverStatus(bool checkLeftSize) { continue; } uint64_t size = chunkNum * FLAGS_chunkSize; - leftSize.emplace_back(size / mds::kGB); + chunkLeftSize.emplace_back(size / mds::kGB); + // walfilepool left size + metricName = GetCSLeftWalSegmentName(csAddr); + uint64_t walSegmentNum; + res = metricClient_->GetMetricUint(csAddr, metricName, &walSegmentNum); + if (res != MetricRet::kOK) { + std::cout << "Get left wal segment size of chunkserver " << csAddr + << " fail!" 
<< std::endl; + ret = -1; + continue; + } + size = walSegmentNum * FLAGS_walSegmentSize; + walSegmentLeftSize.emplace_back(size / mds::kGB); } // 获取offline chunkserver的恢复状态 std::vector offlineRecover; @@ -759,14 +773,16 @@ int StatusTool::PrintChunkserverStatus(bool checkLeftSize) { return ret; } - PrintCsLeftSizeStatistics(leftSize); + PrintCsLeftSizeStatistics("chunkfilepool", chunkLeftSize); + PrintCsLeftSizeStatistics("walfilepool", walSegmentLeftSize); return ret; } void StatusTool::PrintCsLeftSizeStatistics( + const std::string& name, const std::vector& leftSize) { if (leftSize.empty()) { - std::cout << "No chunkserver left chunk size found!" << std::endl; + std::cout << "No " << name << " left size found!" << std::endl; return; } uint64_t min = leftSize[0]; @@ -791,7 +807,8 @@ void StatusTool::PrintCsLeftSizeStatistics( double var = sum / leftSize.size(); std:: cout.setf(std::ios::fixed); std::cout<< std::setprecision(2); - std::cout << "left size: min = " << min << "GB" + std::cout<< name; + std::cout << " left size: min = " << min << "GB" << ", max = " << max << "GB" << ", average = " << avg << "GB" << ", range = " << range << "GB" diff --git a/src/tools/status_tool.h b/src/tools/status_tool.h index eb33246650..81b7d379ef 100644 --- a/src/tools/status_tool.h +++ b/src/tools/status_tool.h @@ -138,7 +138,8 @@ class StatusTool : public CurveTool { int PrintChunkserverStatus(bool checkLeftSize = true); int PrintClientStatus(); int ClientListCmd(); - void PrintCsLeftSizeStatistics(const std::vector& leftSize); + void PrintCsLeftSizeStatistics(const std::string& name, + const std::vector& leftSize); int PrintSnapshotCloneStatus(); /** diff --git a/test/chunkserver/BUILD b/test/chunkserver/BUILD index 708b18d0e3..3dca553c18 100644 --- a/test/chunkserver/BUILD +++ b/test/chunkserver/BUILD @@ -31,7 +31,7 @@ DEPS = [ "//test/fs:fs_mock", "//test/chunkserver:chunkserver_mock", "//test/chunkserver/datastore:datastore_mock", - "//test/chunkserver/datastore:chunkfilepool_helper", + "//test/chunkserver/datastore:filepool_helper", "@com_google_googletest//:gtest", "@com_google_googletest//:gtest_main", ] diff --git a/test/chunkserver/chunkserver_test_util.cpp b/test/chunkserver/chunkserver_test_util.cpp index b32cbc15bd..3e897f0ef2 100644 --- a/test/chunkserver/chunkserver_test_util.cpp +++ b/test/chunkserver/chunkserver_test_util.cpp @@ -61,13 +61,13 @@ std::string Exec(const char *cmd) { return result; } -std::shared_ptr InitChunkfilePool(std::shared_ptr fsptr, //NOLINT +std::shared_ptr InitFilePool(std::shared_ptr fsptr, //NOLINT int chunkfileCount, int chunkfileSize, int metaPageSize, std::string poolpath, std::string metaPath) { - auto filePoolPtr = std::make_shared(fsptr); + auto filePoolPtr = std::make_shared(fsptr); if (filePoolPtr == nullptr) { LOG(FATAL) << "allocate chunkfile pool failed!"; } @@ -85,9 +85,9 @@ std::shared_ptr InitChunkfilePool(std::shared_ptr(fs); - if (nullptr == copysetNodeOptions.chunkfilePool) { + copysetNodeOptions.chunkFilePool = std::make_shared(fs); + if (nullptr == copysetNodeOptions.chunkFilePool) { LOG(FATAL) << "new chunfilepool failed"; } - ChunkfilePoolOptions cfop; - if (false == copysetNodeOptions.chunkfilePool->Initialize(cfop)) { + FilePoolOptions cfop; + if (false == copysetNodeOptions.chunkFilePool->Initialize(cfop)) { LOG(FATAL) << "chunfilepool init failed"; } else { LOG(INFO) << "chunfilepool init success"; @@ -248,8 +248,8 @@ TestCluster::TestCluster(const std::string &clusterName, int TestCluster::StartPeer(const PeerId &peerId, const bool 
empty, - bool get_chunk_from_pool, - bool create_chunkfilepool) { + bool getChunkFromPool, + bool createChunkFilePool) { LOG(INFO) << "going start peer: " << peerId.to_string(); auto it = peersMap_.find(peerId.to_string()); if (it != peersMap_.end()) { @@ -293,7 +293,7 @@ int TestCluster::StartPeer(const PeerId &peerId, } else if (0 == pid) { /* 在子进程起一个 ChunkServer */ StartPeerNode(peer->options, peer->conf, - get_chunk_from_pool, create_chunkfilepool); + getChunkFromPool, createChunkFilePool); exit(0); } @@ -430,8 +430,8 @@ int TestCluster::SetElectionTimeoutMs(int electionTimeoutMs) { int TestCluster::StartPeerNode(CopysetNodeOptions options, const Configuration conf, - bool enable_getchunk_from_pool, - bool create_chunkfilepool) { + bool enableGetchunkFromPool, + bool createChunkFilePool) { /** * 用于注释,说明 cmd format */ @@ -488,11 +488,11 @@ int TestCluster::StartPeerNode(CopysetNodeOptions options, std::string getchunk_from_pool; butil::string_printf(&getchunk_from_pool, "-enable_getchunk_from_pool=%d", - enable_getchunk_from_pool); + enableGetchunkFromPool); std::string create_pool; butil::string_printf(&create_pool, "-create_chunkfilepool=%d", - create_chunkfilepool); + createChunkFilePool); std::string logic_pool_id; butil::string_printf(&logic_pool_id, "-logic_pool_id=%d", logicPoolID_); std::string copyset_id; diff --git a/test/chunkserver/chunkserver_test_util.h b/test/chunkserver/chunkserver_test_util.h index b39e328d6d..b329e069cd 100644 --- a/test/chunkserver/chunkserver_test_util.h +++ b/test/chunkserver/chunkserver_test_util.h @@ -32,7 +32,7 @@ #include #include -#include "src/chunkserver/datastore/chunkfile_pool.h" +#include "src/chunkserver/datastore/file_pool.h" #include "include/chunkserver/chunkserver_common.h" #include "src/fs/local_filesystem.h" #include "src/chunkserver/copyset_node.h" @@ -45,15 +45,15 @@ using curve::fs::LocalFileSystem; std::string Exec(const char *cmd); /** - * 当前chunkfilepool需要事先格式化,才能使用,此函数用于事先格式化chunkfilepool + * 当前FilePool需要事先格式化,才能使用,此函数用于事先格式化FilePool * @param fsptr:本文文件系统指针 * @param chunkfileSize:chunk文件的大小 * @param metaPageSize:chunk文件的meta page大小 * @param poolpath:文件池的路径,例如./chunkfilepool/ * @param metaPath:meta文件路径,例如./chunkfilepool/chunkfilepool.meta - * @return 初始化成功返回ChunkfilePool指针,否则返回null + * @return 初始化成功返回FilePool指针,否则返回null */ -std::shared_ptr InitChunkfilePool(std::shared_ptr fsptr, //NOLINT +std::shared_ptr InitFilePool(std::shared_ptr fsptr, //NOLINT int chunkfileCount, int chunkfileSize, int metaPageSize, @@ -118,14 +118,14 @@ class TestCluster { * 启动一个 Peer * @param peerId * @param empty 初始化配置是否为空 - * @param: get_chunk_from_pool是否从chunkfilepool获取chunk - * @param: create_chunkfilepool是否创建chunkfilepool,重启的情况下不需要 + * @param: get_chunk_from_pool是否从FilePool获取chunk + * @param: createFilePool是否创建FilePool,重启的情况下不需要 * @return 0:成功,-1 失败 */ int StartPeer(const PeerId &peerId, const bool empty = false, - bool get_chunk_from_pool = false, - bool create_chunkfilepool = true); + bool getChunkFrom_pool = false, + bool createFilePool = true); /** * 关闭一个 peer,使用 SIGINT * @param peerId @@ -171,7 +171,7 @@ class TestCluster { static int StartPeerNode(CopysetNodeOptions options, const Configuration conf, bool from_chunkfile_pool = false, - bool create_chunkfilepool = true); + bool createFilePool = true); public: /** diff --git a/test/chunkserver/copyset_node_manager_test.cpp b/test/chunkserver/copyset_node_manager_test.cpp index 151522ba42..ba2a5ee895 100644 --- a/test/chunkserver/copyset_node_manager_test.cpp +++ 
b/test/chunkserver/copyset_node_manager_test.cpp @@ -71,8 +71,8 @@ class CopysetNodeManagerTest : public ::testing::Test { LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); ASSERT_TRUE(nullptr != fs); defaultOptions_.localFileSystem = fs; - defaultOptions_.chunkfilePool = - std::make_shared(fs); + defaultOptions_.chunkFilePool = + std::make_shared(fs); defaultOptions_.trash = std::make_shared(); } diff --git a/test/chunkserver/copyset_node_test.cpp b/test/chunkserver/copyset_node_test.cpp index e9b40c7449..77a25bcb61 100644 --- a/test/chunkserver/copyset_node_test.cpp +++ b/test/chunkserver/copyset_node_test.cpp @@ -141,8 +141,8 @@ class CopysetNodeTest : public ::testing::Test { LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); ASSERT_TRUE(nullptr != fs); defaultOptions_.localFileSystem = fs; - defaultOptions_.chunkfilePool = - std::make_shared(fs); + defaultOptions_.chunkFilePool = + std::make_shared(fs); defaultOptions_.trash = std::make_shared(); } diff --git a/test/chunkserver/copyset_service_test.cpp b/test/chunkserver/copyset_service_test.cpp index f7fb690d25..080c5c32b7 100644 --- a/test/chunkserver/copyset_service_test.cpp +++ b/test/chunkserver/copyset_service_test.cpp @@ -104,8 +104,8 @@ TEST_F(CopysetServiceTest, basic) { copysetNodeOptions.raftSnapshotUri = copysetDir; copysetNodeOptions.concurrentapply = new ConcurrentApplyModule(); copysetNodeOptions.localFileSystem = fs; - copysetNodeOptions.chunkfilePool = - std::make_shared(fs); + copysetNodeOptions.chunkFilePool = + std::make_shared(fs); ASSERT_EQ(0, copysetNodeManager->Init(copysetNodeOptions)); ASSERT_EQ(0, copysetNodeManager->Run()); @@ -209,8 +209,8 @@ TEST_F(CopysetServiceTest, basic2) { copysetNodeOptions.raftSnapshotUri = copysetDir; copysetNodeOptions.concurrentapply = new ConcurrentApplyModule(); copysetNodeOptions.localFileSystem = fs; - copysetNodeOptions.chunkfilePool = - std::make_shared(fs); + copysetNodeOptions.chunkFilePool = + std::make_shared(fs); ASSERT_EQ(0, copysetNodeManager->Init(copysetNodeOptions)); ASSERT_EQ(0, copysetNodeManager->Run()); diff --git a/test/chunkserver/datastore/BUILD b/test/chunkserver/datastore/BUILD index 117d65eb69..6fcf1ce001 100644 --- a/test/chunkserver/datastore/BUILD +++ b/test/chunkserver/datastore/BUILD @@ -17,7 +17,7 @@ cc_library( name = "datastore_mock", srcs = [ - "mock_chunkfile_pool.h", + "mock_file_pool.h", "mock_datastore.h" ], deps = [ @@ -27,10 +27,10 @@ cc_library( ) cc_library( - name = "chunkfilepool_helper", + name = "filepool_helper", srcs = [ - "chunkfilepool_helper.cpp", - "chunkfilepool_helper.h", + "filepool_helper.cpp", + "filepool_helper.h", ], deps = [ "@com_google_googletest//:gtest", @@ -43,8 +43,8 @@ cc_library( cc_test( name = "curve_datastore_unittest", srcs = [ - "chunkfilepool_unittest.cpp", - "chunkfilepool_mock_unittest.cpp", + "filepool_unittest.cpp", + "filepool_mock_unittest.cpp", "datastore_mock_unittest.cpp", "datastore_unittest_main.cpp", "file_helper_unittest.cpp", @@ -62,6 +62,6 @@ cc_test( "//test/fs:fs_mock", "//test/chunkserver/datastore:datastore_mock", "//external:json", - "//test/chunkserver/datastore:chunkfilepool_helper", + "//test/chunkserver/datastore:filepool_helper", ], ) diff --git a/test/chunkserver/datastore/datastore_mock_unittest.cpp b/test/chunkserver/datastore/datastore_mock_unittest.cpp index fdee83b7ce..165d4ab507 100644 --- a/test/chunkserver/datastore/datastore_mock_unittest.cpp +++ b/test/chunkserver/datastore/datastore_mock_unittest.cpp @@ -32,7 +32,7 @@ #include "src/chunkserver/datastore/define.h" 
#include "src/chunkserver/datastore/filename_operator.h" #include "src/chunkserver/datastore/chunkserver_datastore.h" -#include "test/chunkserver/datastore/mock_chunkfile_pool.h" +#include "test/chunkserver/datastore/mock_file_pool.h" #include "test/fs/mock_local_filesystem.h" using curve::fs::LocalFileSystem; @@ -100,7 +100,7 @@ class CSDataStore_test : public testing::Test { public: void SetUp() { lfs_ = std::make_shared(); - fpool_ = std::make_shared(lfs_); + fpool_ = std::make_shared(lfs_); DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; @@ -184,10 +184,10 @@ class CSDataStore_test : public testing::Test { .WillRepeatedly(Return(3)); EXPECT_CALL(*lfs_, Open(chunk2Path, Truly(hasCreatFlag))) .Times(0); - // fake fpool->GetChunk() - ON_CALL(*fpool_, GetChunk(_, NotNull())) + // fake fpool->GetFile() + ON_CALL(*fpool_, GetFile(_, NotNull())) .WillByDefault(Return(0)); - EXPECT_CALL(*fpool_, RecycleChunk(_)) + EXPECT_CALL(*fpool_, RecycleFile(_)) .WillRepeatedly(Return(0)); // fake Close ON_CALL(*lfs_, Close(_)) @@ -239,7 +239,7 @@ class CSDataStore_test : public testing::Test { protected: int fdMock; std::shared_ptr lfs_; - std::shared_ptr fpool_; + std::shared_ptr fpool_; std::shared_ptr dataStore; char chunk1MetaPage[PAGE_SIZE]; char chunk2MetaPage[PAGE_SIZE]; @@ -680,10 +680,10 @@ TEST_F(CSDataStore_test, WriteChunkTest1) { offset, length, nullptr)); - // expect call chunkfile pool GetChunk + // expect call chunkfile pool GetFile EXPECT_CALL(*lfs_, FileExists(chunk3Path)) .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetChunk(chunk3Path, NotNull())) + EXPECT_CALL(*fpool_, GetFile(chunk3Path, NotNull())) .WillOnce(Return(0)); EXPECT_CALL(*lfs_, Open(chunk3Path, _)) .Times(1) @@ -995,8 +995,8 @@ TEST_F(CSDataStore_test, WriteChunkTest7) { // snapshot not exists EXPECT_CALL(*lfs_, FileExists(snapPath)) .WillOnce(Return(false)); - // expect call chunkfile pool GetChunk - EXPECT_CALL(*fpool_, GetChunk(snapPath, NotNull())) + // expect call chunkfile pool GetFile + EXPECT_CALL(*fpool_, GetFile(snapPath, NotNull())) .WillOnce(Return(0)); EXPECT_CALL(*lfs_, Open(snapPath, _)) .WillOnce(Return(4)); @@ -1256,10 +1256,10 @@ TEST_F(CSDataStore_test, WriteChunkTest13) { // create new chunk and open it string chunk3Path = string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); - // expect call chunkfile pool GetChunk + // expect call chunkfile pool GetFile EXPECT_CALL(*lfs_, FileExists(chunk3Path)) .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetChunk(chunk3Path, NotNull())) + EXPECT_CALL(*fpool_, GetFile(chunk3Path, NotNull())) .WillOnce(Return(0)); EXPECT_CALL(*lfs_, Open(chunk3Path, _)) .Times(1) @@ -1441,10 +1441,10 @@ TEST_F(CSDataStore_test, WriteChunkTest14) { // create new chunk and open it string chunk3Path = string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); - // expect call chunkfile pool GetChunk + // expect call chunkfile pool GetFile EXPECT_CALL(*lfs_, FileExists(chunk3Path)) .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetChunk(chunk3Path, NotNull())) + EXPECT_CALL(*fpool_, GetFile(chunk3Path, NotNull())) .WillOnce(Return(0)); EXPECT_CALL(*lfs_, Open(chunk3Path, _)) .Times(1) @@ -1744,7 +1744,7 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest1) { // getchunk failed EXPECT_CALL(*lfs_, FileExists(snapPath)) .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetChunk(snapPath, NotNull())) + EXPECT_CALL(*fpool_, GetFile(snapPath, NotNull())) .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, @@ 
-1759,10 +1759,10 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest1) { ASSERT_EQ(2, info.curSn); ASSERT_EQ(0, info.snapSn); - // expect call chunkfile pool GetChunk + // expect call chunkfile pool GetFile EXPECT_CALL(*lfs_, FileExists(snapPath)) .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetChunk(snapPath, NotNull())) + EXPECT_CALL(*fpool_, GetFile(snapPath, NotNull())) .WillOnce(Return(0)); // open snapshot failed EXPECT_CALL(*lfs_, Open(snapPath, _)) @@ -1826,10 +1826,10 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest2) { // will Open snapshot file, snap sn equals 2 string snapPath = string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); - // expect call chunk file pool GetChunk + // expect call chunk file pool GetFile EXPECT_CALL(*lfs_, FileExists(snapPath)) .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetChunk(snapPath, NotNull())) + EXPECT_CALL(*fpool_, GetFile(snapPath, NotNull())) .WillOnce(Return(0)); EXPECT_CALL(*lfs_, Open(snapPath, _)) .WillOnce(Return(4)); @@ -1888,10 +1888,10 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest3) { // will Open snapshot file, snap sn equals 2 string snapPath = string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); - // expect call chunk file pool GetChunk + // expect call chunk file pool GetFile EXPECT_CALL(*lfs_, FileExists(snapPath)) .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetChunk(snapPath, NotNull())) + EXPECT_CALL(*fpool_, GetFile(snapPath, NotNull())) .WillOnce(Return(0)); EXPECT_CALL(*lfs_, Open(snapPath, _)) .WillOnce(Return(4)); @@ -2020,10 +2020,10 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest4) { // will Open snapshot file, snap sn equals 2 string snapPath = string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); - // expect call chunk file pool GetChunk + // expect call chunk file pool GetFile EXPECT_CALL(*lfs_, FileExists(snapPath)) .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetChunk(snapPath, NotNull())) + EXPECT_CALL(*fpool_, GetFile(snapPath, NotNull())) .WillOnce(Return(0)); EXPECT_CALL(*lfs_, Open(snapPath, _)) .WillOnce(Return(4)); @@ -2102,10 +2102,10 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest5) { string chunk3Path = string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); - // expect call chunk file pool GetChunk + // expect call chunk file pool GetFile EXPECT_CALL(*lfs_, FileExists(chunk3Path)) .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetChunk(chunk3Path, NotNull())) + EXPECT_CALL(*fpool_, GetFile(chunk3Path, NotNull())) .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, dataStore->WriteChunk(id, @@ -2118,7 +2118,7 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest5) { // getchunk success EXPECT_CALL(*lfs_, FileExists(chunk3Path)) .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetChunk(chunk3Path, NotNull())) + EXPECT_CALL(*fpool_, GetFile(chunk3Path, NotNull())) .WillOnce(Return(0)); // set open chunk file failed EXPECT_CALL(*lfs_, Open(chunk3Path, _)) @@ -2243,10 +2243,10 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest6) { // create new chunk and open it string chunk3Path = string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); - // expect call chunkfile pool GetChunk + // expect call chunkfile pool GetFile EXPECT_CALL(*lfs_, FileExists(chunk3Path)) .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetChunk(chunk3Path, NotNull())) + EXPECT_CALL(*fpool_, GetFile(chunk3Path, NotNull())) .WillOnce(Return(0)); EXPECT_CALL(*lfs_, Open(chunk3Path, _)) .Times(1) @@ -2466,10 +2466,10 @@ 
TEST_F(CSDataStore_test, ReadChunkTest4) { // create new chunk and open it string chunk3Path = string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); - // expect call chunkfile pool GetChunk + // expect call chunkfile pool GetFile EXPECT_CALL(*lfs_, FileExists(chunk3Path)) .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetChunk(chunk3Path, NotNull())) + EXPECT_CALL(*fpool_, GetFile(chunk3Path, NotNull())) .WillOnce(Return(0)); EXPECT_CALL(*lfs_, Open(chunk3Path, _)) .Times(1) @@ -2946,8 +2946,8 @@ TEST_F(CSDataStore_test, DeleteChunkTest3) { // chunk will be closed EXPECT_CALL(*lfs_, Close(3)) .Times(1); - // expect to call chunkfilepool RecycleChunk - EXPECT_CALL(*fpool_, RecycleChunk(chunk2Path)) + // expect to call FilePool RecycleFile + EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)) .WillOnce(Return(0)); EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteChunk(id, sn)); @@ -2979,8 +2979,8 @@ TEST_F(CSDataStore_test, DeleteChunkTest4) { // chunk will be closed EXPECT_CALL(*lfs_, Close(3)) .Times(0); - // expect to call chunkfilepool RecycleChunk - EXPECT_CALL(*fpool_, RecycleChunk(chunk2Path)) + // expect to call FilePool RecycleFile + EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)) .Times(0); EXPECT_EQ(CSErrorCode::BackwardRequestError, dataStore->DeleteChunk(id, 1)); @@ -2991,8 +2991,8 @@ TEST_F(CSDataStore_test, DeleteChunkTest4) { // chunk will be closed EXPECT_CALL(*lfs_, Close(3)) .Times(1); - // expect to call chunkfilepool RecycleChunk - EXPECT_CALL(*fpool_, RecycleChunk(chunk2Path)) + // expect to call FilePool RecycleFile + EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)) .WillOnce(Return(0)); EXPECT_EQ(CSErrorCode::Success, dataStore->DeleteChunk(id, 3)); @@ -3019,8 +3019,8 @@ TEST_F(CSDataStore_test, DeleteChunkErrorTest1) { // chunk will be closed EXPECT_CALL(*lfs_, Close(3)) .Times(1); - // expect to call chunkfilepool RecycleChunk - EXPECT_CALL(*fpool_, RecycleChunk(chunk2Path)) + // expect to call FilePool RecycleFile + EXPECT_CALL(*fpool_, RecycleFile(chunk2Path)) .WillOnce(Return(-1)); EXPECT_EQ(CSErrorCode::InternalError, dataStore->DeleteChunk(id, sn)); @@ -3088,8 +3088,8 @@ TEST_F(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest2) { // snapshot will be closed EXPECT_CALL(*lfs_, Close(2)) .Times(1); - // expect to call chunkfilepool RecycleChunk - EXPECT_CALL(*fpool_, RecycleChunk(chunk1snap1Path)) + // expect to call FilePool RecycleFile + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) .Times(1); // chunk's metapage should not be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, PAGE_SIZE)) @@ -3143,8 +3143,8 @@ TEST_F(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) { // snapshot will be closed EXPECT_CALL(*lfs_, Close(2)) .Times(1); - // expect to call chunkfilepool RecycleChunk - EXPECT_CALL(*fpool_, RecycleChunk(chunk1snap1Path)) + // expect to call FilePool RecycleFile + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) .Times(1); // chunk's metapage should not be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, PAGE_SIZE)) @@ -3176,8 +3176,8 @@ TEST_F(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest4) { // snapshot will be closed EXPECT_CALL(*lfs_, Close(2)) .Times(1); - // expect to call chunkfilepool RecycleChunk - EXPECT_CALL(*fpool_, RecycleChunk(chunk1snap1Path)) + // expect to call FilePool RecycleFile + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) .Times(1); // chunk's metapage will be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, PAGE_SIZE)) @@ -3271,10 +3271,10 @@ 
TEST_F(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest7) { // create new chunk and open it string chunk3Path = string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); - // expect call chunkfile pool GetChunk + // expect call chunkfile pool GetFile EXPECT_CALL(*lfs_, FileExists(chunk3Path)) .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetChunk(chunk3Path, NotNull())) + EXPECT_CALL(*fpool_, GetFile(chunk3Path, NotNull())) .WillOnce(Return(0)); EXPECT_CALL(*lfs_, Open(chunk3Path, _)) .Times(1) @@ -3347,8 +3347,8 @@ TEST_F(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest8) { // snapshot will not be closed EXPECT_CALL(*lfs_, Close(2)) .Times(0); - // expect to call chunkfilepool RecycleChunk - EXPECT_CALL(*fpool_, RecycleChunk(chunk1snap1Path)) + // expect to call FilePool RecycleFile + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) .Times(0); // chunk's metapage should be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, PAGE_SIZE)) @@ -3398,8 +3398,8 @@ TEST_F(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest9) { // snapshot will not be closed EXPECT_CALL(*lfs_, Close(2)) .Times(0); - // expect to call chunkfilepool RecycleChunk - EXPECT_CALL(*fpool_, RecycleChunk(chunk1snap1Path)) + // expect to call FilePool RecycleFile + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) .Times(0); // chunk's metapage should not be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, PAGE_SIZE)) @@ -3467,8 +3467,8 @@ TEST_F(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnErrorTest2) { // snapshot will be closed EXPECT_CALL(*lfs_, Close(2)) .Times(1); - // expect to call chunkfilepool RecycleChunk - EXPECT_CALL(*fpool_, RecycleChunk(chunk1snap1Path)) + // expect to call FilePool RecycleFile + EXPECT_CALL(*fpool_, RecycleFile(chunk1snap1Path)) .WillOnce(Return(-1)); // chunk's metapage will be updated EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, PAGE_SIZE)) @@ -3543,10 +3543,10 @@ TEST_F(CSDataStore_test, CreateCloneChunkTest) { // create new chunk and open it string chunk3Path = string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); - // expect call chunkfile pool GetChunk + // expect call chunkfile pool GetFile EXPECT_CALL(*lfs_, FileExists(chunk3Path)) .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetChunk(chunk3Path, NotNull())) + EXPECT_CALL(*fpool_, GetFile(chunk3Path, NotNull())) .WillOnce(Return(0)); EXPECT_CALL(*lfs_, Open(chunk3Path, _)) .Times(1) @@ -3693,10 +3693,10 @@ TEST_F(CSDataStore_test, CreateCloneChunkErrorTest) { // create new chunk and open it string chunk3Path = string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); - // expect call chunk file pool GetChunk + // expect call chunk file pool GetFile EXPECT_CALL(*lfs_, FileExists(chunk3Path)) .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetChunk(chunk3Path, NotNull())) + EXPECT_CALL(*fpool_, GetFile(chunk3Path, NotNull())) .WillOnce(Return(-UT_ERRNO)); EXPECT_EQ(CSErrorCode::InternalError, dataStore->CreateCloneChunk(id, @@ -3756,10 +3756,10 @@ TEST_F(CSDataStore_test, PasteChunkTest1) { // create new chunk and open it string chunk3Path = string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); - // expect call chunkfile pool GetChunk + // expect call chunkfile pool GetFile EXPECT_CALL(*lfs_, FileExists(chunk3Path)) .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetChunk(chunk3Path, NotNull())) + EXPECT_CALL(*fpool_, GetFile(chunk3Path, NotNull())) .WillOnce(Return(0)); EXPECT_CALL(*lfs_, Open(chunk3Path, _)) .Times(1) @@ -3977,10 
+3977,10 @@ TEST_F(CSDataStore_test, PasteChunkErrorTest1) { // create new chunk and open it string chunk3Path = string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); - // expect call chunkfile pool GetChunk + // expect call chunkfile pool GetFile EXPECT_CALL(*lfs_, FileExists(chunk3Path)) .WillOnce(Return(false)); - EXPECT_CALL(*fpool_, GetChunk(chunk3Path, NotNull())) + EXPECT_CALL(*fpool_, GetFile(chunk3Path, NotNull())) .WillOnce(Return(0)); EXPECT_CALL(*lfs_, Open(chunk3Path, _)) .Times(1) diff --git a/test/chunkserver/datastore/chunkfilepool_helper.cpp b/test/chunkserver/datastore/filepool_helper.cpp similarity index 97% rename from test/chunkserver/datastore/chunkfilepool_helper.cpp rename to test/chunkserver/datastore/filepool_helper.cpp index 2142170d5a..3c7c2f1361 100644 --- a/test/chunkserver/datastore/chunkfilepool_helper.cpp +++ b/test/chunkserver/datastore/filepool_helper.cpp @@ -20,7 +20,7 @@ * Author: tongguangxun */ -#include "test/chunkserver/datastore/chunkfilepool_helper.h" +#include "test/chunkserver/datastore/filepool_helper.h" void allocateChunk(std::shared_ptr fsptr, uint32_t num, diff --git a/test/chunkserver/datastore/chunkfilepool_helper.h b/test/chunkserver/datastore/filepool_helper.h similarity index 86% rename from test/chunkserver/datastore/chunkfilepool_helper.h rename to test/chunkserver/datastore/filepool_helper.h index 63b6e9b68b..e2968c5042 100644 --- a/test/chunkserver/datastore/chunkfilepool_helper.h +++ b/test/chunkserver/datastore/filepool_helper.h @@ -19,8 +19,8 @@ * File Created: Tuesday, 29th January 2019 11:44:59 am * Author: tongguangxun */ -#ifndef TEST_CHUNKSERVER_DATASTORE_CHUNKFILEPOOL_HELPER_H_ -#define TEST_CHUNKSERVER_DATASTORE_CHUNKFILEPOOL_HELPER_H_ +#ifndef TEST_CHUNKSERVER_DATASTORE_FILEPOOL_HELPER_H_ +#define TEST_CHUNKSERVER_DATASTORE_FILEPOOL_HELPER_H_ #include #include @@ -40,4 +40,4 @@ void allocateChunk(std::shared_ptr fsptr, std::string poolDir, uint32_t chunkSize); -#endif // TEST_CHUNKSERVER_DATASTORE_CHUNKFILEPOOL_HELPER_H_ +#endif // TEST_CHUNKSERVER_DATASTORE_FILEPOOL_HELPER_H_ diff --git a/test/chunkserver/datastore/chunkfilepool_mock_unittest.cpp b/test/chunkserver/datastore/filepool_mock_unittest.cpp similarity index 85% rename from test/chunkserver/datastore/chunkfilepool_mock_unittest.cpp rename to test/chunkserver/datastore/filepool_mock_unittest.cpp index 80af5275c6..95f6691083 100644 --- a/test/chunkserver/datastore/chunkfilepool_mock_unittest.cpp +++ b/test/chunkserver/datastore/filepool_mock_unittest.cpp @@ -30,7 +30,7 @@ #include "include/chunkserver/chunkserver_common.h" #include "src/common/crc32.h" #include "src/common/curve_define.h" -#include "src/chunkserver/datastore/chunkfile_pool.h" +#include "src/chunkserver/datastore/file_pool.h" #include "test/fs/mock_local_filesystem.h" using ::testing::_; @@ -48,7 +48,7 @@ using ::testing::SetArgPointee; using ::testing::SetArrayArgument; using curve::fs::MockLocalFileSystem; -using curve::common::kChunkFilePoolMaigic; +using curve::common::kFilePoolMaigic; namespace curve { namespace chunkserver { @@ -76,18 +76,18 @@ class CSChunkfilePoolMockTest : public testing::Test { Json::Value GenerateMetaJson() { // 正常的meta文件的json格式 - uint32_t crcsize = sizeof(kChunkFilePoolMaigic) + + uint32_t crcsize = sizeof(kFilePoolMaigic) + sizeof(CHUNK_SIZE) + sizeof(PAGE_SIZE) + poolDir.size(); char* crcbuf = new char[crcsize]; - ::memcpy(crcbuf, kChunkFilePoolMaigic, - sizeof(kChunkFilePoolMaigic)); - ::memcpy(crcbuf + sizeof(kChunkFilePoolMaigic), + ::memcpy(crcbuf, 
kFilePoolMaigic, + sizeof(kFilePoolMaigic)); + ::memcpy(crcbuf + sizeof(kFilePoolMaigic), &CHUNK_SIZE, sizeof(uint32_t)); - ::memcpy(crcbuf + sizeof(uint32_t) + sizeof(kChunkFilePoolMaigic), + ::memcpy(crcbuf + sizeof(uint32_t) + sizeof(kFilePoolMaigic), &PAGE_SIZE, sizeof(uint32_t)); - ::memcpy(crcbuf + 2 * sizeof(uint32_t) + sizeof(kChunkFilePoolMaigic), + ::memcpy(crcbuf + 2 * sizeof(uint32_t) + sizeof(kFilePoolMaigic), poolDir.c_str(), poolDir.size()); uint32_t crc = ::curve::common::CRC32(crcbuf, crcsize); delete[] crcbuf; @@ -115,10 +115,10 @@ class CSChunkfilePoolMockTest : public testing::Test { .Times(1); } - void FakePool(ChunkfilePool* pool, - const ChunkfilePoolOptions& options, + void FakePool(FilePool* pool, + const FilePoolOptions& options, uint32_t fileNum) { - if (options.getChunkFromPool) { + if (options.getFileFromPool) { FakeMetaFile(); std::vector fileNames; struct stat fileInfo; @@ -167,7 +167,7 @@ TEST_F(CSChunkfilePoolMockTest, PersistEnCodeMetaInfoTest) { EXPECT_CALL(*lfs_, Close(_)) .Times(0); ASSERT_EQ(-1, - ChunkfilePoolHelper::PersistEnCodeMetaInfo(lfs_, + FilePoolHelper::PersistEnCodeMetaInfo(lfs_, CHUNK_SIZE, PAGE_SIZE, poolDir, @@ -182,7 +182,7 @@ TEST_F(CSChunkfilePoolMockTest, PersistEnCodeMetaInfoTest) { EXPECT_CALL(*lfs_, Close(1)) .Times(1); ASSERT_EQ(-1, - ChunkfilePoolHelper::PersistEnCodeMetaInfo(lfs_, + FilePoolHelper::PersistEnCodeMetaInfo(lfs_, CHUNK_SIZE, PAGE_SIZE, poolDir, @@ -197,7 +197,7 @@ TEST_F(CSChunkfilePoolMockTest, PersistEnCodeMetaInfoTest) { EXPECT_CALL(*lfs_, Close(1)) .Times(1); ASSERT_EQ(0, - ChunkfilePoolHelper::PersistEnCodeMetaInfo(lfs_, + FilePoolHelper::PersistEnCodeMetaInfo(lfs_, CHUNK_SIZE, PAGE_SIZE, poolDir, @@ -219,7 +219,7 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { EXPECT_CALL(*lfs_, Close(_)) .Times(0); ASSERT_EQ(-1, - ChunkfilePoolHelper::DecodeMetaInfoFromMetaFile(lfs_, + FilePoolHelper::DecodeMetaInfoFromMetaFile(lfs_, poolMetaPath, metaFileSize, &chunksize, @@ -235,7 +235,7 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { EXPECT_CALL(*lfs_, Close(1)) .Times(1); ASSERT_EQ(-1, - ChunkfilePoolHelper::DecodeMetaInfoFromMetaFile(lfs_, + FilePoolHelper::DecodeMetaInfoFromMetaFile(lfs_, poolMetaPath, metaFileSize, &chunksize, @@ -253,7 +253,7 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { EXPECT_CALL(*lfs_, Close(1)) .Times(1); ASSERT_EQ(-1, - ChunkfilePoolHelper::DecodeMetaInfoFromMetaFile(lfs_, + FilePoolHelper::DecodeMetaInfoFromMetaFile(lfs_, poolMetaPath, metaFileSize, &chunksize, @@ -276,7 +276,7 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { EXPECT_CALL(*lfs_, Close(1)) .Times(1); ASSERT_EQ(-1, - ChunkfilePoolHelper::DecodeMetaInfoFromMetaFile(lfs_, + FilePoolHelper::DecodeMetaInfoFromMetaFile(lfs_, poolMetaPath, metaFileSize, &chunksize, @@ -299,14 +299,14 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { EXPECT_CALL(*lfs_, Close(1)) .Times(1); ASSERT_EQ(-1, - ChunkfilePoolHelper::DecodeMetaInfoFromMetaFile(lfs_, + FilePoolHelper::DecodeMetaInfoFromMetaFile(lfs_, poolMetaPath, metaFileSize, &chunksize, &metapagesize, &chunkfilePath)); } - // 解析Json格式成功,kChunkFilePoolPath为空 + // 解析Json格式成功,kFilePoolPath为空 { char buf[metaFileSize] = {0}; Json::Value root = GenerateMetaJson(); @@ -322,7 +322,7 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { EXPECT_CALL(*lfs_, Close(1)) .Times(1); ASSERT_EQ(-1, - ChunkfilePoolHelper::DecodeMetaInfoFromMetaFile(lfs_, + 
FilePoolHelper::DecodeMetaInfoFromMetaFile(lfs_, poolMetaPath, metaFileSize, &chunksize, @@ -345,7 +345,7 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { EXPECT_CALL(*lfs_, Close(1)) .Times(1); ASSERT_EQ(-1, - ChunkfilePoolHelper::DecodeMetaInfoFromMetaFile(lfs_, + FilePoolHelper::DecodeMetaInfoFromMetaFile(lfs_, poolMetaPath, metaFileSize, &chunksize, @@ -368,7 +368,7 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { EXPECT_CALL(*lfs_, Close(1)) .Times(1); ASSERT_EQ(-1, - ChunkfilePoolHelper::DecodeMetaInfoFromMetaFile(lfs_, + FilePoolHelper::DecodeMetaInfoFromMetaFile(lfs_, poolMetaPath, metaFileSize, &chunksize, @@ -390,7 +390,7 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { EXPECT_CALL(*lfs_, Close(1)) .Times(1); ASSERT_EQ(0, - ChunkfilePoolHelper::DecodeMetaInfoFromMetaFile(lfs_, + FilePoolHelper::DecodeMetaInfoFromMetaFile(lfs_, poolMetaPath, metaFileSize, &chunksize, @@ -401,22 +401,22 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { TEST_F(CSChunkfilePoolMockTest, InitializeTest) { // 初始化options - ChunkfilePoolOptions options; - options.getChunkFromPool = true; - memcpy(options.chunkFilePoolDir, poolDir.c_str(), poolDir.size()); - options.chunkSize = CHUNK_SIZE; + FilePoolOptions options; + options.getFileFromPool = true; + memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); + options.fileSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; memcpy(options.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); - options.cpMetaFileSize = metaFileSize; + options.metaFileSize = metaFileSize; options.retryTimes = 3; - /****************getChunkFromPool为true**************/ + /****************getFileFromPool为true**************/ // checkvalid时失败 { // DecodeMetaInfoFromMetaFile在上面已经单独测试过了 // 这里选上面中的一组异常用例来检验即可 // 解析json格式失败 - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); char buf[metaFileSize] = {0}; EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) .WillOnce(Return(1)); @@ -427,9 +427,9 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { .Times(1); ASSERT_EQ(false, pool.Initialize(options)); } - // getChunkFromPool为true,checkvalid成功,当前目录不存在 + // getFileFromPool为true,checkvalid成功,当前目录不存在 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakeMetaFile(); EXPECT_CALL(*lfs_, DirExists(_)) .WillOnce(Return(false)); @@ -437,7 +437,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { } // 当前目录存在,list目录失败 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakeMetaFile(); EXPECT_CALL(*lfs_, DirExists(_)) .WillOnce(Return(true)); @@ -447,7 +447,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { } // list目录成功,文件名中包含非数字字符 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakeMetaFile(); EXPECT_CALL(*lfs_, DirExists(_)) .WillOnce(Return(true)); @@ -460,7 +460,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { } // list目录成功,目录中包含非普通文件类型的对象 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakeMetaFile(); EXPECT_CALL(*lfs_, DirExists(_)) .WillOnce(Return(true)); @@ -475,7 +475,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { } // list目录成功,open文件时失败 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakeMetaFile(); EXPECT_CALL(*lfs_, DirExists(_)) .WillOnce(Return(true)); @@ -492,7 +492,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { } // stat文件信息时失败 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakeMetaFile(); EXPECT_CALL(*lfs_, DirExists(_)) .WillOnce(Return(true)); @@ -513,7 +513,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { } // stat文件信息成功,文件大小不匹配 { - ChunkfilePool 
pool(lfs_); + FilePool pool(lfs_); FakeMetaFile(); EXPECT_CALL(*lfs_, DirExists(_)) .WillOnce(Return(true)); @@ -538,7 +538,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { } // 文件信息匹配 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakeMetaFile(); EXPECT_CALL(*lfs_, DirExists(_)) .WillOnce(Return(true)); @@ -563,11 +563,11 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { ASSERT_EQ(1, pool.Size()); } - /****************getChunkFromPool为false**************/ - options.getChunkFromPool = false; + /****************getFileFromPool为false**************/ + options.getFileFromPool = false; // 当前目录不存在,创建目录失败 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); EXPECT_CALL(*lfs_, DirExists(_)) .WillOnce(Return(false)); EXPECT_CALL(*lfs_, Mkdir(_)) @@ -576,7 +576,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { } // 当前目录不存在,创建目录成功 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); EXPECT_CALL(*lfs_, DirExists(_)) .WillOnce(Return(false)); EXPECT_CALL(*lfs_, Mkdir(_)) @@ -585,49 +585,49 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { } // 当前目录存在 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); EXPECT_CALL(*lfs_, DirExists(_)) .WillOnce(Return(true)); ASSERT_EQ(true, pool.Initialize(options)); } } -TEST_F(CSChunkfilePoolMockTest, GetChunkTest) { +TEST_F(CSChunkfilePoolMockTest, GetFileTest) { // 初始化options - ChunkfilePoolOptions options; - options.getChunkFromPool = true; - memcpy(options.chunkFilePoolDir, poolDir.c_str(), poolDir.size()); - options.chunkSize = CHUNK_SIZE; + FilePoolOptions options; + options.getFileFromPool = true; + memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); + options.fileSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; memcpy(options.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); - options.cpMetaFileSize = metaFileSize; + options.metaFileSize = metaFileSize; int retryTimes = 3; options.retryTimes = retryTimes; char metapage[PAGE_SIZE] = {0}; - /****************getChunkFromPool为true**************/ + /****************getFileFromPool为true**************/ // 没有剩余chunk的情况 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 0); - ASSERT_EQ(-1, pool.GetChunk(targetPath, metapage)); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); } // 存在chunk,open时失败 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 10); EXPECT_CALL(*lfs_, Open(_, _)) .Times(retryTimes) .WillRepeatedly(Return(-1)); EXPECT_CALL(*lfs_, Close(_)) .Times(0); - ASSERT_EQ(-1, pool.GetChunk(targetPath, metapage)); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); ASSERT_EQ(10 - retryTimes, pool.Size()); } // 存在chunk,write时失败 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 10); EXPECT_CALL(*lfs_, Open(_, _)) .Times(retryTimes) @@ -637,12 +637,12 @@ TEST_F(CSChunkfilePoolMockTest, GetChunkTest) { .WillRepeatedly(Return(-1)); EXPECT_CALL(*lfs_, Close(1)) .Times(retryTimes); - ASSERT_EQ(-1, pool.GetChunk(targetPath, metapage)); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); ASSERT_EQ(10 - retryTimes, pool.Size()); } // 存在chunk,fsync时失败 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 10); EXPECT_CALL(*lfs_, Open(_, _)) .Times(retryTimes) @@ -655,12 +655,12 @@ TEST_F(CSChunkfilePoolMockTest, GetChunkTest) { .WillRepeatedly(Return(-1)); EXPECT_CALL(*lfs_, Close(1)) .Times(retryTimes); - ASSERT_EQ(-1, pool.GetChunk(targetPath, metapage)); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); ASSERT_EQ(10 - retryTimes, pool.Size()); } // 存在chunk,close时失败 { 
- ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 10); EXPECT_CALL(*lfs_, Open(_, _)) .Times(retryTimes) @@ -674,12 +674,12 @@ TEST_F(CSChunkfilePoolMockTest, GetChunkTest) { EXPECT_CALL(*lfs_, Close(1)) .Times(retryTimes) .WillRepeatedly(Return(-1)); - ASSERT_EQ(-1, pool.GetChunk(targetPath, metapage)); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); ASSERT_EQ(10 - retryTimes, pool.Size()); } // 存在chunk,rename时返回EEXIST错误 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 10); EXPECT_CALL(*lfs_, Open(_, _)) .WillOnce(Return(1)); @@ -691,12 +691,12 @@ TEST_F(CSChunkfilePoolMockTest, GetChunkTest) { .WillOnce(Return(0)); EXPECT_CALL(*lfs_, Rename(_, _, _)) .WillOnce(Return(-EEXIST)); - ASSERT_EQ(-EEXIST, pool.GetChunk(targetPath, metapage)); + ASSERT_EQ(-EEXIST, pool.GetFile(targetPath, metapage)); ASSERT_EQ(9, pool.Size()); } // 存在chunk,rename时返回非EEXIST错误 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 10); EXPECT_CALL(*lfs_, Open(_, _)) .Times(retryTimes) @@ -713,12 +713,12 @@ TEST_F(CSChunkfilePoolMockTest, GetChunkTest) { EXPECT_CALL(*lfs_, Rename(_, _, _)) .Times(retryTimes) .WillRepeatedly(Return(-1)); - ASSERT_EQ(-1, pool.GetChunk(targetPath, metapage)); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); ASSERT_EQ(10 - retryTimes, pool.Size()); } // 存在chunk,rename成功 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 10); EXPECT_CALL(*lfs_, Open(_, _)) .WillOnce(Return(1)); @@ -730,26 +730,26 @@ TEST_F(CSChunkfilePoolMockTest, GetChunkTest) { .WillOnce(Return(0)); EXPECT_CALL(*lfs_, Rename(_, _, _)) .WillOnce(Return(0)); - ASSERT_EQ(0, pool.GetChunk(targetPath, metapage)); + ASSERT_EQ(0, pool.GetFile(targetPath, metapage)); ASSERT_EQ(9, pool.Size()); } - options.getChunkFromPool = false; - /****************getChunkFromPool为false**************/ + options.getFileFromPool = false; + /****************getFileFromPool为false**************/ // open 时失败 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 0); EXPECT_CALL(*lfs_, Open(_, _)) .Times(retryTimes) .WillRepeatedly(Return(-1)); EXPECT_CALL(*lfs_, Close(1)) .Times(0); - ASSERT_EQ(-1, pool.GetChunk(targetPath, metapage)); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); } // fallocate 时失败 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 0); EXPECT_CALL(*lfs_, Open(_, _)) .Times(retryTimes) @@ -759,11 +759,11 @@ TEST_F(CSChunkfilePoolMockTest, GetChunkTest) { .WillRepeatedly(Return(-1)); EXPECT_CALL(*lfs_, Close(1)) .Times(retryTimes); - ASSERT_EQ(-1, pool.GetChunk(targetPath, metapage)); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); } // write 时失败 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 0); EXPECT_CALL(*lfs_, Open(_, _)) .Times(retryTimes) @@ -777,11 +777,11 @@ TEST_F(CSChunkfilePoolMockTest, GetChunkTest) { .WillRepeatedly(Return(-1)); EXPECT_CALL(*lfs_, Close(1)) .Times(retryTimes); - ASSERT_EQ(-1, pool.GetChunk(targetPath, metapage)); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); } // fsync 时失败 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 0); EXPECT_CALL(*lfs_, Open(_, _)) .Times(retryTimes) @@ -798,11 +798,11 @@ TEST_F(CSChunkfilePoolMockTest, GetChunkTest) { .WillRepeatedly(Return(-1)); EXPECT_CALL(*lfs_, Close(1)) .Times(retryTimes); - ASSERT_EQ(-1, pool.GetChunk(targetPath, metapage)); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); } // close 时失败 { - ChunkfilePool 
pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 0); EXPECT_CALL(*lfs_, Open(_, _)) .Times(retryTimes) @@ -820,46 +820,46 @@ TEST_F(CSChunkfilePoolMockTest, GetChunkTest) { EXPECT_CALL(*lfs_, Close(1)) .Times(retryTimes) .WillRepeatedly(Return(-1)); - ASSERT_EQ(-1, pool.GetChunk(targetPath, metapage)); + ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); } } -TEST_F(CSChunkfilePoolMockTest, RecycleChunkTest) { +TEST_F(CSChunkfilePoolMockTest, RecycleFileTest) { // 初始化options - ChunkfilePoolOptions options; - options.getChunkFromPool = true; - memcpy(options.chunkFilePoolDir, poolDir.c_str(), poolDir.size()); - options.chunkSize = CHUNK_SIZE; + FilePoolOptions options; + options.getFileFromPool = true; + memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); + options.fileSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; memcpy(options.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); - options.cpMetaFileSize = metaFileSize; + options.metaFileSize = metaFileSize; int retryTimes = 3; options.retryTimes = retryTimes; - /****************getChunkFromPool为false**************/ - options.getChunkFromPool = false; + /****************getFileFromPool为false**************/ + options.getFileFromPool = false; // delete文件时失败 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 0); EXPECT_CALL(*lfs_, Delete(filePath1)) .WillOnce(Return(-1)); - ASSERT_EQ(-1, pool.RecycleChunk(filePath1)); + ASSERT_EQ(-1, pool.RecycleFile(filePath1)); } // delete文件成功 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 0); EXPECT_CALL(*lfs_, Delete(filePath1)) .WillOnce(Return(0)); - ASSERT_EQ(0, pool.RecycleChunk(filePath1)); + ASSERT_EQ(0, pool.RecycleFile(filePath1)); } - /****************getChunkFromPool为true**************/ - options.getChunkFromPool = true; + /****************getFileFromPool为true**************/ + options.getFileFromPool = true; // open失败 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 0); EXPECT_CALL(*lfs_, Open(targetPath, _)) @@ -868,7 +868,7 @@ TEST_F(CSChunkfilePoolMockTest, RecycleChunkTest) { EXPECT_CALL(*lfs_, Delete(targetPath)) .WillOnce(Return(0)); // Delete 成功就返回0 - ASSERT_EQ(0, pool.RecycleChunk(targetPath)); + ASSERT_EQ(0, pool.RecycleFile(targetPath)); EXPECT_CALL(*lfs_, Open(targetPath, _)) .WillOnce(Return(-1)); @@ -876,12 +876,12 @@ TEST_F(CSChunkfilePoolMockTest, RecycleChunkTest) { EXPECT_CALL(*lfs_, Delete(targetPath)) .WillOnce(Return(-1)); // Delete 失败就返回错误码 - ASSERT_EQ(-1, pool.RecycleChunk(targetPath)); + ASSERT_EQ(-1, pool.RecycleFile(targetPath)); } // Fstat失败 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 0); EXPECT_CALL(*lfs_, Open(targetPath, _)) @@ -894,7 +894,7 @@ TEST_F(CSChunkfilePoolMockTest, RecycleChunkTest) { EXPECT_CALL(*lfs_, Delete(targetPath)) .WillOnce(Return(0)); // Delete 成功就返回0 - ASSERT_EQ(0, pool.RecycleChunk(targetPath)); + ASSERT_EQ(0, pool.RecycleFile(targetPath)); EXPECT_CALL(*lfs_, Open(targetPath, _)) .WillOnce(Return(1)); @@ -906,12 +906,12 @@ TEST_F(CSChunkfilePoolMockTest, RecycleChunkTest) { EXPECT_CALL(*lfs_, Delete(targetPath)) .WillOnce(Return(-1)); // Delete 失败就返回错误码 - ASSERT_EQ(-1, pool.RecycleChunk(targetPath)); + ASSERT_EQ(-1, pool.RecycleFile(targetPath)); } // Fstat成功,大小不匹配 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 0); struct stat fileInfo; fileInfo.st_size = CHUNK_SIZE; @@ -927,7 +927,7 @@ TEST_F(CSChunkfilePoolMockTest, RecycleChunkTest) { EXPECT_CALL(*lfs_, 
Delete(targetPath)) .WillOnce(Return(0)); // Delete 成功就返回0 - ASSERT_EQ(0, pool.RecycleChunk(targetPath)); + ASSERT_EQ(0, pool.RecycleFile(targetPath)); EXPECT_CALL(*lfs_, Open(targetPath, _)) .WillOnce(Return(1)); @@ -940,12 +940,12 @@ TEST_F(CSChunkfilePoolMockTest, RecycleChunkTest) { EXPECT_CALL(*lfs_, Delete(targetPath)) .WillOnce(Return(-1)); // Delete 失败就返回错误码 - ASSERT_EQ(-1, pool.RecycleChunk(targetPath)); + ASSERT_EQ(-1, pool.RecycleFile(targetPath)); } // Fstat信息匹配,rename失败 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 0); struct stat fileInfo; fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE; @@ -959,13 +959,13 @@ TEST_F(CSChunkfilePoolMockTest, RecycleChunkTest) { .Times(1); EXPECT_CALL(*lfs_, Rename(_, _, _)) .WillOnce(Return(-1)); - ASSERT_EQ(-1, pool.RecycleChunk(targetPath)); + ASSERT_EQ(-1, pool.RecycleFile(targetPath)); ASSERT_EQ(0, pool.Size()); } // Fstat信息匹配,rename成功 { - ChunkfilePool pool(lfs_); + FilePool pool(lfs_); FakePool(&pool, options, 0); struct stat fileInfo; fileInfo.st_size = CHUNK_SIZE + PAGE_SIZE; @@ -979,7 +979,7 @@ TEST_F(CSChunkfilePoolMockTest, RecycleChunkTest) { .Times(1); EXPECT_CALL(*lfs_, Rename(_, _, _)) .WillOnce(Return(0)); - ASSERT_EQ(0, pool.RecycleChunk(targetPath)); + ASSERT_EQ(0, pool.RecycleFile(targetPath)); ASSERT_EQ(1, pool.Size()); } } diff --git a/test/chunkserver/datastore/chunkfilepool_unittest.cpp b/test/chunkserver/datastore/filepool_unittest.cpp similarity index 52% rename from test/chunkserver/datastore/chunkfilepool_unittest.cpp rename to test/chunkserver/datastore/filepool_unittest.cpp index 4f3d329fbc..384b83183c 100644 --- a/test/chunkserver/datastore/chunkfilepool_unittest.cpp +++ b/test/chunkserver/datastore/filepool_unittest.cpp @@ -31,7 +31,7 @@ #include "src/common/crc32.h" #include "src/common/curve_define.h" #include "src/fs/local_filesystem.h" -#include "src/chunkserver/datastore/chunkfile_pool.h" +#include "src/chunkserver/datastore/file_pool.h" #include "test/fs/mock_local_filesystem.h" using ::testing::_; @@ -50,28 +50,28 @@ using ::testing::ReturnArg; using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::chunkserver::ChunkfilePool; -using curve::chunkserver::ChunkfilePoolOptions; -using curve::chunkserver::ChunkFilePoolState_t; -using curve::common::kChunkFilePoolMaigic; -using curve::chunkserver::ChunkfilePoolHelper; +using curve::chunkserver::FilePool; +using curve::chunkserver::FilePoolOptions; +using curve::chunkserver::FilePoolState_t; +using curve::common::kFilePoolMaigic; +using curve::chunkserver::FilePoolHelper; -class CSChunkfilePool_test : public testing::Test { +class CSFilePool_test : public testing::Test { public: void SetUp() { fsptr = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); - ChunkfilepoolPtr_ = std::make_shared(fsptr); - if (ChunkfilepoolPtr_ == nullptr) { + chunkFilePoolPtr_ = std::make_shared(fsptr); + if (chunkFilePoolPtr_ == nullptr) { LOG(FATAL) << "allocate chunkfile pool failed!"; } int count = 1; fsptr->Mkdir("./cspooltest/"); - std::string dirname = "./cspooltest/chunkfilepool"; + std::string dirname = "./cspooltest/filePool"; while (count < 51) { - std::string filename = "./cspooltest/chunkfilepool/" + std::string filename = "./cspooltest/filePool/" + std::to_string(count); - fsptr->Mkdir("./cspooltest/chunkfilepool"); + fsptr->Mkdir("./cspooltest/filePool"); int fd = fsptr->Open(filename.c_str(), O_RDWR | O_CREAT); char data[8192]; memset(data, 'a', 8192); @@ -83,19 +83,19 @@ class 
CSChunkfilePool_test : public testing::Test { uint32_t chunksize = 4096; uint32_t metapagesize = 4096; - int ret = ChunkfilePoolHelper::PersistEnCodeMetaInfo( + int ret = FilePoolHelper::PersistEnCodeMetaInfo( fsptr, chunksize, metapagesize, dirname, - "./cspooltest/chunkfilepool.meta"); + "./cspooltest/filePool.meta"); if (ret == -1) { LOG(ERROR) << "persist chunkfile pool meta info failed!"; return; } - int fd = fsptr->Open("./cspooltest/chunkfilepool.meta2", + int fd = fsptr->Open("./cspooltest/filePool.meta2", O_RDWR | O_CREAT); if (fd < 0) { return; @@ -114,22 +114,22 @@ class CSChunkfilePool_test : public testing::Test { void TearDown() { std::vector filename; - fsptr->List("./cspooltest/chunkfilepool", &filename); + fsptr->List("./cspooltest/filePool", &filename); for (auto iter : filename) { - auto path = "./cspooltest/chunkfilepool/" + iter; + auto path = "./cspooltest/filePool/" + iter; int err = fsptr->Delete(path.c_str()); if (err) { LOG(INFO) << "unlink file failed!, errno = " << errno; } } - fsptr->Delete("./cspooltest/chunkfilepool"); - fsptr->Delete("./cspooltest/chunkfilepool.meta"); - fsptr->Delete("./cspooltest/chunkfilepool.meta2"); + fsptr->Delete("./cspooltest/filePool"); + fsptr->Delete("./cspooltest/filePool.meta"); + fsptr->Delete("./cspooltest/filePool.meta2"); fsptr->Delete("./cspooltest"); - ChunkfilepoolPtr_->UnInitialize(); + chunkFilePoolPtr_->UnInitialize(); } - std::shared_ptr ChunkfilepoolPtr_; + std::shared_ptr chunkFilePoolPtr_; std::shared_ptr fsptr; }; @@ -147,70 +147,70 @@ bool CheckFileOpenOrNot(const std::string& filename) { return out.find("No such file or directory") != out.npos; } -TEST_F(CSChunkfilePool_test, InitializeTest) { - std::string chunkfilepool = "./cspooltest/chunkfilepool.meta"; +TEST_F(CSFilePool_test, InitializeTest) { + std::string filePool = "./cspooltest/filePool.meta"; - ChunkfilePoolOptions cfop; - cfop.chunkSize = 4096; + FilePoolOptions cfop; + cfop.fileSize = 4096; cfop.metaPageSize = 4096; - memcpy(cfop.metaPath, chunkfilepool.c_str(), chunkfilepool.size()); + memcpy(cfop.metaPath, filePool.c_str(), filePool.size()); // initialize - ASSERT_TRUE(ChunkfilepoolPtr_->Initialize(cfop)); - ASSERT_EQ(50, ChunkfilepoolPtr_->Size()); - // 初始化阶段会扫描chunkfilepool内的所有文件,在扫描结束之后需要关闭这些文件 + ASSERT_TRUE(chunkFilePoolPtr_->Initialize(cfop)); + ASSERT_EQ(50, chunkFilePoolPtr_->Size()); + // 初始化阶段会扫描FilePool内的所有文件,在扫描结束之后需要关闭这些文件 // 防止过多的文件描述符被占用 - ASSERT_FALSE(CheckFileOpenOrNot("./cspooltest/chunkfilepool/1")); - ASSERT_FALSE(CheckFileOpenOrNot("./cspooltest/chunkfilepool/2")); - cfop.chunkSize = 8192; + ASSERT_FALSE(CheckFileOpenOrNot("./cspooltest/filePool/1")); + ASSERT_FALSE(CheckFileOpenOrNot("./cspooltest/filePool/2")); + cfop.fileSize = 8192; cfop.metaPageSize = 4096; // test meta content wrong - ASSERT_FALSE(ChunkfilepoolPtr_->Initialize(cfop)); - cfop.chunkSize = 8192; + ASSERT_FALSE(chunkFilePoolPtr_->Initialize(cfop)); + cfop.fileSize = 8192; cfop.metaPageSize = 4096; - ASSERT_FALSE(ChunkfilepoolPtr_->Initialize(cfop)); + ASSERT_FALSE(chunkFilePoolPtr_->Initialize(cfop)); // invalid file name - std::string filename = "./cspooltest/chunkfilepool/a"; - cfop.chunkSize = 4096; + std::string filename = "./cspooltest/filePool/a"; + cfop.fileSize = 4096; cfop.metaPageSize = 4096; int fd = fsptr->Open(filename.c_str(), O_RDWR | O_CREAT); char data[8192]; memset(data, 'a', 8192); fsptr->Write(fd, data, 0, 8192); fsptr->Close(fd); - ASSERT_FALSE(ChunkfilepoolPtr_->Initialize(cfop)); + ASSERT_FALSE(chunkFilePoolPtr_->Initialize(cfop)); // 
test meta file wrong - chunkfilepool = "./cspooltest/chunkfilepool.meta2"; - cfop.chunkSize = 4096; + filePool = "./cspooltest/filePool.meta2"; + cfop.fileSize = 4096; cfop.metaPageSize = 4096; - memcpy(cfop.metaPath, chunkfilepool.c_str(), chunkfilepool.size()); - ASSERT_FALSE(ChunkfilepoolPtr_->Initialize(cfop)); + memcpy(cfop.metaPath, filePool.c_str(), filePool.size()); + ASSERT_FALSE(chunkFilePoolPtr_->Initialize(cfop)); // test meta file not exist - chunkfilepool = "./cspooltest/chunkfilepool.meta3"; - cfop.chunkSize = 4096; + filePool = "./cspooltest/FilePool.meta3"; + cfop.fileSize = 4096; cfop.metaPageSize = 4096; - memcpy(cfop.metaPath, chunkfilepool.c_str(), chunkfilepool.size()); - ASSERT_FALSE(ChunkfilepoolPtr_->Initialize(cfop)); + memcpy(cfop.metaPath, filePool.c_str(), filePool.size()); + ASSERT_FALSE(chunkFilePoolPtr_->Initialize(cfop)); - fsptr->Delete("./cspooltest/chunkfilepool/a"); - fsptr->Delete("./cspooltest/chunkfilepool.meta3"); + fsptr->Delete("./cspooltest/filePool/a"); + fsptr->Delete("./cspooltest/filePool.meta3"); } -TEST_F(CSChunkfilePool_test, GetChunkTest) { - std::string chunkfilepool = "./cspooltest/chunkfilepool.meta"; - ChunkfilePoolOptions cfop; - cfop.chunkSize = 4096; +TEST_F(CSFilePool_test, GetFileTest) { + std::string filePool = "./cspooltest/filePool.meta"; + FilePoolOptions cfop; + cfop.fileSize = 4096; cfop.metaPageSize = 4096; - memcpy(cfop.metaPath, chunkfilepool.c_str(), chunkfilepool.size()); + memcpy(cfop.metaPath, filePool.c_str(), filePool.size()); // test get chunk success char metapage[4096]; memset(metapage, '1', 4096); - ASSERT_EQ(-1, ChunkfilepoolPtr_->GetChunk("./new_exit", metapage)); + ASSERT_EQ(-1, chunkFilePoolPtr_->GetFile("./new_exit", metapage)); ASSERT_EQ(-2, fsptr->Delete("./new_exit")); - ChunkfilepoolPtr_->Initialize(cfop); - ASSERT_EQ(50, ChunkfilepoolPtr_->Size()); - ASSERT_EQ(0, ChunkfilepoolPtr_->GetChunk("./new1", metapage)); - ASSERT_EQ(49, ChunkfilepoolPtr_->Size()); + chunkFilePoolPtr_->Initialize(cfop); + ASSERT_EQ(50, chunkFilePoolPtr_->Size()); + ASSERT_EQ(0, chunkFilePoolPtr_->GetFile("./new1", metapage)); + ASSERT_EQ(49, chunkFilePoolPtr_->Size()); ASSERT_TRUE(fsptr->FileExists("./new1")); int fd = fsptr->Open("./new1", O_RDWR); char data[4096]; @@ -224,82 +224,82 @@ TEST_F(CSChunkfilePool_test, GetChunkTest) { ASSERT_EQ(0, fsptr->Delete("./new1")); // test get chunk success - ASSERT_EQ(0, ChunkfilepoolPtr_->GetChunk("./new2", metapage)); + ASSERT_EQ(0, chunkFilePoolPtr_->GetFile("./new2", metapage)); ASSERT_TRUE(fsptr->FileExists("./new2")); - ASSERT_NE(49, ChunkfilepoolPtr_->Size()); + ASSERT_NE(49, chunkFilePoolPtr_->Size()); ASSERT_EQ(0, fsptr->Delete("./new2")); } -TEST_F(CSChunkfilePool_test, RecycleChunkTest) { - std::string chunkfilepool = "./cspooltest/chunkfilepool.meta"; - ChunkfilePoolOptions cfop; - cfop.chunkSize = 4096; +TEST_F(CSFilePool_test, RecycleFileTest) { + std::string filePool = "./cspooltest/filePool.meta"; + FilePoolOptions cfop; + cfop.fileSize = 4096; cfop.metaPageSize = 4096; - memcpy(cfop.metaPath, chunkfilepool.c_str(), chunkfilepool.size()); + memcpy(cfop.metaPath, filePool.c_str(), filePool.size()); - ChunkfilepoolPtr_->Initialize(cfop); - ChunkFilePoolState_t currentStat = ChunkfilepoolPtr_->GetState(); + chunkFilePoolPtr_->Initialize(cfop); + FilePoolState_t currentStat = chunkFilePoolPtr_->GetState(); ASSERT_EQ(50, currentStat.preallocatedChunksLeft); - ASSERT_EQ(50, ChunkfilepoolPtr_->Size()); + ASSERT_EQ(50, chunkFilePoolPtr_->Size()); char metapage[4096]; 
memset(metapage, '1', 4096); - ASSERT_EQ(0, ChunkfilepoolPtr_->GetChunk("./new1", metapage)); + ASSERT_EQ(0, chunkFilePoolPtr_->GetFile("./new1", metapage)); ASSERT_TRUE(fsptr->FileExists("./new1")); - ASSERT_EQ(49, ChunkfilepoolPtr_->Size()); + ASSERT_EQ(49, chunkFilePoolPtr_->Size()); - currentStat = ChunkfilepoolPtr_->GetState(); + currentStat = chunkFilePoolPtr_->GetState(); ASSERT_EQ(49, currentStat.preallocatedChunksLeft); - ChunkfilepoolPtr_->RecycleChunk("./new1"); - ASSERT_EQ(50, ChunkfilepoolPtr_->Size()); + chunkFilePoolPtr_->RecycleFile("./new1"); + ASSERT_EQ(50, chunkFilePoolPtr_->Size()); - currentStat = ChunkfilepoolPtr_->GetState(); + currentStat = chunkFilePoolPtr_->GetState(); ASSERT_EQ(50, currentStat.preallocatedChunksLeft); ASSERT_FALSE(fsptr->FileExists("./new1")); - ASSERT_TRUE(fsptr->FileExists("./cspooltest/chunkfilepool/4")); - ASSERT_EQ(0, fsptr->Delete("./cspooltest/chunkfilepool/4")); + ASSERT_TRUE(fsptr->FileExists("./cspooltest/filePool/4")); + ASSERT_EQ(0, fsptr->Delete("./cspooltest/filePool/4")); } -TEST(CSChunkfilePool, GetChunkDirectlyTest) { - std::shared_ptr ChunkfilepoolPtr_; +TEST(CSFilePool, GetFileDirectlyTest) { + std::shared_ptr chunkFilePoolPtr_; std::shared_ptr fsptr; fsptr = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); // create chunkfile in chunkfile pool dir - // if chunkfile pool 的getchunkfrompool开关关掉了,那么 - // chunkfilepool的size是一直为0,不会从pool目录中找 - std::string filename = "./cspooltest/chunkfilepool/1000"; - fsptr->Mkdir("./cspooltest/chunkfilepool"); + // if chunkfile pool 的getFileFromPool开关关掉了,那么 + // FilePool的size是一直为0,不会从pool目录中找 + std::string filename = "./cspooltest/filePool/1000"; + fsptr->Mkdir("./cspooltest/filePool"); int fd = fsptr->Open(filename.c_str(), O_RDWR | O_CREAT); char data[8192]; memset(data, 'a', 8192); ASSERT_EQ(8192, fsptr->Write(fd, data, 0, 8192)); fsptr->Close(fd); - ASSERT_TRUE(fsptr->FileExists("./cspooltest/chunkfilepool/1000")); + ASSERT_TRUE(fsptr->FileExists("./cspooltest/filePool/1000")); - ChunkfilePoolOptions cspopt; - cspopt.getChunkFromPool = false; - cspopt.chunkSize = 16 * 1024; + FilePoolOptions cspopt; + cspopt.getFileFromPool = false; + cspopt.fileSize = 16 * 1024; cspopt.metaPageSize = 4 * 1024; - cspopt.cpMetaFileSize = 4 * 1024; + cspopt.metaFileSize = 4 * 1024; cspopt.retryTimes = 5; - strcpy(cspopt.chunkFilePoolDir, "./cspooltest/chunkfilepool"); // NOLINT + strcpy(cspopt.filePoolDir, "./cspooltest/filePool"); // NOLINT - ChunkfilepoolPtr_ = std::make_shared(fsptr); - if (ChunkfilepoolPtr_ == nullptr) { + chunkFilePoolPtr_ = std::make_shared(fsptr); + if (chunkFilePoolPtr_ == nullptr) { LOG(FATAL) << "allocate chunkfile pool failed!"; } - ASSERT_TRUE(ChunkfilepoolPtr_->Initialize(cspopt)); - ASSERT_EQ(0, ChunkfilepoolPtr_->Size()); + ASSERT_TRUE(chunkFilePoolPtr_->Initialize(cspopt)); + ASSERT_EQ(0, chunkFilePoolPtr_->Size()); // 测试获取chunk,chunkfile pool size不变一直为0 char metapage[4096]; memset(metapage, '1', 4096); - ASSERT_EQ(0, ChunkfilepoolPtr_->GetChunk("./new1", metapage)); - ASSERT_EQ(0, ChunkfilepoolPtr_->Size()); + ASSERT_EQ(0, chunkFilePoolPtr_->GetFile("./new1", metapage)); + ASSERT_EQ(0, chunkFilePoolPtr_->Size()); ASSERT_TRUE(fsptr->FileExists("./new1")); fd = fsptr->Open("./new1", O_RDWR); @@ -311,14 +311,14 @@ TEST(CSChunkfilePool, GetChunkDirectlyTest) { ASSERT_EQ(buf[i], '1'); } - // 测试回收chunk,文件被删除,chunkfilepool Size不受影响 - ChunkfilepoolPtr_->RecycleChunk("./new1"); - ASSERT_EQ(0, ChunkfilepoolPtr_->Size()); + // 测试回收chunk,文件被删除,FilePool Size不受影响 + 
chunkFilePoolPtr_->RecycleFile("./new1"); + ASSERT_EQ(0, chunkFilePoolPtr_->Size()); ASSERT_FALSE(fsptr->FileExists("./new1")); // 删除测试文件及目录 ASSERT_EQ(0, fsptr->Close(fd)); - ASSERT_EQ(0, fsptr->Delete("./cspooltest/chunkfilepool/1000")); - ASSERT_EQ(0, fsptr->Delete("./cspooltest/chunkfilepool")); - ChunkfilepoolPtr_->UnInitialize(); + ASSERT_EQ(0, fsptr->Delete("./cspooltest/filePool/1000")); + ASSERT_EQ(0, fsptr->Delete("./cspooltest/filePool")); + chunkFilePoolPtr_->UnInitialize(); } diff --git a/test/chunkserver/datastore/mock_chunkfile_pool.h b/test/chunkserver/datastore/mock_file_pool.h similarity index 61% rename from test/chunkserver/datastore/mock_chunkfile_pool.h rename to test/chunkserver/datastore/mock_file_pool.h index ab92d9e84a..5ddf23cc4b 100644 --- a/test/chunkserver/datastore/mock_chunkfile_pool.h +++ b/test/chunkserver/datastore/mock_file_pool.h @@ -20,31 +20,32 @@ * Author: yangyaokai */ -#ifndef TEST_CHUNKSERVER_DATASTORE_MOCK_CHUNKFILE_POOL_H_ -#define TEST_CHUNKSERVER_DATASTORE_MOCK_CHUNKFILE_POOL_H_ +#ifndef TEST_CHUNKSERVER_DATASTORE_MOCK_FILE_POOL_H_ +#define TEST_CHUNKSERVER_DATASTORE_MOCK_FILE_POOL_H_ #include #include #include -#include "src/chunkserver/datastore/chunkfile_pool.h" +#include "src/chunkserver/datastore/file_pool.h" namespace curve { namespace chunkserver { -class MockChunkfilePool : public ChunkfilePool { +class MockFilePool : public FilePool { public: - explicit MockChunkfilePool(std::shared_ptr lfs) - : ChunkfilePool(lfs) {} - ~MockChunkfilePool() {} - MOCK_METHOD1(Initialize, bool(ChunkfilePoolOptions)); - MOCK_METHOD2(GetChunk, int(const std::string&, char*)); - MOCK_METHOD1(RecycleChunk, int(const std::string& chunkpath)); + explicit MockFilePool(std::shared_ptr lfs) + : FilePool(lfs) {} + ~MockFilePool() {} + MOCK_METHOD1(Initialize, bool(FilePoolOptions)); + MOCK_METHOD2(GetFile, int(const std::string&, char*)); + MOCK_METHOD1(RecycleFile, int(const std::string& chunkpath)); MOCK_METHOD0(UnInitialize, void()); MOCK_METHOD0(Size, size_t()); + MOCK_METHOD0(GetFilePoolOpt, FilePoolOptions()); }; } // namespace chunkserver } // namespace curve -#endif // TEST_CHUNKSERVER_DATASTORE_MOCK_CHUNKFILE_POOL_H_ +#endif // TEST_CHUNKSERVER_DATASTORE_MOCK_FILE_POOL_H_ diff --git a/test/chunkserver/fake_datastore.h b/test/chunkserver/fake_datastore.h index 070b823105..75b5c80330 100644 --- a/test/chunkserver/fake_datastore.h +++ b/test/chunkserver/fake_datastore.h @@ -43,7 +43,7 @@ class FakeCSDataStore : public CSDataStore { FakeCSDataStore(DataStoreOptions options, std::shared_ptr fs) : CSDataStore(fs, - std::make_shared(fs), + std::make_shared(fs), options) { chunk_ = new (std::nothrow) char[options.chunkSize]; ::memset(chunk_, 0, options.chunkSize); @@ -228,14 +228,14 @@ class FakeCSDataStore : public CSDataStore { uint32_t chunkSize_; }; -class FakeChunkfilePool : public ChunkfilePool { +class FakeFilePool : public FilePool { public: - explicit FakeChunkfilePool(std::shared_ptr lfs) - : ChunkfilePool(lfs) {} - ~FakeChunkfilePool() {} + explicit FakeFilePool(std::shared_ptr lfs) + : FilePool(lfs) {} + ~FakeFilePool() {} - bool Initialize(const ChunkfilePoolOptions &cfop) { - LOG(INFO) << "FakeChunkfilePool init success"; + bool Initialize(const FilePoolOptions &cfop) { + LOG(INFO) << "FakeFilePool init success"; return true; } int GetChunk(const std::string &chunkpath, char *metapage) { return 0; } diff --git a/test/chunkserver/heartbeat_test_main.cpp b/test/chunkserver/heartbeat_test_main.cpp index 55d6a0b6ed..7b034c5dc8 100644 --- 
a/test/chunkserver/heartbeat_test_main.cpp +++ b/test/chunkserver/heartbeat_test_main.cpp @@ -31,7 +31,7 @@ #include "test/chunkserver/heartbeat_test_common.h" #include "test/integration/common/config_generator.h" -static char *param[3][12] = { +static char *param[3][15] = { { "heartbeat_test", "-chunkServerIp=127.0.0.1", @@ -40,9 +40,12 @@ static char *param[3][12] = { "-chunkServerMetaUri=local://./0/chunkserver.dat", "-copySetUri=local://./0/copysets", "-raftSnapshotUri=curve://./0/copysets", + "-raftLogUri=curve://./0/copysets", "-recycleUri=local://./0/recycler", "-chunkFilePoolDir=./0/chunkfilepool/", "-chunkFilePoolMetaPath=./0/chunkfilepool.meta", + "-walFilePoolDir=./0/walfilepool/", + "-walFilePoolMetaPath=./0/walfilepool.meta", "-conf=./8200/chunkserver.conf", "-graceful_quit_on_sigterm", }, @@ -54,9 +57,12 @@ static char *param[3][12] = { "-chunkServerMetaUri=local://./1/chunkserver.dat", "-copySetUri=local://./1/copysets", "-raftSnapshotUri=curve://./1/copysets", + "-raftLogUri=curve://./1/copysets", "-recycleUri=local://./1/recycler", "-chunkFilePoolDir=./1/chunkfilepool/", "-chunkFilePoolMetaPath=./1/chunkfilepool.meta", + "-walFilePoolDir=./1/walfilepool/", + "-walFilePoolMetaPath=./1/walfilepool.meta", "-conf=./8201/chunkserver.conf", "-graceful_quit_on_sigterm", }, @@ -68,9 +74,12 @@ static char *param[3][12] = { "-chunkServerMetaUri=local://./2/chunkserver.dat", "-copySetUri=local://./2/copysets", "-raftSnapshotUri=curve://./2/copysets", + "-raftLogUri=curve://./2/copysets", "-recycleUri=local://./2/recycler", "-chunkFilePoolDir=./2/chunkfilepool/", "-chunkFilePoolMetaPath=./2/chunkfilepool.meta", + "-walFilePoolDir=./2/walfilepool/", + "-walFilePoolMetaPath=./2/walfilepool.meta", "-conf=./8202/chunkserver.conf", "-graceful_quit_on_sigterm", }, diff --git a/test/chunkserver/metrics_test.cpp b/test/chunkserver/metrics_test.cpp index fd085ba9d3..667aabbbb4 100644 --- a/test/chunkserver/metrics_test.cpp +++ b/test/chunkserver/metrics_test.cpp @@ -34,9 +34,9 @@ #include "src/chunkserver/chunkserver_metrics.h" #include "src/chunkserver/trash.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/datastore/chunkfile_pool.h" +#include "src/chunkserver/datastore/file_pool.h" #include "src/fs/local_filesystem.h" -#include "test/chunkserver/datastore/chunkfilepool_helper.h" +#include "test/chunkserver/datastore/filepool_helper.h" namespace curve { namespace chunkserver { @@ -63,28 +63,28 @@ class CSMetricTest : public ::testing::Test { CSMetricTest() {} ~CSMetricTest() {} - void InitChunkFilePool() { - ChunkfilePoolHelper::PersistEnCodeMetaInfo(lfs_, + void InitFilePool() { + FilePoolHelper::PersistEnCodeMetaInfo(lfs_, CHUNK_SIZE, PAGE_SIZE, poolDir, poolMetaPath); - ChunkfilePoolOptions cfop; - cfop.chunkSize = CHUNK_SIZE; + FilePoolOptions cfop; + cfop.fileSize = CHUNK_SIZE; cfop.metaPageSize = PAGE_SIZE; memcpy(cfop.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); if (lfs_->DirExists(poolDir)) lfs_->Delete(poolDir); allocateChunk(lfs_, chunkNum, poolDir, CHUNK_SIZE); - ASSERT_TRUE(chunkfilePool_->Initialize(cfop)); - ASSERT_EQ(chunkNum, chunkfilePool_->Size()); + ASSERT_TRUE(chunkFilePool_->Initialize(cfop)); + ASSERT_EQ(chunkNum, chunkFilePool_->Size()); } void InitTrash() { TrashOptions ops; ops.localFileSystem = lfs_; - ops.chunkfilePool = chunkfilePool_; + ops.chunkFilePool = chunkFilePool_; ops.trashPath = "local://./trash_csmetric"; ops.expiredAfterSec = 1; ops.scanPeriodSec = 1; @@ -104,7 +104,7 @@ class CSMetricTest : public ::testing::Test 
{ copysetNodeOptions.raftSnapshotUri = copysetDir; copysetNodeOptions.concurrentapply = new ConcurrentApplyModule(); copysetNodeOptions.localFileSystem = lfs_; - copysetNodeOptions.chunkfilePool = chunkfilePool_; + copysetNodeOptions.chunkFilePool = chunkFilePool_; copysetNodeOptions.maxChunkSize = CHUNK_SIZE; copysetNodeOptions.trash = trash_; ASSERT_EQ(0, copysetMgr_->Init(copysetNodeOptions)); @@ -124,7 +124,7 @@ class CSMetricTest : public ::testing::Test { metricOptions.collectMetric = true; metric_ = ChunkServerMetric::GetInstance(); metric_->Init(metricOptions); - metric_->MonitorChunkFilePool(chunkfilePool_.get()); + metric_->MonitorChunkFilePool(chunkFilePool_.get()); metric_->MonitorTrash(trash_.get()); } @@ -146,10 +146,10 @@ class CSMetricTest : public ::testing::Test { ASSERT_NE(lfs_, nullptr); trash_ = std::make_shared(); ASSERT_NE(trash_, nullptr); - chunkfilePool_ = std::make_shared(lfs_); - ASSERT_NE(chunkfilePool_, nullptr); + chunkFilePool_ = std::make_shared(lfs_); + ASSERT_NE(chunkFilePool_, nullptr); - InitChunkFilePool(); + InitFilePool(); InitTrash(); InitCopysetManager(); InitChunkServerMetric(); @@ -163,7 +163,7 @@ class CSMetricTest : public ::testing::Test { lfs_->Delete(trashPath); lfs_->Delete(poolMetaPath); lfs_->Delete(confFile_); - chunkfilePool_->UnInitialize(); + chunkFilePool_->UnInitialize(); ASSERT_EQ(0, copysetMgr_->Fini()); ASSERT_EQ(0, server_.Stop(0)); ASSERT_EQ(0, server_.Join()); @@ -173,7 +173,7 @@ class CSMetricTest : public ::testing::Test { brpc::Server server_; std::shared_ptr trash_; CopysetNodeManager* copysetMgr_; - std::shared_ptr chunkfilePool_; + std::shared_ptr chunkFilePool_; std::shared_ptr lfs_; ChunkServerMetric* metric_; std::string confFile_; @@ -442,7 +442,7 @@ TEST_F(CSMetricTest, OnResponseTest) { } TEST_F(CSMetricTest, CountTest) { - // 初始状态下,没有copyset,chunkfilepool中有chunkNum个chunk + // 初始状态下,没有copyset,FilePool中有chunkNum个chunk ASSERT_EQ(0, metric_->GetCopysetCount()); ASSERT_EQ(10, metric_->GetChunkLeftCount()); @@ -583,7 +583,7 @@ TEST_F(CSMetricTest, OnOffTest) { { metricOptions.collectMetric = false; ASSERT_EQ(0, metric_->Init(metricOptions)); - metric_->MonitorChunkFilePool(chunkfilePool_.get()); + metric_->MonitorChunkFilePool(chunkFilePool_.get()); common::Configuration conf; conf.SetConfigPath(confFile_); int ret = conf.LoadConfig(); diff --git a/test/chunkserver/raftlog/BUILD b/test/chunkserver/raftlog/BUILD new file mode 100644 index 0000000000..aae327a7a5 --- /dev/null +++ b/test/chunkserver/raftlog/BUILD @@ -0,0 +1,32 @@ +# +# Copyright (c) 2020 NetEase Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+cc_test(
+    name = "curve-raftlog-unittest",
+    srcs = glob([
+        "*.cpp",
+        "*.h",
+    ]),
+    copts = ["-std=c++11"],
+    deps = [
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+        "//external:braft",
+        "//src/chunkserver/raftlog:chunkserver-raft-log",
+        "//test/fs:fs_mock",
+        "//test/chunkserver/datastore:datastore_mock",
+    ],
+)
diff --git a/test/chunkserver/raftlog/common.cpp b/test/chunkserver/raftlog/common.cpp
new file mode 100644
index 0000000000..f31eba12b4
--- /dev/null
+++ b/test/chunkserver/raftlog/common.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020 NetEase Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Project: curve
+ * Created Date: 2020-09-16
+ * Author: charisu
+ */
+
+#include "test/chunkserver/raftlog/common.h"
+
+namespace curve {
+namespace chunkserver {
+
+int prepare_segment(const std::string& path) {
+    int fd = ::open(path.c_str(), O_RDWR|O_CREAT, 0644);
+    if (fd < 0) {
+        LOG(ERROR) << "Create segment fail";
+        return -1;
+    }
+
+    if (::fallocate(fd, 0, 0, kSegmentSize) < 0) {
+        LOG(ERROR) << "fallocate fail";
+        return -1;
+    }
+    char* data = new char[kSegmentSize];
+    memset(data, 0, kSegmentSize);
+    if (pwrite(fd, data, kSegmentSize, 0) != kSegmentSize) {
+        LOG(ERROR) << "write failed";
+        return -1;
+    }
+    delete[] data;
+    close(fd);
+    return 0;
+}
+
+} // namespace chunkserver
+} // namespace curve
diff --git a/test/chunkserver/raftlog/common.h b/test/chunkserver/raftlog/common.h
new file mode 100644
index 0000000000..f684390db5
--- /dev/null
+++ b/test/chunkserver/raftlog/common.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 NetEase Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Project: curve
+ * Created Date: 2020-09-16
+ * Author: charisu
+ */
+
+#ifndef TEST_CHUNKSERVER_RAFTLOG_COMMON_H_
+#define TEST_CHUNKSERVER_RAFTLOG_COMMON_H_
+
+#include
+#include
+#include
+
+namespace curve {
+namespace chunkserver {
+
+const uint32_t kSegmentSize = 8388608;
+const uint32_t kPageSize = 4096;
+const char kRaftLogDataDir[] = "./raft-log-data/";
+
+int prepare_segment(const std::string& path);
+
+} // namespace chunkserver
+} // namespace curve
+
+
+#endif // TEST_CHUNKSERVER_RAFTLOG_COMMON_H_
diff --git a/test/chunkserver/raftlog/test_curve_segment.cpp b/test/chunkserver/raftlog/test_curve_segment.cpp
new file mode 100644
index 0000000000..ba0cfcecbb
--- /dev/null
+++ b/test/chunkserver/raftlog/test_curve_segment.cpp
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2020 NetEase Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: curve + * Created Date: 2020-09-16 + * Author: charisu + */ + +// libraft - Quorum-based replication of states across machines. +// Copyright (c) 2015 Baidu.com, Inc. All Rights Reserved + +// Author: WangYao (fisherman), wangyao02@baidu.com +// Date: 2015/10/08 17:00:05 + +#include +#include +#include +#include +#include "src/chunkserver/raftlog/curve_segment.h" +#include "src/chunkserver/raftlog/define.h" +#include "test/fs/mock_local_filesystem.h" +#include "test/chunkserver/datastore/mock_file_pool.h" +#include "test/chunkserver/raftlog/common.h" + +namespace curve { +namespace chunkserver { + +using curve::fs::MockLocalFileSystem; +using ::testing::Return; +using ::testing::_; + +class CurveSegmentTest : public testing::Test { + protected: + CurveSegmentTest() { + fp_option.metaPageSize = kPageSize; + fp_option.fileSize = kSegmentSize; + } + void SetUp() { + lfs = std::make_shared(); + file_pool = std::make_shared(lfs); + kWalFilePool = file_pool; + std::string cmd = std::string("mkdir ") + kRaftLogDataDir; + ::system(cmd.c_str()); + } + void TearDown() { + kWalFilePool = nullptr; + std::string cmd = std::string("rm -rf ") + kRaftLogDataDir; + ::system(cmd.c_str()); + } + void append_entries_curve_segment(CurveSegment* segment, + const char* data_pattern = "hello, world: %d", + int start_index = 0, + int end_index = 10) { + for (int i = start_index; i < end_index; i++) { + braft::LogEntry* entry = new braft::LogEntry(); + entry->AddRef(); + entry->type = braft::ENTRY_TYPE_DATA; + entry->id.term = 1; + entry->id.index = i + 1; + + char data_buf[128]; + snprintf(data_buf, sizeof(data_buf), data_pattern, i + 1); + entry->data.append(data_buf); + ASSERT_EQ(0, segment->append(entry)); + entry->Release(); + } + } + void append_entries_braft_segment(braft::Segment* segment, + const char* data_pattern = "hello, world: %d", + int start_index = 0, + int end_index = 10) { + for (int i = start_index; i < end_index; i++) { + braft::LogEntry* entry = new braft::LogEntry(); + entry->AddRef(); + entry->type = braft::ENTRY_TYPE_DATA; + entry->id.term = 1; + entry->id.index = i + 1; + + char data_buf[128]; + snprintf(data_buf, sizeof(data_buf), data_pattern, i + 1); + entry->data.append(data_buf); + ASSERT_EQ(0, segment->append(entry)); + entry->Release(); + } + } + void read_entries_curve_segment(CurveSegment* segment, + const char* data_pattern = "hello, world: %d", + int start_index = 0, + int end_index = 10) { + for (int i = start_index; i < end_index; i++) { + int64_t term = segment->get_term(i+1); + ASSERT_EQ(term, 1); + + braft::LogEntry* entry = segment->get(i+1); + ASSERT_EQ(entry->id.term, 1); + ASSERT_EQ(entry->type, braft::ENTRY_TYPE_DATA); + ASSERT_EQ(entry->id.index, i+1); + + char data_buf[128]; + snprintf(data_buf, sizeof(data_buf), data_pattern, i + 1); + ASSERT_EQ(data_buf, entry->data.to_string()); + entry->Release(); + } + } + std::shared_ptr lfs; + std::shared_ptr file_pool; + FilePoolOptions fp_option; 
+}; + +TEST_F(CurveSegmentTest, open_segment) { + EXPECT_CALL(*file_pool, GetFilePoolOpt()) + .WillRepeatedly(Return(fp_option)); + EXPECT_CALL(*file_pool, GetFile(_, _)) + .WillOnce(Return(0)); + EXPECT_CALL(*file_pool, RecycleFile(_)) + .WillOnce(Return(0)); + scoped_refptr seg1 = + new CurveSegment(kRaftLogDataDir, 1, 0, true); + ASSERT_TRUE(seg1->from_pool()); + + // not open + braft::LogEntry* entry = seg1->get(1); + ASSERT_TRUE(entry == NULL); + + // create and open + std::string path = kRaftLogDataDir; + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1); + ASSERT_EQ(0, prepare_segment(path)); + ASSERT_EQ(0, seg1->create()); + ASSERT_TRUE(seg1->is_open()); + + // append entry + append_entries_curve_segment(seg1); + + // read entry + read_entries_curve_segment(seg1); + { + braft::LogEntry* entry = seg1->get(0); + ASSERT_TRUE(entry == NULL); + entry = seg1->get(11); + ASSERT_TRUE(entry == NULL); + } + + braft::ConfigurationManager* configuration_manager = + new braft::ConfigurationManager; + // load open segment + scoped_refptr seg2 = + new CurveSegment(kRaftLogDataDir, 1, 0, true); + ASSERT_EQ(0, seg2->load(configuration_manager)); + + read_entries_curve_segment(seg2); + { + braft::LogEntry* entry = seg1->get(0); + ASSERT_TRUE(entry == NULL); + entry = seg1->get(11); + ASSERT_TRUE(entry == NULL); + } + + // truncate and read + ASSERT_EQ(0, seg1->truncate(5)); + append_entries_curve_segment(seg1, "HELLO, WORLD: %d", 5, 10); + read_entries_curve_segment(seg1, "hello, world: %d", 0, 5); + read_entries_curve_segment(seg1, "HELLO, WORLD: %d", 5, 10); + ASSERT_EQ(0, seg1->close()); + ASSERT_FALSE(seg1->is_open()); + ASSERT_EQ(0, seg1->unlink()); + + delete configuration_manager; +} + +TEST_F(CurveSegmentTest, open_segment_compatibility) { + EXPECT_CALL(*file_pool, GetFilePoolOpt()) + .Times(0); + scoped_refptr seg1 = + new braft::Segment(kRaftLogDataDir, 1, 0); + // create and open + ASSERT_EQ(0, seg1->create()); + ASSERT_TRUE(seg1->is_open()); + + // append entry + append_entries_braft_segment(seg1); + braft::ConfigurationManager* configuration_manager = + new braft::ConfigurationManager; + // load open segment + scoped_refptr seg2 = + new CurveSegment(kRaftLogDataDir, 1, 0, false); + ASSERT_EQ(0, seg2->load(configuration_manager)); + read_entries_curve_segment(seg2); + { + braft::LogEntry* entry = seg1->get(0); + ASSERT_TRUE(entry == NULL); + entry = seg1->get(11); + ASSERT_TRUE(entry == NULL); + } + + // truncate and unlink + ASSERT_EQ(0, seg2->truncate(5)); + ASSERT_EQ(0, seg2->close()); + ASSERT_FALSE(seg2->is_open()); + EXPECT_CALL(*file_pool, RecycleFile(_)) + .Times(0); + ASSERT_EQ(0, seg2->unlink()); + delete configuration_manager; +} + +TEST_F(CurveSegmentTest, closed_segment) { + EXPECT_CALL(*file_pool, GetFilePoolOpt()) + .WillRepeatedly(Return(fp_option)); + EXPECT_CALL(*file_pool, GetFile(_, _)) + .WillOnce(Return(0)); + EXPECT_CALL(*file_pool, RecycleFile(_)) + .WillOnce(Return(0)); + scoped_refptr seg1 = + new CurveSegment(kRaftLogDataDir, 1, 0, true); + + // create and open + std::string path = kRaftLogDataDir; + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1); + ASSERT_EQ(0, prepare_segment(path)); + ASSERT_EQ(0, seg1->create()); + ASSERT_TRUE(seg1->is_open()); + + // append entry + append_entries_curve_segment(seg1); + seg1->close(); + + // read entry + read_entries_curve_segment(seg1); + { + braft::LogEntry* entry = seg1->get(0); + ASSERT_TRUE(entry == NULL); + entry = seg1->get(11); + ASSERT_TRUE(entry == NULL); + } + + 
braft::ConfigurationManager* configuration_manager = + new braft::ConfigurationManager; + // load closed segment + scoped_refptr seg2 = + new CurveSegment(kRaftLogDataDir, 1, 10, 0, true); + ASSERT_EQ(0, seg2->load(configuration_manager)); + + read_entries_curve_segment(seg2); + { + braft::LogEntry* entry = seg1->get(0); + ASSERT_TRUE(entry == NULL); + entry = seg1->get(11); + ASSERT_TRUE(entry == NULL); + } + + // truncate and read + ASSERT_EQ(0, seg1->truncate(5)); + append_entries_curve_segment(seg1, "HELLO, WORLD: %d", 5, 10); + read_entries_curve_segment(seg1, "hello, world: %d", 0, 5); + read_entries_curve_segment(seg1, "HELLO, WORLD: %d", 5, 10); + ASSERT_EQ(0, seg1->unlink()); + + delete configuration_manager; +} + +TEST_F(CurveSegmentTest, closed_segment_compatibility) { + scoped_refptr seg1 = + new braft::Segment(kRaftLogDataDir, 1, 0); + + // create and open + ASSERT_EQ(0, seg1->create()); + ASSERT_TRUE(seg1->is_open()); + + // append entry + append_entries_braft_segment(seg1); + seg1->close(); + + // read entry + for (int i = 0; i < 10; i++) { + braft::LogEntry* entry = seg1->get(i+1); + ASSERT_EQ(entry->id.term, 1); + ASSERT_EQ(entry->type, braft::ENTRY_TYPE_DATA); + ASSERT_EQ(entry->id.index, i+1); + + char data_buf[128]; + snprintf(data_buf, sizeof(data_buf), "hello, world: %d", i + 1); + ASSERT_EQ(data_buf, entry->data.to_string()); + entry->Release(); + } + { + braft::LogEntry* entry = seg1->get(0); + ASSERT_TRUE(entry == NULL); + entry = seg1->get(11); + ASSERT_TRUE(entry == NULL); + } + + braft::ConfigurationManager* configuration_manager = + new braft::ConfigurationManager; + // load closed segment + scoped_refptr seg2 = + new CurveSegment(kRaftLogDataDir, 1, 10, 0, false); + ASSERT_EQ(0, seg2->load(configuration_manager)); + read_entries_curve_segment(seg2); + { + braft::LogEntry* entry = seg2->get(0); + ASSERT_TRUE(entry == NULL); + entry = seg2->get(11); + ASSERT_TRUE(entry == NULL); + } + ASSERT_EQ(0, seg1->unlink()); + + delete configuration_manager; +} + + +} // namespace chunkserver +} // namespace curve diff --git a/test/chunkserver/raftlog/test_curve_segment_log_storage.cpp b/test/chunkserver/raftlog/test_curve_segment_log_storage.cpp new file mode 100644 index 0000000000..911041df89 --- /dev/null +++ b/test/chunkserver/raftlog/test_curve_segment_log_storage.cpp @@ -0,0 +1,492 @@ +/* + * Copyright (c) 2020 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Project: curve + * Created Date: 2020-09-16 + * Author: charisu + */ + +// libraft - Quorum-based replication of states across machines. +// Copyright (c) 2015 Baidu.com, Inc. 
All Rights Reserved + +// Author: WangYao (fisherman), wangyao02@baidu.com +// Date: 2015/10/08 17:00:05 + +#include +#include +#include +#include +#include "src/chunkserver/raftlog/curve_segment_log_storage.h" +#include "src/chunkserver/raftlog/define.h" +#include "test/fs/mock_local_filesystem.h" +#include "test/chunkserver/datastore/mock_file_pool.h" +#include "test/chunkserver/raftlog/common.h" + +namespace curve { +namespace chunkserver { + +using curve::fs::MockLocalFileSystem; +using ::testing::Return; +using ::testing::_; + +class CurveSegmentLogStorageTest : public testing::Test { + protected: + CurveSegmentLogStorageTest() { + fp_option.metaPageSize = kPageSize; + fp_option.fileSize = kSegmentSize; + } + void SetUp() { + lfs = std::make_shared(); + file_pool = std::make_shared(lfs); + kWalFilePool = file_pool; + std::string cmd = std::string("mkdir ") + kRaftLogDataDir; + ::system(cmd.c_str()); + } + void TearDown() { + kWalFilePool = nullptr; + std::string cmd = std::string("rm -rf ") + kRaftLogDataDir; + ::system(cmd.c_str()); + } + void append_entries(std::shared_ptr storage, + int m, int n) { + for (int i = 0; i < m; i++) { + std::vector entries; + for (int j = 0; j < n; j++) { + int64_t index = n*i + j + 1; + braft::LogEntry* entry = new braft::LogEntry(); + entry->type = braft::ENTRY_TYPE_DATA; + entry->id.term = 1; + entry->id.index = index; + + char data_buf[128]; + snprintf(data_buf, sizeof(data_buf), + "hello, world: %" PRId64, index); + entry->data.append(data_buf); + entries.push_back(entry); + } + + ASSERT_EQ(n, storage->append_entries(entries)); + } + } + void read_entries(std::shared_ptr storage, + int start, int end) { + for (int i = start; i < end; i++) { + int64_t index = i + 1; + braft::LogEntry* entry = storage->get_entry(index); + ASSERT_EQ(entry->id.term, 1); + ASSERT_EQ(entry->type, braft::ENTRY_TYPE_DATA); + ASSERT_EQ(entry->id.index, index); + + char data_buf[128]; + snprintf(data_buf, sizeof(data_buf), + "hello, world: %" PRId64, index); + ASSERT_EQ(data_buf, entry->data.to_string()); + } + } + std::shared_ptr lfs; + std::shared_ptr file_pool; + FilePoolOptions fp_option; +}; + +TEST_F(CurveSegmentLogStorageTest, basic_test) { + auto storage = std::make_shared(kRaftLogDataDir); + EXPECT_CALL(*file_pool, GetFilePoolOpt()) + .WillRepeatedly(Return(fp_option)); + EXPECT_CALL(*file_pool, GetFile(_, _)) + .WillRepeatedly(Return(0)); + EXPECT_CALL(*file_pool, RecycleFile(_)) + .WillRepeatedly(Return(0)); + // init + ASSERT_EQ(0, storage->init(new braft::ConfigurationManager())); + ASSERT_EQ(1, storage->first_log_index()); + ASSERT_EQ(0, storage->last_log_index()); + // append entry + std::string path = kRaftLogDataDir; + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1); + ASSERT_EQ(0, prepare_segment(path)); + path = kRaftLogDataDir; + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 2049); + ASSERT_EQ(0, prepare_segment(path)); + path = kRaftLogDataDir; + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 4097); + ASSERT_EQ(0, prepare_segment(path)); + append_entries(storage, 1000, 5); + + // read entry + read_entries(storage, 0, 5000); + + ASSERT_EQ(storage->first_log_index(), 1); + ASSERT_EQ(storage->last_log_index(), 5000); + // truncate prefix + ASSERT_EQ(0, storage->truncate_prefix(1001)); + ASSERT_EQ(storage->first_log_index(), 1001); + ASSERT_EQ(storage->last_log_index(), 5000); + + // boundary truncate prefix + { + auto& segments1 = storage->segments(); + size_t old_segment_num = segments1.size(); + auto first_seg 
= segments1.begin()->second.get(); + + ASSERT_EQ(0, storage->truncate_prefix(first_seg->last_index())); + auto& segments2 = storage->segments(); + ASSERT_EQ(old_segment_num, segments2.size()); + + ASSERT_EQ(0, storage->truncate_prefix(first_seg->last_index() + 1)); + auto& segments3 = storage->segments(); + ASSERT_EQ(old_segment_num - 1, segments3.size()); + } + + ASSERT_EQ(0, storage->truncate_prefix(2100)); + ASSERT_EQ(storage->first_log_index(), 2100); + ASSERT_EQ(storage->last_log_index(), 5000); + read_entries(storage, 2100, 5000); + + // append + path = kRaftLogDataDir; + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 6145); + ASSERT_EQ(0, prepare_segment(path)); + for (int i = 5001; i <= 7000; i++) { + int64_t index = i; + braft::LogEntry* entry = new braft::LogEntry(); + entry->type = braft::ENTRY_TYPE_DATA; + entry->id.term = 1; + entry->id.index = index; + char data_buf[128]; + snprintf(data_buf, sizeof(data_buf), "hello, world: %" PRId64, index); + entry->data.append(data_buf); + ASSERT_EQ(0, storage->append_entry(entry)); + } + + // truncate suffix + ASSERT_EQ(2100, storage->first_log_index()); + ASSERT_EQ(7000, storage->last_log_index()); + ASSERT_EQ(0, storage->truncate_suffix(6200)); + ASSERT_EQ(2100, storage->first_log_index()); + ASSERT_EQ(6200, storage->last_log_index()); + + // boundary truncate suffix + { + auto& segments1 = storage->segments(); + LOG(INFO) << "segments num: " << segments1.size(); + auto first_seg = segments1.begin()->second.get(); + if (segments1.size() > 1) { + storage->truncate_suffix(first_seg->last_index() + 1); + } + auto& segments2 = storage->segments(); + ASSERT_EQ(2, segments2.size()); + ASSERT_EQ(storage->last_log_index(), first_seg->last_index() + 1); + storage->truncate_suffix(first_seg->last_index()); + auto segments3 = storage->segments(); + ASSERT_EQ(1, segments3.size()); + ASSERT_EQ(storage->last_log_index(), first_seg->last_index()); + } + + // read + read_entries(storage, 2100, storage->last_log_index()); + + // re load + std::string cmd = std::string("rm -rf ") + kRaftLogDataDir + "/log_meta"; + ::system(cmd.c_str()); + auto storage2 = std::make_shared(kRaftLogDataDir); + ASSERT_EQ(0, storage2->init(new braft::ConfigurationManager())); + ASSERT_EQ(1, storage2->first_log_index()); + ASSERT_EQ(0, storage2->last_log_index()); +} + +TEST_F(CurveSegmentLogStorageTest, append_close_load_append) { + EXPECT_CALL(*file_pool, GetFilePoolOpt()) + .WillRepeatedly(Return(fp_option)); + EXPECT_CALL(*file_pool, GetFile(_, _)) + .WillRepeatedly(Return(0)); + auto storage = std::make_shared(kRaftLogDataDir); + braft::ConfigurationManager* configuration_manager = + new braft::ConfigurationManager; + ASSERT_EQ(0, storage->init(configuration_manager)); + + // append entry + std::string path = kRaftLogDataDir; + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1); + ASSERT_EQ(0, prepare_segment(path)); + path = kRaftLogDataDir; + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 2049); + ASSERT_EQ(0, prepare_segment(path)); + append_entries(storage, 600, 5); + + storage = nullptr; + delete configuration_manager; + + // reinit + storage = std::make_shared(kRaftLogDataDir); + configuration_manager = new braft::ConfigurationManager; + ASSERT_EQ(0, storage->init(configuration_manager)); + + // append entry + path = kRaftLogDataDir; + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 4097); + ASSERT_EQ(0, prepare_segment(path)); + for (int i = 600; i < 1000; i++) { + std::vector entries; + for (int j = 0; j < 5; 
j++) { + int64_t index = 5*i + j + 1; + braft::LogEntry* entry = new braft::LogEntry(); + entry->type = braft::ENTRY_TYPE_DATA; + entry->id.term = 2; + entry->id.index = index; + + char data_buf[128]; + snprintf(data_buf, sizeof(data_buf), + "hello, world: %" PRId64, index); + entry->data.append(data_buf); + entries.push_back(entry); + } + + ASSERT_EQ(5, storage->append_entries(entries)); + } + + // check and read + ASSERT_EQ(storage->first_log_index(), 1); + ASSERT_EQ(storage->last_log_index(), 5000); + + for (int i = 0; i < 5000; i++) { + int64_t index = i + 1; + braft::LogEntry* entry = storage->get_entry(index); + if (i < 3000) { + ASSERT_EQ(entry->id.term, 1); + } else { + ASSERT_EQ(entry->id.term, 2); + } + ASSERT_EQ(entry->type, braft::ENTRY_TYPE_DATA); + ASSERT_EQ(entry->id.index, index); + + char data_buf[128]; + snprintf(data_buf, sizeof(data_buf), "hello, world: %" PRId64, index); + ASSERT_EQ(data_buf, entry->data.to_string()); + entry->Release(); + } + delete configuration_manager; +} + +TEST_F(CurveSegmentLogStorageTest, data_lost) { + EXPECT_CALL(*file_pool, GetFilePoolOpt()) + .WillRepeatedly(Return(fp_option)); + EXPECT_CALL(*file_pool, GetFile(_, _)) + .WillRepeatedly(Return(0)); + auto storage = std::make_shared(kRaftLogDataDir); + braft::ConfigurationManager* configuration_manager = + new braft::ConfigurationManager; + ASSERT_EQ(0, storage->init(configuration_manager)); + + // append entry + std::string path = kRaftLogDataDir; + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1); + ASSERT_EQ(0, prepare_segment(path)); + append_entries(storage, 100, 5); + + delete configuration_manager; + + // corrupt data + int fd = ::open(path.c_str(), O_RDWR); + ASSERT_GE(fd, 0); + char data[4096]; + memset(data, 0, 4096); + ASSERT_EQ(4096, ::pwrite(fd, data, 4096, 8192)); + storage = std::make_shared(kRaftLogDataDir); + configuration_manager = new braft::ConfigurationManager; + ASSERT_NE(0, storage->init(configuration_manager)); + + delete configuration_manager; +} + +TEST_F(CurveSegmentLogStorageTest, compatibility) { + EXPECT_CALL(*file_pool, GetFilePoolOpt()) + .WillRepeatedly(Return(fp_option)); + EXPECT_CALL(*file_pool, GetFile(_, _)) + .WillRepeatedly(Return(0)); + + auto storage1 = std::make_shared(kRaftLogDataDir); + // init + braft::ConfigurationManager* configuration_manager = + new braft::ConfigurationManager; + ASSERT_EQ(0, storage1->init(configuration_manager)); + ASSERT_EQ(1, storage1->first_log_index()); + ASSERT_EQ(0, storage1->last_log_index()); + + // append entry + append_entries(storage1, 600, 5); + delete configuration_manager; + + // reinit + auto storage2 = std::make_shared(kRaftLogDataDir); + configuration_manager = new braft::ConfigurationManager; + ASSERT_NE(0, storage2->init(configuration_manager)); + + // append entry + std::string path = kRaftLogDataDir; + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 3001); + ASSERT_EQ(0, prepare_segment(path)); + for (int i = 600; i < 1000; i++) { + std::vector entries; + for (int j = 0; j < 5; j++) { + int64_t index = 5*i + j + 1; + braft::LogEntry* entry = new braft::LogEntry(); + entry->type = braft::ENTRY_TYPE_DATA; + entry->id.term = 2; + entry->id.index = index; + + char data_buf[128]; + snprintf(data_buf, sizeof(data_buf), + "hello, world: %" PRId64, index); + entry->data.append(data_buf); + entries.push_back(entry); + } + + ASSERT_EQ(5, storage2->append_entries(entries)); + } + // check and read + ASSERT_EQ(storage2->first_log_index(), 1); + ASSERT_EQ(storage2->last_log_index(), 5000); 
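
Aside on the hunks above: the tests call two fixture helpers, append_entries() and read_entries(), whose definitions sit outside this part of the diff. The explicit loops in append_close_load_append and compatibility suggest their shape; the following is only a sketch under that assumption (five-entry batches, term 1, and the same "hello, world" payload), not the code actually shipped in the test file:

    // Sketch only: assumed bodies of the append_entries()/read_entries() helpers.
    // Batch size, term and payload mirror the explicit loops in the hunks above.
    #include <inttypes.h>
    #include <algorithm>
    #include <cstdio>
    #include <vector>
    #include <braft/log_entry.h>
    #include <gtest/gtest.h>

    template <typename StoragePtr>
    void append_entries(const StoragePtr& storage, int batch_num, int batch_size) {
        for (int i = 0; i < batch_num; ++i) {
            std::vector<braft::LogEntry*> entries;
            for (int j = 0; j < batch_size; ++j) {
                int64_t index = static_cast<int64_t>(i) * batch_size + j + 1;
                braft::LogEntry* entry = new braft::LogEntry();
                entry->type = braft::ENTRY_TYPE_DATA;
                entry->id.term = 1;
                entry->id.index = index;
                char buf[128];
                snprintf(buf, sizeof(buf), "hello, world: %" PRId64, index);
                entry->data.append(buf);
                entries.push_back(entry);
            }
            ASSERT_EQ(batch_size, storage->append_entries(entries));
        }
    }

    template <typename StoragePtr>
    void read_entries(const StoragePtr& storage, int64_t from, int64_t to) {
        for (int64_t index = std::max<int64_t>(from, 1); index <= to; ++index) {
            braft::LogEntry* entry = storage->get_entry(index);
            ASSERT_NE(nullptr, entry);
            ASSERT_EQ(braft::ENTRY_TYPE_DATA, entry->type);
            ASSERT_EQ(index, entry->id.index);
            char buf[128];
            snprintf(buf, sizeof(buf), "hello, world: %" PRId64, index);
            ASSERT_EQ(buf, entry->data.to_string());
            entry->Release();
        }
    }
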
+ + for (int i = 0; i < 5000; i++) { + int64_t index = i + 1; + braft::LogEntry* entry = storage2->get_entry(index); + if (i < 3000) { + ASSERT_EQ(entry->id.term, 1); + } else { + ASSERT_EQ(entry->id.term, 2); + } + ASSERT_EQ(entry->type, braft::ENTRY_TYPE_DATA); + ASSERT_EQ(entry->id.index, index); + + char data_buf[128]; + snprintf(data_buf, sizeof(data_buf), "hello, world: %" PRId64, index); + ASSERT_EQ(data_buf, entry->data.to_string()); + entry->Release(); + } + delete configuration_manager; +} + +TEST_F(CurveSegmentLogStorageTest, basic_test_without_direct) { + FLAGS_enableWalDirectWrite = false; + auto storage = std::make_shared(kRaftLogDataDir); + EXPECT_CALL(*file_pool, GetFilePoolOpt()) + .WillRepeatedly(Return(fp_option)); + EXPECT_CALL(*file_pool, GetFile(_, _)) + .WillRepeatedly(Return(0)); + EXPECT_CALL(*file_pool, RecycleFile(_)) + .WillRepeatedly(Return(0)); + // init + ASSERT_EQ(0, storage->init(new braft::ConfigurationManager())); + ASSERT_EQ(1, storage->first_log_index()); + ASSERT_EQ(0, storage->last_log_index()); + // append entry + std::string path = kRaftLogDataDir; + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1); + ASSERT_EQ(0, prepare_segment(path)); + path = kRaftLogDataDir; + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 2049); + ASSERT_EQ(0, prepare_segment(path)); + path = kRaftLogDataDir; + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 4097); + ASSERT_EQ(0, prepare_segment(path)); + append_entries(storage, 1000, 5); + + // read entry + read_entries(storage, 0, 5000); + + ASSERT_EQ(storage->first_log_index(), 1); + ASSERT_EQ(storage->last_log_index(), 5000); + // truncate prefix + ASSERT_EQ(0, storage->truncate_prefix(1001)); + ASSERT_EQ(storage->first_log_index(), 1001); + ASSERT_EQ(storage->last_log_index(), 5000); + + // boundary truncate prefix + { + auto& segments1 = storage->segments(); + size_t old_segment_num = segments1.size(); + auto first_seg = segments1.begin()->second.get(); + + ASSERT_EQ(0, storage->truncate_prefix(first_seg->last_index())); + auto& segments2 = storage->segments(); + ASSERT_EQ(old_segment_num, segments2.size()); + + ASSERT_EQ(0, storage->truncate_prefix(first_seg->last_index() + 1)); + auto& segments3 = storage->segments(); + ASSERT_EQ(old_segment_num - 1, segments3.size()); + } + + ASSERT_EQ(0, storage->truncate_prefix(2100)); + ASSERT_EQ(storage->first_log_index(), 2100); + ASSERT_EQ(storage->last_log_index(), 5000); + read_entries(storage, 2100, 5000); + + // append + path = kRaftLogDataDir; + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 6145); + ASSERT_EQ(0, prepare_segment(path)); + for (int i = 5001; i <= 7000; i++) { + int64_t index = i; + braft::LogEntry* entry = new braft::LogEntry(); + entry->type = braft::ENTRY_TYPE_DATA; + entry->id.term = 1; + entry->id.index = index; + char data_buf[128]; + snprintf(data_buf, sizeof(data_buf), "hello, world: %" PRId64, index); + entry->data.append(data_buf); + ASSERT_EQ(0, storage->append_entry(entry)); + } + + // truncate suffix + ASSERT_EQ(2100, storage->first_log_index()); + ASSERT_EQ(7000, storage->last_log_index()); + ASSERT_EQ(0, storage->truncate_suffix(6200)); + ASSERT_EQ(2100, storage->first_log_index()); + ASSERT_EQ(6200, storage->last_log_index()); + + // boundary truncate suffix + { + auto& segments1 = storage->segments(); + LOG(INFO) << "segments num: " << segments1.size(); + auto first_seg = segments1.begin()->second.get(); + if (segments1.size() > 1) { + storage->truncate_suffix(first_seg->last_index() + 
1); + } + auto& segments2 = storage->segments(); + ASSERT_EQ(2, segments2.size()); + ASSERT_EQ(storage->last_log_index(), first_seg->last_index() + 1); + storage->truncate_suffix(first_seg->last_index()); + auto segments3 = storage->segments(); + ASSERT_EQ(1, segments3.size()); + ASSERT_EQ(storage->last_log_index(), first_seg->last_index()); + } + + // read + read_entries(storage, 2100, storage->last_log_index()); + + // re load + std::string cmd = std::string("rm -rf ") + kRaftLogDataDir + "/log_meta"; + ::system(cmd.c_str()); + auto storage2 = std::make_shared(kRaftLogDataDir); + ASSERT_EQ(0, storage2->init(new braft::ConfigurationManager())); + ASSERT_EQ(1, storage2->first_log_index()); + ASSERT_EQ(0, storage2->last_log_index()); +} + + +} // namespace chunkserver +} // namespace curve diff --git a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp index 24ce26a837..bfe820d258 100644 --- a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp +++ b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp @@ -29,7 +29,7 @@ #include "src/fs/local_filesystem.h" #include "test/fs/mock_local_filesystem.h" -#include "src/chunkserver/datastore/chunkfile_pool.h" +#include "src/chunkserver/datastore/file_pool.h" #include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h" #include "src/chunkserver/raftsnapshot/define.h" @@ -51,7 +51,7 @@ using ::testing::AtLeast; using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::chunkserver::ChunkfilePool; +using curve::chunkserver::FilePool; using curve::fs::MockLocalFileSystem; namespace curve { namespace chunkserver { @@ -60,8 +60,8 @@ class RaftSnapshotFilesystemAdaptorMockTest : public testing::Test { void SetUp() { fsptr = curve::fs::LocalFsFactory::CreateFs( curve::fs::FileSystemType::EXT4, "/dev/sda"); - ChunkfilepoolPtr_ = std::make_shared(fsptr); - if (ChunkfilepoolPtr_ == nullptr) { + FilePoolPtr_ = std::make_shared(fsptr); + if (FilePoolPtr_ == nullptr) { LOG(FATAL) << "allocate chunkfile pool failed!"; } int count = 1; @@ -82,15 +82,15 @@ class RaftSnapshotFilesystemAdaptorMockTest : public testing::Test { uint32_t chunksize = 4096; uint32_t metapagesize = 4096; - ChunkfilePoolOptions cpopt; - cpopt.getChunkFromPool = true; - cpopt.chunkSize = chunksize; + FilePoolOptions cpopt; + cpopt.getFileFromPool = true; + cpopt.fileSize = chunksize; cpopt.metaPageSize = metapagesize; - cpopt.cpMetaFileSize = 4096; - memcpy(cpopt.chunkFilePoolDir, "./raftsnap/chunkfilepool", 17); + cpopt.metaFileSize = 4096; + memcpy(cpopt.filePoolDir, "./raftsnap/chunkfilepool", 17); memcpy(cpopt.metaPath, "./raftsnap/chunkfilepool.meta", 30); - int ret = ChunkfilePoolHelper::PersistEnCodeMetaInfo( + int ret = FilePoolHelper::PersistEnCodeMetaInfo( fsptr, chunksize, metapagesize, @@ -104,16 +104,16 @@ class RaftSnapshotFilesystemAdaptorMockTest : public testing::Test { lfs = std::make_shared(); - rfa = new CurveFilesystemAdaptor(ChunkfilepoolPtr_, lfs); + rfa = new CurveFilesystemAdaptor(FilePoolPtr_, lfs); std::vector filterList; std::string snapshotMeta(BRAFT_SNAPSHOT_META_FILE); filterList.push_back(snapshotMeta); rfa->SetFilterList(filterList); - ASSERT_TRUE(ChunkfilepoolPtr_->Initialize(cpopt)); + ASSERT_TRUE(FilePoolPtr_->Initialize(cpopt)); scoped_refptr scptr(rfa); - ChunkfilepoolPtr_->SetLocalFileSystem(lfs); + FilePoolPtr_->SetLocalFileSystem(lfs); 
fsadaptor.swap(scptr); fsadaptor->AddRef(); @@ -121,7 +121,7 @@ class RaftSnapshotFilesystemAdaptorMockTest : public testing::Test { void TearDown() { std::vector filename; - fsptr->List("./raftsnap/chunkfilepool", &filename); + fsptr->List("./raftsnap/FilePool", &filename); for (auto iter : filename) { auto path = "./raftsnap/chunkfilepool/" + iter; int err = fsptr->Delete(path.c_str()); @@ -131,7 +131,7 @@ class RaftSnapshotFilesystemAdaptorMockTest : public testing::Test { } fsptr->Delete("./raftsnap/chunkfilepool"); fsptr->Delete("./raftsnap/chunkfilepool.meta"); - ChunkfilepoolPtr_->UnInitialize(); + FilePoolPtr_->UnInitialize(); fsadaptor->Release(); } @@ -144,7 +144,7 @@ class RaftSnapshotFilesystemAdaptorMockTest : public testing::Test { } scoped_refptr fsadaptor; - std::shared_ptr ChunkfilepoolPtr_; + std::shared_ptr FilePoolPtr_; std::shared_ptr fsptr; std::shared_ptr lfs; CurveFilesystemAdaptor* rfa; @@ -155,26 +155,26 @@ TEST_F(RaftSnapshotFilesystemAdaptorMockTest, open_file_mock_test) { CreateChunkFile("./10"); std::string path = "./10"; butil::File::Error e; - ASSERT_EQ(ChunkfilepoolPtr_->Size(), 3); + ASSERT_EQ(FilePoolPtr_->Size(), 3); EXPECT_CALL(*lfs, Open(_, _)).Times(AtLeast(1)).WillRepeatedly(Return(-1)); braft::FileAdaptor* fa = fsadaptor->open(path, O_RDONLY | O_CLOEXEC, nullptr, &e); - ASSERT_EQ(ChunkfilepoolPtr_->Size(), 3); + ASSERT_EQ(FilePoolPtr_->Size(), 3); ASSERT_EQ(nullptr, fa); - // 2. open flag带CREAT, 从chunkfilepool取文件,但是chunkfilepool打开文件失败 + // 2. open flag带CREAT, 从FilePool取文件,但是FilePool打开文件失败 // 所以还是走原有逻辑,本地创建文件成功 EXPECT_CALL(*lfs, Open(_, _)).Times(3).WillOnce(Return(-1)) .WillOnce(Return(-1)) .WillOnce(Return(-1)); EXPECT_CALL(*lfs, FileExists(_)).Times(1).WillRepeatedly(Return(0)); - ASSERT_EQ(ChunkfilepoolPtr_->Size(), 3); + ASSERT_EQ(FilePoolPtr_->Size(), 3); path = "./11"; fa = fsadaptor->open(path, O_RDONLY | O_CLOEXEC | O_CREAT, nullptr, &e); - ASSERT_EQ(ChunkfilepoolPtr_->Size(), 0); + ASSERT_EQ(FilePoolPtr_->Size(), 0); ASSERT_TRUE(fsptr->FileExists("./10")); ASSERT_EQ(0, fsptr->Delete("./10")); ASSERT_FALSE(fsptr->FileExists("./10")); diff --git a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp index 483080d40e..4c9384fe0b 100644 --- a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp +++ b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp @@ -27,13 +27,13 @@ #include #include "src/fs/local_filesystem.h" -#include "src/chunkserver/datastore/chunkfile_pool.h" +#include "src/chunkserver/datastore/file_pool.h" #include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h" #include "src/chunkserver/raftsnapshot/define.h" using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; -using curve::chunkserver::ChunkfilePool; +using curve::chunkserver::FilePool; namespace curve { namespace chunkserver { class CurveFilesystemAdaptorTest : public testing::Test { @@ -41,8 +41,8 @@ class CurveFilesystemAdaptorTest : public testing::Test { void SetUp() { fsptr = curve::fs::LocalFsFactory::CreateFs( curve::fs::FileSystemType::EXT4, "/dev/sda"); - ChunkfilepoolPtr_ = std::make_shared(fsptr); - if (ChunkfilepoolPtr_ == nullptr) { + chunkFilePoolPtr_ = std::make_shared(fsptr); + if (chunkFilePoolPtr_ == nullptr) { LOG(FATAL) << "allocate chunkfile pool failed!"; } int count = 1; @@ -63,15 +63,15 @@ class CurveFilesystemAdaptorTest : public testing::Test { uint32_t chunksize = 4096; uint32_t metapagesize = 4096; - 
ChunkfilePoolOptions cpopt; - cpopt.getChunkFromPool = true; - cpopt.chunkSize = chunksize; + FilePoolOptions cpopt; + cpopt.getFileFromPool = true; + cpopt.fileSize = chunksize; cpopt.metaPageSize = metapagesize; - cpopt.cpMetaFileSize = 4096; - memcpy(cpopt.chunkFilePoolDir, "./raftsnap/chunkfilepool", 17); + cpopt.metaFileSize = 4096; + memcpy(cpopt.filePoolDir, "./raftsnap/chunkfilepool", 17); memcpy(cpopt.metaPath, "./raftsnap/chunkfilepool.meta", 30); - int ret = ChunkfilePoolHelper::PersistEnCodeMetaInfo( + int ret = FilePoolHelper::PersistEnCodeMetaInfo( fsptr, chunksize, metapagesize, @@ -83,13 +83,13 @@ class CurveFilesystemAdaptorTest : public testing::Test { return; } - rfa = new CurveFilesystemAdaptor(ChunkfilepoolPtr_, fsptr); + rfa = new CurveFilesystemAdaptor(chunkFilePoolPtr_, fsptr); std::vector filterList; std::string snapshotMeta(BRAFT_SNAPSHOT_META_FILE); filterList.push_back(snapshotMeta); rfa->SetFilterList(filterList); - ASSERT_TRUE(ChunkfilepoolPtr_->Initialize(cpopt)); + ASSERT_TRUE(chunkFilePoolPtr_->Initialize(cpopt)); scoped_refptr scptr(rfa); fsadaptor.swap(scptr); @@ -98,11 +98,11 @@ class CurveFilesystemAdaptorTest : public testing::Test { void TearDown() { fsptr->Delete("./raftsnap"); - ChunkfilepoolPtr_->UnInitialize(); + chunkFilePoolPtr_->UnInitialize(); fsadaptor->Release(); } - void ClearChunkFilepool() { + void ClearFilePool() { std::vector filename; fsptr->List("./raftsnap/chunkfilepool", &filename); for (auto& iter : filename) { @@ -123,7 +123,7 @@ class CurveFilesystemAdaptorTest : public testing::Test { } scoped_refptr fsadaptor; - std::shared_ptr ChunkfilepoolPtr_; + std::shared_ptr chunkFilePoolPtr_; std::shared_ptr fsptr; CurveFilesystemAdaptor* rfa; }; @@ -132,24 +132,24 @@ TEST_F(CurveFilesystemAdaptorTest, open_file_test) { // 1. open flag不带CREAT std::string path = "./raftsnap/10"; butil::File::Error e; - ASSERT_EQ(ChunkfilepoolPtr_->Size(), 3); + ASSERT_EQ(chunkFilePoolPtr_->Size(), 3); braft::FileAdaptor* fa = fsadaptor->open(path, O_RDONLY | O_CLOEXEC, nullptr, &e); - ASSERT_EQ(ChunkfilepoolPtr_->Size(), 3); + ASSERT_EQ(chunkFilePoolPtr_->Size(), 3); ASSERT_FALSE(fsptr->FileExists("./raftsnap/10")); ASSERT_EQ(nullptr, fa); - // 2. open flag待CREAT, 从chunkfilepool取文件 - ASSERT_EQ(ChunkfilepoolPtr_->Size(), 3); + // 2. open flag待CREAT, 从FilePool取文件 + ASSERT_EQ(chunkFilePoolPtr_->Size(), 3); fa = fsadaptor->open(path, O_RDONLY | O_CLOEXEC | O_CREAT, nullptr, &e); - ASSERT_EQ(ChunkfilepoolPtr_->Size(), 2); + ASSERT_EQ(chunkFilePoolPtr_->Size(), 2); ASSERT_TRUE(fsptr->FileExists("./raftsnap/10")); ASSERT_NE(nullptr, fa); - // 3. open flag待CREAT,chunkfilepool为空时,从chunkfilepool取文件 - ClearChunkFilepool(); + // 3. 
open flag待CREAT,FilePool为空时,从FilePool取文件 + ClearFilePool(); fa = fsadaptor->open("./raftsnap/11", O_RDONLY | O_CLOEXEC | O_CREAT, nullptr, @@ -169,12 +169,12 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { CreateChunkFile("./test_temp/test_temp1/test_temp2/1"); CreateChunkFile("./test_temp/test_temp1/test_temp2/2"); // 非递归删除非空文件夹,返回false - ASSERT_EQ(ChunkfilepoolPtr_->Size(), 3); + ASSERT_EQ(chunkFilePoolPtr_->Size(), 3); ASSERT_FALSE(fsadaptor->delete_file("./test_temp", false)); - ASSERT_EQ(ChunkfilepoolPtr_->Size(), 3); - // 递归删除文件夹,chunk被回收到chunkfilepool + ASSERT_EQ(chunkFilePoolPtr_->Size(), 3); + // 递归删除文件夹,chunk被回收到FilePool ASSERT_TRUE(fsadaptor->delete_file("./test_temp", true)); - ASSERT_EQ(ChunkfilepoolPtr_->Size(), 9); + ASSERT_EQ(chunkFilePoolPtr_->Size(), 9); ASSERT_FALSE(fsptr->DirExists("./test_temp")); ASSERT_FALSE(fsptr->DirExists("./test_temp/test_temp1")); ASSERT_FALSE(fsptr->DirExists("./test_temp/test_temp1/test_temp2")); @@ -190,19 +190,19 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { ASSERT_TRUE(fsadaptor->delete_file("./test_temp3", false)); ASSERT_EQ(0, fsptr->Mkdir("./test_temp4")); ASSERT_TRUE(fsadaptor->delete_file("./test_temp4", true)); - ASSERT_EQ(ChunkfilepoolPtr_->Size(), 9); + ASSERT_EQ(chunkFilePoolPtr_->Size(), 9); ASSERT_FALSE(fsptr->DirExists("./test_temp3")); ASSERT_FALSE(fsptr->DirExists("./test_temp4")); - // 3. 删除一个常规chunk文件, 会被回收到chunkfilepool + // 3. 删除一个常规chunk文件, 会被回收到FilePool ASSERT_EQ(0, fsptr->Mkdir("./test_temp5")); CreateChunkFile("./test_temp5/3"); ASSERT_TRUE(fsadaptor->delete_file("./test_temp5/3", false)); - ASSERT_EQ(ChunkfilepoolPtr_->Size(), 10); + ASSERT_EQ(chunkFilePoolPtr_->Size(), 10); ASSERT_EQ(0, fsptr->Mkdir("./test_temp6")); CreateChunkFile("./test_temp6/4"); ASSERT_TRUE(fsadaptor->delete_file("./test_temp6/4", true)); - ASSERT_EQ(ChunkfilepoolPtr_->Size(), 11); + ASSERT_EQ(chunkFilePoolPtr_->Size(), 11); ASSERT_FALSE(fsptr->FileExists("./test_temp6/4")); ASSERT_FALSE(fsptr->FileExists("./test_temp5/3")); ASSERT_TRUE(fsptr->DirExists("./test_temp5")); @@ -219,7 +219,7 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { fsptr->Write(fd, data, 0, 4096); fsptr->Close(fd); ASSERT_TRUE(fsadaptor->delete_file("./test_temp7/5", true)); - ASSERT_EQ(ChunkfilepoolPtr_->Size(), 11); + ASSERT_EQ(chunkFilePoolPtr_->Size(), 11); ASSERT_FALSE(fsptr->FileExists("./test_temp7/5")); ASSERT_EQ(0, fsptr->Delete("./test_temp7")); } @@ -232,23 +232,23 @@ TEST_F(CurveFilesystemAdaptorTest, rename_test) { // 目标文件size是chunksize,但是目标文件在过滤名单里,所以直接过滤 CreateChunkFile(filename); - int poolSize = ChunkfilepoolPtr_->Size(); + int poolSize = chunkFilePoolPtr_->Size(); std::string temppath = "./temp"; char metaPage[4096]; - ASSERT_EQ(0, ChunkfilepoolPtr_->GetChunk(temppath, metaPage)); + ASSERT_EQ(0, chunkFilePoolPtr_->GetFile(temppath, metaPage)); ASSERT_TRUE(rfa->rename(temppath, filename)); ASSERT_TRUE(fsptr->FileExists(filename)); ASSERT_FALSE(fsptr->FileExists(temppath)); - ASSERT_EQ(poolSize - 1, ChunkfilepoolPtr_->Size()); + ASSERT_EQ(poolSize - 1, chunkFilePoolPtr_->Size()); ASSERT_EQ(0, fsptr->Delete(filename)); // 目标文件size是chunksize,但是目标文件不在过滤名单里,所以先回收再rename filename = "./test_temp/"; filename.append("test"); CreateChunkFile(filename); - ASSERT_EQ(0, ChunkfilepoolPtr_->GetChunk(temppath, metaPage)); + ASSERT_EQ(0, chunkFilePoolPtr_->GetFile(temppath, metaPage)); ASSERT_TRUE(rfa->rename(temppath, filename)); - ASSERT_EQ(poolSize - 1, ChunkfilepoolPtr_->Size()); + ASSERT_EQ(poolSize - 1, chunkFilePoolPtr_->Size()); 
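
Aside on this file's hunks: the renamed calls (GetFile/RecycleFile, FilePoolOptions, Initialize/UnInitialize) outline the generic FilePool API that replaces the old ChunkfilePool interface. Below is a minimal round-trip sketch using only the members that appear in these hunks and assuming a pool directory prepared the same way as in SetUp(); it is illustrative, not the test code itself:

    // Sketch: take one pre-formatted file from the pool, then give it back.
    #include <cstring>
    #include <memory>
    #include "src/chunkserver/datastore/file_pool.h"
    #include "src/fs/local_filesystem.h"

    using curve::chunkserver::FilePool;
    using curve::chunkserver::FilePoolOptions;

    bool FilePoolRoundTrip(std::shared_ptr<curve::fs::LocalFileSystem> fs) {
        FilePoolOptions opt;
        opt.getFileFromPool = true;   // take files from the pre-allocated pool
        opt.fileSize = 4096;          // was ChunkfilePoolOptions::chunkSize
        opt.metaPageSize = 4096;
        opt.metaFileSize = 4096;      // was cpMetaFileSize
        memcpy(opt.filePoolDir, "./raftsnap/chunkfilepool", 25);     // incl. '\0'
        memcpy(opt.metaPath, "./raftsnap/chunkfilepool.meta", 30);   // incl. '\0'

        auto pool = std::make_shared<FilePool>(fs);
        if (!pool->Initialize(opt)) {
            return false;
        }

        char metaPage[4096] = {0};
        // GetFile() replaces GetChunk(): it moves one pool file to the target
        // path and stamps it with the supplied metapage.
        int ret = pool->GetFile("./raftsnap/10", metaPage);
        if (ret == 0) {
            // RecycleFile() replaces RecycleChunk(): the file returns to the pool.
            ret = pool->RecycleFile("./raftsnap/10");
        }
        pool->UnInitialize();
        return ret == 0;
    }

Only the per-file calls and the option struct are renamed; Size(), Initialize() and UnInitialize() are used identically on both sides of the change, which is why these tests need little more than mechanical substitutions.
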
ASSERT_FALSE(fsptr->FileExists(temppath)); ASSERT_TRUE(fsptr->FileExists(filename)); ASSERT_EQ(0, fsptr->Delete(filename)); diff --git a/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp b/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp index a606fa83a5..6c5b8c0ee9 100644 --- a/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp +++ b/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp @@ -39,7 +39,7 @@ using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; using curve::fs::FileSystemType; -class RaftSnapChunkfilePoolTest : public testing::Test { +class RaftSnapFilePoolTest : public testing::Test { protected: virtual void SetUp() { ASSERT_EQ(0, peer1.parse("127.0.0.1:9060:0")); @@ -209,13 +209,13 @@ static void ReadVerify(PeerId leaderId, * 4. 然后 sleep 超过一个 snapshot interval,write read 数据 * 5. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了, install snapshot的数据从chunkfilepool中取文件 + * 因为 log 已经被删除了, install snapshot的数据从FilePool中取文件 * 6. 等待 leader 产生,然后 read 之前写入的数据验证一遍 * 7. transfer leader 到shut down 的peer 上 * 8. 在 read 之前写入的数据验证 * 9. 再 write 数据,再 read 出来验证一遍 */ -TEST_F(RaftSnapChunkfilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { +TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { LogicPoolID logicPoolId = 2; CopysetID copysetId = 100001; uint64_t chunkId = 1; @@ -236,12 +236,12 @@ TEST_F(RaftSnapChunkfilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { ASSERT_EQ(0, cluster.StartPeer(peer2, false, true, true)); ASSERT_EQ(0, cluster.StartPeer(peer3, false, true, true)); - // 等待chunkfilepool创建成功 + // 等待FilePool创建成功 std::this_thread::sleep_for(std::chrono::seconds(60)); PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 获取三个chunkserver的chunkfilepool的pool容量 + // 获取三个chunkserver的FilePool的pool容量 std::shared_ptr fs(LocalFsFactory::CreateFs( FileSystemType::EXT4, "")); std::vector Peer1ChunkPoolSize; @@ -268,14 +268,14 @@ TEST_F(RaftSnapChunkfilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize); fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize); - // 目前只有chunk文件才会从chunkfilepool中取 + // 目前只有chunk文件才会从FilePool中取 // raft snapshot meta 和 conf epoch文件直接从文件系统创建 ASSERT_EQ(20, Peer1ChunkPoolSize.size()); ASSERT_EQ(20, Peer2ChunkPoolSize.size()); ASSERT_EQ(20, Peer3ChunkPoolSize.size()); LOG(INFO) << "write 1 start"; - // 发起 read/write, 写数据会触发chunkserver从chunkfilepool取chunk + // 发起 read/write, 写数据会触发chunkserver从FilePool取chunk WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -296,7 +296,7 @@ TEST_F(RaftSnapChunkfilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize); fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize); - // 写完数据后,chunkfilepool容量少一个 + // 写完数据后,ChunkFilePool容量少一个 ASSERT_EQ(19, Peer1ChunkPoolSize.size()); ASSERT_EQ(19, Peer2ChunkPoolSize.size()); ASSERT_EQ(19, Peer3ChunkPoolSize.size()); @@ -316,9 +316,9 @@ TEST_F(RaftSnapChunkfilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeerid)); // wait snapshot, 保证能够触发打快照 - // 本次打快照,raft会从chunkfilepool取一个文件作为快照文件 - // 然后会把上一次的快照文件删除,删除过的文件会被回收到chunkfilepool - // 所以总体上本次写入只会导致datastore从chunkfilepool取文件 + // 本次打快照,raft会从FilePool取一个文件作为快照文件 + // 然后会把上一次的快照文件删除,删除过的文件会被回收到FilePool + // 所以总体上本次写入只会导致datastore从FilePool取文件 // 
但是快照取了一个又放回去了一个 ::sleep(1.5*snapshotTimeoutS); // 再次发起 read/write @@ -341,15 +341,15 @@ TEST_F(RaftSnapChunkfilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize); fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize); - // 写完数据后,chunkfilepool容量少一个 + // 写完数据后,FilePool容量少一个 ASSERT_EQ(19, Peer1ChunkPoolSize.size()); ASSERT_EQ(19, Peer2ChunkPoolSize.size()); ASSERT_EQ(19, Peer3ChunkPoolSize.size()); // wait snapshot, 保证能够触发打快照 - // 本次打快照,raft会从chunkfilepool取一个文件作为快照文件 - // 然后会把上一次的快照文件删除,删除过的文件会被回收到chunkfilepool - // 所以总体上本次写入只会导致datastore从chunkfilepool取文件 + // 本次打快照,raft会从FilePool取一个文件作为快照文件 + // 然后会把上一次的快照文件删除,删除过的文件会被回收到FilePool + // 所以总体上本次写入只会导致datastore从FilePool取文件 // 但是快照取了一个又放回去了一个 ::sleep(1.5*snapshotTimeoutS); // 再次发起 read/write @@ -376,7 +376,7 @@ TEST_F(RaftSnapChunkfilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { LOG(INFO) << "chunk pool2 size = " << Peer2ChunkPoolSize.size(); LOG(INFO) << "chunk pool3 size = " << Peer3ChunkPoolSize.size(); - // 写完数据后,chunkfilepool容量少一个 + // 写完数据后,FilePool容量少一个 if (shutdownPeerid == peer1) { ASSERT_EQ(19, Peer1ChunkPoolSize.size()); ASSERT_EQ(18, Peer2ChunkPoolSize.size()); diff --git a/test/chunkserver/server.cpp b/test/chunkserver/server.cpp index 23907e6401..1b12a52e8e 100644 --- a/test/chunkserver/server.cpp +++ b/test/chunkserver/server.cpp @@ -31,17 +31,18 @@ #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" #include "src/chunkserver/concurrent_apply/concurrent_apply.h" -#include "src/chunkserver/datastore/chunkfile_pool.h" +#include "src/chunkserver/datastore/file_pool.h" #include "src/chunkserver/uri_paser.h" #include "src/chunkserver/raftsnapshot/curve_snapshot_storage.h" using curve::chunkserver::CopysetNodeOptions; using curve::chunkserver::Configuration; using curve::chunkserver::CopysetNodeManager; -using curve::chunkserver::ChunkfilePool; -using curve::chunkserver::ChunkfilePoolOptions; using curve::chunkserver::concurrent::ConcurrentApplyModule; using curve::chunkserver::concurrent::ConcurrentApplyOption; +using curve::chunkserver::FilePool; +using curve::chunkserver::FilePoolOptions; +using curve::chunkserver::ConcurrentApplyModule; using curve::chunkserver::UriParser; using curve::chunkserver::LogicPoolID; using curve::chunkserver::CopysetID; @@ -50,7 +51,7 @@ using curve::chunkserver::PeerId; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; using curve::fs::FileSystemType; -using curve::chunkserver::ChunkfilePoolHelper; +using curve::chunkserver::FilePoolHelper; DEFINE_string(ip, "127.0.0.1", @@ -98,16 +99,16 @@ void CreateChunkFilePool(const std::string& dirname, count++; } - ChunkfilePoolOptions cpopt; - cpopt.getChunkFromPool = true; - cpopt.chunkSize = chunksize; + FilePoolOptions cpopt; + cpopt.getFileFromPool = true; + cpopt.fileSize = chunksize; cpopt.metaPageSize = 4096; - cpopt.cpMetaFileSize = 4096; + cpopt.metaFileSize = 4096; - memcpy(cpopt.chunkFilePoolDir, datadir.c_str(), datadir.size()); + memcpy(cpopt.filePoolDir, datadir.c_str(), datadir.size()); memcpy(cpopt.metaPath, metapath.c_str(), metapath.size()); - int ret = ChunkfilePoolHelper::PersistEnCodeMetaInfo( + int ret = FilePoolHelper::PersistEnCodeMetaInfo( fsptr, chunksize, 4096, @@ -166,28 +167,28 @@ int main(int argc, char *argv[]) { << " error chunkDataDir is: " << chunkDataDir; } - copysetNodeOptions.chunkfilePool = std::make_shared(fs); - if (nullptr == copysetNodeOptions.chunkfilePool) { + copysetNodeOptions.chunkFilePool = 
std::make_shared(fs); + if (nullptr == copysetNodeOptions.chunkFilePool) { LOG(FATAL) << "new chunfilepool failed"; } - ChunkfilePoolOptions cfop; - ::memcpy(cfop.chunkFilePoolDir, chunkDataDir.c_str(), chunkDataDir.size()); - cfop.getChunkFromPool = FLAGS_enable_getchunk_from_pool; + FilePoolOptions cfop; + ::memcpy(cfop.filePoolDir, chunkDataDir.c_str(), chunkDataDir.size()); + cfop.getFileFromPool = FLAGS_enable_getchunk_from_pool; cfop.retryTimes = 3; cfop.metaPageSize = 4 * 1024; - cfop.chunkSize = kMaxChunkSize; - if (cfop.getChunkFromPool) { - cfop.cpMetaFileSize = 4096; + cfop.fileSize = kMaxChunkSize; + if (cfop.getFileFromPool) { + cfop.metaFileSize = 4096; if (FLAGS_create_chunkfilepool) { CreateChunkFilePool(chunkDataDir, kMaxChunkSize, fs); } std::string datadir = chunkDataDir + "/chunkfilepool"; std::string metapath = chunkDataDir + "/chunkfilepool.meta"; - memcpy(cfop.chunkFilePoolDir, datadir.c_str(), datadir.size()); + memcpy(cfop.filePoolDir, datadir.c_str(), datadir.size()); memcpy(cfop.metaPath, metapath.c_str(), metapath.size()); } - if (false == copysetNodeOptions.chunkfilePool->Initialize(cfop)) { + if (false == copysetNodeOptions.chunkFilePool->Initialize(cfop)) { LOG(FATAL) << "chunfilepool init failed"; } else { LOG(INFO) << "chunfilepool init success"; diff --git a/test/chunkserver/trash_test.cpp b/test/chunkserver/trash_test.cpp index acb74240c4..5555c87432 100644 --- a/test/chunkserver/trash_test.cpp +++ b/test/chunkserver/trash_test.cpp @@ -24,7 +24,7 @@ #include #include "src/chunkserver/trash.h" #include "test/fs/mock_local_filesystem.h" -#include "test/chunkserver/datastore/mock_chunkfile_pool.h" +#include "test/chunkserver/datastore/mock_file_pool.h" #include "src/chunkserver/copyset_node.h" using ::testing::_; @@ -49,9 +49,9 @@ class TrashTest : public ::testing::Test { protected: void SetUp() { lfs = std::make_shared(); - pool = std::make_shared(lfs); + pool = std::make_shared(lfs); ops.localFileSystem = lfs; - ops.chunkfilePool = pool; + ops.chunkFilePool = pool; ops.trashPath = "local://./0/trash"; ops.expiredAfterSec = 1; ops.scanPeriodSec = 1; @@ -103,8 +103,8 @@ class TrashTest : public ::testing::Test { EXPECT_CALL(*lfs, Delete(meta)).WillRepeatedly(Return(-1)); EXPECT_CALL(*lfs, List(data, _)) .WillRepeatedly(DoAll(SetArgPointee<1>(chunks), Return(0))); - EXPECT_CALL(*pool, RecycleChunk(chunks1)).WillRepeatedly(Return(-1)); - EXPECT_CALL(*pool, RecycleChunk(chunks2)).WillRepeatedly(Return(0)); + EXPECT_CALL(*pool, RecycleFile(chunks1)).WillRepeatedly(Return(-1)); + EXPECT_CALL(*pool, RecycleFile(chunks2)).WillRepeatedly(Return(0)); for (int i = 0; i < 50; i++) { trash->DeleteEligibleFileInTrash(); } @@ -127,7 +127,7 @@ class TrashTest : public ::testing::Test { protected: std::shared_ptr trash; std::shared_ptr lfs; - std::shared_ptr pool; + std::shared_ptr pool; TrashOptions ops; }; @@ -363,10 +363,10 @@ TEST_F(TrashTest, .WillOnce(DoAll(SetArgPointee<1>(empty), Return(0))); EXPECT_CALL(*pool, - RecycleChunk("./0/trash/4294967493.55555/data/chunk_123")) + RecycleFile("./0/trash/4294967493.55555/data/chunk_123")) .WillOnce(Return(0)); EXPECT_CALL(*pool, - RecycleChunk("./0/trash/4294967493.55555/data/chunk_345")) + RecycleFile("./0/trash/4294967493.55555/data/chunk_345")) .WillOnce(Return(0)); trash->DeleteEligibleFileInTrash(); @@ -493,10 +493,10 @@ TEST_F(TrashTest, test_chunk_num_statistic) { EXPECT_CALL(*lfs, List("./0/trash/4294967493.55555/data", _)) .WillOnce(DoAll(SetArgPointee<1>(chunks1), Return(0))); EXPECT_CALL(*pool, - 
RecycleChunk("./0/trash/4294967493.55555/data/chunk_123")) + RecycleFile("./0/trash/4294967493.55555/data/chunk_123")) .WillOnce(Return(0)); EXPECT_CALL(*pool, - RecycleChunk("./0/trash/4294967493.55555/data/chunk_345")) + RecycleFile("./0/trash/4294967493.55555/data/chunk_345")) .WillOnce(Return(-1)); EXPECT_CALL(*lfs, Delete("./0/trash/4294967493.55555")) .Times(0); diff --git a/test/integration/chunkserver/BUILD b/test/integration/chunkserver/BUILD index bd1c717e66..b32d4978da 100644 --- a/test/integration/chunkserver/BUILD +++ b/test/integration/chunkserver/BUILD @@ -33,7 +33,7 @@ cc_test( "//proto:chunkserver-cc-protos", "//src/chunkserver:chunkserver-test-lib", "//src/common:curve_common", - "//test/chunkserver/datastore:chunkfilepool_helper", + "//test/chunkserver/datastore:filepool_helper", "//test/integration/common:integration-test-common", "@com_google_googletest//:gtest_main", ], @@ -58,7 +58,7 @@ cc_test( "//proto:chunkserver-cc-protos", "//src/chunkserver:chunkserver-test-lib", "//src/common:curve_common", - "//test/chunkserver/datastore:chunkfilepool_helper", + "//test/chunkserver/datastore:filepool_helper", "//test/integration/common:integration-test-common", "@com_google_googletest//:gtest_main", ], @@ -83,7 +83,7 @@ cc_test( "//proto:chunkserver-cc-protos", "//src/chunkserver:chunkserver-test-lib", "//src/common:curve_common", - "//test/chunkserver/datastore:chunkfilepool_helper", + "//test/chunkserver/datastore:filepool_helper", "//test/integration/common:integration-test-common", "//test/integration/client/common:client_integration_common_lib", "//test/integration/cluster_common:integration_cluster_common", diff --git a/test/integration/chunkserver/chunkserver_basic_test.cpp b/test/integration/chunkserver/chunkserver_basic_test.cpp index b320f32d11..45b374728a 100644 --- a/test/integration/chunkserver/chunkserver_basic_test.cpp +++ b/test/integration/chunkserver/chunkserver_basic_test.cpp @@ -27,7 +27,7 @@ #include #include -#include "test/chunkserver/datastore/chunkfilepool_helper.h" +#include "test/chunkserver/datastore/filepool_helper.h" #include "test/integration/common/chunkservice_op.h" #include "test/integration/common/config_generator.h" #include "test/integration/common/peer_cluster.h" @@ -47,7 +47,7 @@ const ChunkSizeType CHUNK_SIZE = 16 * kMB; #define BASIC_TEST_CHUNK_SERVER_PORT "9078" #define KB 1024 -static char *chunkServerParams[1][13] = { +static char *chunkServerParams[1][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=" BASIC_TEST_CHUNK_SERVER_PORT, "-chunkServerStoreUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/", @@ -55,10 +55,14 @@ static char *chunkServerParams[1][13] = { "/chunkserver.dat", "-copySetUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", "-raftSnapshotUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", + "-raftLogUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", "-recycleUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/recycler", "-chunkFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkfilepool/", "-chunkFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkfilepool.meta", + "-walFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/walfilepool/", + "-walFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT + "/walfilepool.meta", "-conf=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkserver.conf", "-raft_sync_segments=true", NULL }, }; @@ -103,7 +107,7 @@ class ChunkServerIoTest : public testing::Test { "/chunkfilepool/"; metaDir_ = "./" + std::to_string(PeerCluster::PeerToId(peer1_)) + 
"/chunkfilepool.meta"; - ChunkfilePoolHelper::PersistEnCodeMetaInfo(lfs_, kChunkSize, kPageSize, + FilePoolHelper::PersistEnCodeMetaInfo(lfs_, kChunkSize, kPageSize, poolDir_, metaDir_); allocateChunk(lfs_, kChunkNum, poolDir_, kChunkSize); } diff --git a/test/integration/chunkserver/chunkserver_clone_recover.cpp b/test/integration/chunkserver/chunkserver_clone_recover.cpp index bea68e62e0..40dae4ec18 100644 --- a/test/integration/chunkserver/chunkserver_clone_recover.cpp +++ b/test/integration/chunkserver/chunkserver_clone_recover.cpp @@ -132,8 +132,9 @@ const std::vector chunkserverConf1{ "/chunkserver.dat" }, { " -copySetUri=local://" + CHUNKSERVER0_BASE_DIR + "/copysets" }, { " -raftSnapshotUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets" }, + { " -raftLogUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets" }, { " -recycleUri=local://" + CHUNKSERVER0_BASE_DIR + "/recycler" }, - { " -chunkFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/filepool" }, + { " -chunkFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/chunkfilepool" }, { " -chunkFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + "/chunkfilepool.meta" }, { " -conf=" + CHUNKSERVER_CONF_PATH }, @@ -142,7 +143,10 @@ const std::vector chunkserverConf1{ { " --graceful_quit_on_sigterm" }, { " -chunkServerIp=127.0.0.1" }, { " -chunkServerPort=" + CHUNK_SERVER0_PORT }, - { " -enableChunkfilepool=false" } + { " -enableChunkfilepool=false" }, + { " -enableWalfilepool=false" }, + { " -walFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/walfilepool" }, + { " -walFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + "/walfilepool.meta" } }; const std::vector chunkserverConf2{ @@ -151,6 +155,7 @@ const std::vector chunkserverConf2{ "/chunkserver.dat" }, { " -copySetUri=local://" + CHUNKSERVER1_BASE_DIR + "/copysets" }, { " -raftSnapshotUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets" }, + { " -raftLogUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets" }, { " -recycleUri=local://" + CHUNKSERVER1_BASE_DIR + "/recycler" }, { " -chunkFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/filepool" }, { " -chunkFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + @@ -161,7 +166,10 @@ const std::vector chunkserverConf2{ { " --graceful_quit_on_sigterm" }, { " -chunkServerIp=127.0.0.1" }, { " -chunkServerPort=" + CHUNK_SERVER1_PORT }, - { " -enableChunkfilepool=false" } + { " -enableChunkfilepool=false" }, + { " -enableWalfilepool=false" }, + { " -walFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/walfilepool" }, + { " -walFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + "/walfilepool.meta" } }; const std::vector chunkserverConf3{ @@ -170,6 +178,7 @@ const std::vector chunkserverConf3{ "/chunkserver.dat" }, { " -copySetUri=local://" + CHUNKSERVER2_BASE_DIR + "/copysets" }, { " -raftSnapshotUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets" }, + { " -raftLogUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets" }, { " -recycleUri=local://" + CHUNKSERVER2_BASE_DIR + "/recycler" }, { " -chunkFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/filepool" }, { " -chunkFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + @@ -180,7 +189,10 @@ const std::vector chunkserverConf3{ { " --graceful_quit_on_sigterm" }, { " -chunkServerIp=127.0.0.1" }, { " -chunkServerPort=" + CHUNK_SERVER2_PORT }, - { " -enableChunkfilepool=false" } + { " -enableChunkfilepool=false" }, + { " -enableWalfilepool=false" }, + { " -walFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/walfilepool" }, + { " -walFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + "/walfilepool.meta" } }; namespace curve { diff --git a/test/integration/chunkserver/chunkserver_concurrent_test.cpp 
b/test/integration/chunkserver/chunkserver_concurrent_test.cpp index 13a907bac1..29a9526b75 100644 --- a/test/integration/chunkserver/chunkserver_concurrent_test.cpp +++ b/test/integration/chunkserver/chunkserver_concurrent_test.cpp @@ -32,7 +32,7 @@ #include "src/fs/local_filesystem.h" #include "src/common/concurrent/concurrent.h" #include "test/integration/common/peer_cluster.h" -#include "test/chunkserver/datastore/chunkfilepool_helper.h" +#include "test/chunkserver/datastore/filepool_helper.h" #include "test/integration/common/config_generator.h" namespace curve { @@ -43,7 +43,7 @@ using curve::fs::LocalFsFactory; using curve::fs::FileSystemType; using curve::common::Thread; -static char *chunkConcurrencyParams1[1][13] = { +static char *chunkConcurrencyParams1[1][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", @@ -52,16 +52,19 @@ static char *chunkConcurrencyParams1[1][13] = { "-chunkServerMetaUri=local://./9076/chunkserver.dat", "-copySetUri=local://./9076/copysets", "-raftSnapshotUri=curve://./9076/copysets", + "-raftLogUri=curve://./9076/copysets", "-recycleUri=local://./9076/recycler", "-chunkFilePoolDir=./9076/chunkfilepool/", "-chunkFilePoolMetaPath=./9076/chunkfilepool.meta", + "-walFilePoolDir=./9076/walfilepool/", + "-walFilePoolMetaPath=./9076/walfilepool.meta", "-conf=./9076/chunkserver.conf", "-raft_sync_segments=true", NULL }, }; -static char *chunkConcurrencyParams2[1][13] = { +static char *chunkConcurrencyParams2[1][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", @@ -70,9 +73,12 @@ static char *chunkConcurrencyParams2[1][13] = { "-chunkServerMetaUri=local://./9077/chunkserver.dat", "-copySetUri=local://./9077/copysets", "-raftSnapshotUri=curve://./9077/copysets", + "-raftLogUri=curve://./9077/copysets", "-recycleUri=local://./9077/recycler", "-chunkFilePoolDir=./9077/chunkfilepool/", "-chunkFilePoolMetaPath=./9077/chunkfilepool.meta", + "-walFilePoolDir=./9077/walfilepool/", + "-walFilePoolMetaPath=./9077/walfilepool.meta", "-conf=./9077/chunkserver.conf", "-raft_sync_segments=true", NULL @@ -84,8 +90,8 @@ const int kChunkNum = 10; const ChunkSizeType kChunkSize = 16 * 1024 * 1024; const PageSizeType kPageSize = kOpRequestAlignSize; -// chunk不从chunkfilepool获取的chunkserver并发测试 -class ChunkServerConcurrentNotFromChunkFilePoolTest : public testing::Test { +// chunk不从FilePool获取的chunkserver并发测试 +class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { protected: virtual void SetUp() { peer1.set_address("127.0.0.1:9076:0"); @@ -152,8 +158,8 @@ class ChunkServerConcurrentNotFromChunkFilePoolTest : public testing::Test { std::vector params; }; -// chunk从chunkfilepool获取的chunkserver并发测试 -class ChunkServerConcurrentFromChunkFilePoolTest : public testing::Test { +// chunk从FilePool获取的chunkserver并发测试 +class ChunkServerConcurrentFromFilePoolTest : public testing::Test { protected: virtual void SetUp() { peer1.set_address("127.0.0.1:9077:0"); @@ -183,7 +189,7 @@ class ChunkServerConcurrentFromChunkFilePoolTest : public testing::Test { params.push_back(chunkConcurrencyParams2[0]); - // 初始化chunkfilepool,这里会预先分配一些chunk + // 初始化FilePool,这里会预先分配一些chunk lfs = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); poolDir = "./" + std::to_string(PeerCluster::PeerToId(peer1)) @@ -192,7 +198,7 @@ class ChunkServerConcurrentFromChunkFilePoolTest : public testing::Test { + std::to_string(PeerCluster::PeerToId(peer1)) + "/chunkfilepool.meta"; - ChunkfilePoolHelper::PersistEnCodeMetaInfo(lfs, + FilePoolHelper::PersistEnCodeMetaInfo(lfs, kChunkSize, kPageSize, poolDir, @@ -482,11 
+488,11 @@ void CreateCloneChunk(Peer leader, } /** - * chunk不是事先在chunkfilepool分配好的 + * chunk不是事先在FilePool分配好的 */ // 多线程并发随机读同一个chunk -TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, RandReadOneChunk) { +TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { uint64_t chunkId = 1; off_t offset = 0; int length = kOpRequestAlignSize; @@ -534,7 +540,7 @@ TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, RandReadOneChunk) { } // 多线程并发随机写同一个chunk -TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, RandWriteOneChunk) { +TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteOneChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = 1; @@ -567,7 +573,7 @@ TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, RandWriteOneChunk) { } // 多线程并发写同一个chunk同一个offset -TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, WriteOneChunkOnTheSameOffset) { //NOLINT +TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { //NOLINT const int kThreadNum = 10; std::vector datas; ChunkID chunkId = 1; @@ -637,7 +643,7 @@ TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, WriteOneChunkOnTheSameOffs } // 多线程并发随机读写同一个chunk -TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, RandReadWriteOneChunk) { +TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) { off_t offset = 0; int length = kOpRequestAlignSize; std::string data(kOpRequestAlignSize, 'a'); @@ -698,7 +704,7 @@ TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, RandReadWriteOneChunk) { } // 多线程并发读不同的chunk -TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, RandReadMultiChunk) { +TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; std::string data(kOpRequestAlignSize, 'a'); @@ -746,7 +752,7 @@ TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, RandReadMultiChunk) { } // 多线程并发读不同的chunk,注意这些chunk都还没有被写过 -TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, RandReadMultiNotExistChunk) { //NOLINT +TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiNotExistChunk) { //NOLINT const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = kChunkNum; @@ -779,7 +785,7 @@ TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, RandReadMultiNotExistChunk } // 多线程并发随机写同多个chunk -TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, RandWriteMultiChunk) { +TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; std::string data(kOpRequestAlignSize, 'a'); @@ -828,7 +834,7 @@ TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, RandWriteMultiChunk) { } // 多线程并发随机读写同多个chunk -TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, RandReadWriteMultiChunk) { +TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; const int kMaxLoop = 200; @@ -875,7 +881,7 @@ TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, RandReadWriteMultiChunk) { } // 多线程并发删除不同的chunk -TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, DeleteMultiChunk) { +TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; std::string data(kOpRequestAlignSize, 'a'); @@ -923,7 +929,7 @@ TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, DeleteMultiChunk) { } // 多线程并发create clone不同的chunk -TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, CreateCloneMultiChunk) { 
+TEST_F(ChunkServerConcurrentNotFromFilePoolTest, CreateCloneMultiChunk) { const int kThreadNum = 10; ChunkID chunkIdRange = kChunkNum; @@ -954,11 +960,11 @@ TEST_F(ChunkServerConcurrentNotFromChunkFilePoolTest, CreateCloneMultiChunk) { } /** - * chunk是事先在chunkfilepool分配好的 + * chunk是事先在FilePool分配好的 */ // 多线程并发随机读同一个chunk -TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, RandReadOneChunk) { +TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) { uint64_t chunkId = 1; off_t offset = 0; int length = kOpRequestAlignSize; @@ -1005,7 +1011,7 @@ TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, RandReadOneChunk) { } // 多线程并发随机写同一个chunk -TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, RandWriteOneChunk) { +TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteOneChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = 1; @@ -1038,7 +1044,7 @@ TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, RandWriteOneChunk) { } // 多线程并发写同一个chunk同一个offset -TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, WriteOneChunkOnTheSameOffset) { //NOLINT +TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { //NOLINT const int kThreadNum = 10; std::vector datas; ChunkID chunkId = 1; @@ -1108,7 +1114,7 @@ TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, WriteOneChunkOnTheSameOffset) } // 多线程并发随机读写同一个chunk -TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, RandReadWriteOneChunk) { +TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; const int kMaxLoop = 200; @@ -1155,7 +1161,7 @@ TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, RandReadWriteOneChunk) { } // 多线程并发读不同的chunk -TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, RandReadMultiChunk) { +TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; std::string data(kOpRequestAlignSize, 'a'); @@ -1203,7 +1209,7 @@ TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, RandReadMultiChunk) { } // 多线程并发读不同的chunk,注意这些chunk都还没有被写过 -TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, RandReadMultiNotExistChunk) { +TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiNotExistChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = kChunkNum; @@ -1236,7 +1242,7 @@ TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, RandReadMultiNotExistChunk) { } // 多线程并发随机写同多个chunk -TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, RandWriteMultiChunk) { +TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; const int kMaxLoop = 200; @@ -1270,7 +1276,7 @@ TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, RandWriteMultiChunk) { } // 多线程并发随机读写同多个chunk -TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, RandReadWriteMultiChunk) { +TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; const int kMaxLoop = 200; @@ -1317,7 +1323,7 @@ TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, RandReadWriteMultiChunk) { } // 多线程并发删除不同的chunk -TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, DeleteMultiChunk) { +TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; std::string data(kOpRequestAlignSize, 'a'); @@ -1365,7 +1371,7 @@ TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, DeleteMultiChunk) { } // 多线程并发create 
clone不同的chunk -TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, CreateCloneMultiChunk) { +TEST_F(ChunkServerConcurrentFromFilePoolTest, CreateCloneMultiChunk) { const int kThreadNum = 10; ChunkID chunkIdRange = kChunkNum; const int sn = 1; @@ -1397,7 +1403,7 @@ TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, CreateCloneMultiChunk) { } // 多线程并发随机读写同多个chunk,同事伴随这并发的COW -TEST_F(ChunkServerConcurrentFromChunkFilePoolTest, RandWriteMultiChunkWithCOW) { +TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunkWithCOW) { off_t offset = 0; int length = kOpRequestAlignSize; std::string data(kOpRequestAlignSize, 'a'); diff --git a/test/integration/chunkserver/datastore/BUILD b/test/integration/chunkserver/datastore/BUILD index 12e6ce2320..fc9405b044 100644 --- a/test/integration/chunkserver/datastore/BUILD +++ b/test/integration/chunkserver/datastore/BUILD @@ -22,7 +22,7 @@ DEPS = [ "//include/chunkserver:include-chunkserver", "//src/common:curve_common", "//src/chunkserver/datastore:chunkserver_datastore", - "//test/chunkserver/datastore:chunkfilepool_helper", + "//test/chunkserver/datastore:filepool_helper", ] cc_test( diff --git a/test/integration/chunkserver/datastore/datastore_integration_base.h b/test/integration/chunkserver/datastore/datastore_integration_base.h index c850545bfd..1985ef9d57 100644 --- a/test/integration/chunkserver/datastore/datastore_integration_base.h +++ b/test/integration/chunkserver/datastore/datastore_integration_base.h @@ -34,9 +34,9 @@ #include "src/common/timeutility.h" #include "src/fs/local_filesystem.h" #include "src/chunkserver/datastore/define.h" -#include "src/chunkserver/datastore/chunkfile_pool.h" +#include "src/chunkserver/datastore/file_pool.h" #include "src/chunkserver/datastore/chunkserver_datastore.h" -#include "test/chunkserver/datastore/chunkfilepool_helper.h" +#include "test/chunkserver/datastore/filepool_helper.h" using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; @@ -69,7 +69,7 @@ class DatastoreIntegrationBase : public testing::Test { virtual void SetUp() { lfs_ = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); - filePool_ = std::make_shared(lfs_); + filePool_ = std::make_shared(lfs_); if (filePool_ == nullptr) { LOG(FATAL) << "allocate chunkfile pool failed!"; } @@ -84,7 +84,7 @@ class DatastoreIntegrationBase : public testing::Test { LOG(FATAL) << "allocate chunkfile pool failed!"; } - ChunkfilePoolHelper::PersistEnCodeMetaInfo(lfs_, + FilePoolHelper::PersistEnCodeMetaInfo(lfs_, CHUNK_SIZE, PAGE_SIZE, poolDir, @@ -97,8 +97,8 @@ class DatastoreIntegrationBase : public testing::Test { void InitChunkPool(int chunkNum) { filePool_->UnInitialize(); - ChunkfilePoolOptions cfop; - cfop.chunkSize = CHUNK_SIZE; + FilePoolOptions cfop; + cfop.fileSize = CHUNK_SIZE; cfop.metaPageSize = PAGE_SIZE; memcpy(cfop.metaPath, poolMetaPath.c_str(), poolMetaPath.size()); @@ -118,7 +118,7 @@ class DatastoreIntegrationBase : public testing::Test { } protected: - std::shared_ptr filePool_; + std::shared_ptr filePool_; std::shared_ptr lfs_; std::shared_ptr dataStore_; }; diff --git a/test/integration/chunkserver/datastore/datastore_restart_test.cpp b/test/integration/chunkserver/datastore/datastore_restart_test.cpp index d91910fd21..75f5b2f6fa 100644 --- a/test/integration/chunkserver/datastore/datastore_restart_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_restart_test.cpp @@ -28,8 +28,8 @@ namespace curve { namespace chunkserver { const string baseDir = "./data_int_res"; // NOLINT -const string poolDir = 
"./chunkfilepool_int_res"; // NOLINT -const string poolMetaPath = "./chunkfilepool_int_res.meta"; // NOLINT +const string poolDir = "./chunfilepool_int_res"; // NOLINT +const string poolMetaPath = "./chunfilepool_int_res.meta"; // NOLINT // 以下的测试读写数据都在[0, 32kb]范围内 const uint64_t kMaxSize = 8 * PAGE_SIZE; @@ -280,7 +280,7 @@ class StepList { void ClearEnv() { clearFunc_(); // 清理每一步的预期状态,因为清理环境后,读取到的数据内容可能会不一样 - // 因为通过chunkfilepool分配的chunk初始内容是不确定的 + // 因为通过FilePool分配的chunk初始内容是不确定的 for (auto &step : steps) { step->ClearStatus(); } diff --git a/test/integration/client/chunkserver_exception_test.cpp b/test/integration/client/chunkserver_exception_test.cpp index 2a45698b35..a20c2f44a4 100644 --- a/test/integration/client/chunkserver_exception_test.cpp +++ b/test/integration/client/chunkserver_exception_test.cpp @@ -61,6 +61,7 @@ const std::vector chunkserverConf4{ { " -chunkServerMetaUri=local://./moduleException4/chunkserver.dat" }, { " -copySetUri=local://./moduleException4/copysets" }, { " -raftSnapshotUri=curve://./moduleException4/copysets" }, + { " -raftLogUri=curve://./moduleException4/copysets" }, { " -recycleUri=local://./moduleException4/recycler" }, { " -chunkFilePoolDir=./moduleException4/chunkfilepool/" }, { " -chunkFilePoolMetaPath=./moduleException4/chunkfilepool.meta" }, @@ -70,7 +71,10 @@ const std::vector chunkserverConf4{ { " -chunkServerIp=127.0.0.1" }, { " -chunkServerPort=22125" }, { " -enableChunkfilepool=false" }, - { " -mdsListenAddr=127.0.0.1:22122,127.0.0.1:22123,127.0.0.1:22124" } + { " -mdsListenAddr=127.0.0.1:22122,127.0.0.1:22123,127.0.0.1:22124" }, + { " -enableWalfilepool=false" }, + { " -walFilePoolDir=./moduleException4/walfilepool/" }, + { " -walFilePoolMetaPath=./moduleException4/walfilepool.meta" } }; const std::vector chunkserverConf5{ @@ -78,6 +82,7 @@ const std::vector chunkserverConf5{ { " -chunkServerMetaUri=local://./moduleException5/chunkserver.dat" }, { " -copySetUri=local://./moduleException5/copysets" }, { " -raftSnapshotUri=curve://./moduleException5/copysets" }, + { " -raftLogUri=curve://./moduleException5/copysets" }, { " -recycleUri=local://./moduleException5/recycler" }, { " -chunkFilePoolDir=./moduleException5/chunkfilepool/" }, { " -chunkFilePoolMetaPath=./moduleException5/chunkfilepool.meta" }, @@ -87,7 +92,10 @@ const std::vector chunkserverConf5{ { " -chunkServerIp=127.0.0.1" }, { " -chunkServerPort=22126" }, { " -enableChunkfilepool=false" }, - { " -mdsListenAddr=127.0.0.1:22122,127.0.0.1:22123,127.0.0.1:22124" } + { " -mdsListenAddr=127.0.0.1:22122,127.0.0.1:22123,127.0.0.1:22124" }, + { " -enableWalfilepool=false" }, + { " -walFilePoolDir=./moduleException5/walfilepool/" }, + { " -walFilePoolMetaPath=./moduleException5/walfilepool.meta" } }; const std::vector chunkserverConf6{ @@ -95,6 +103,7 @@ const std::vector chunkserverConf6{ { " -chunkServerMetaUri=local://./moduleException6/chunkserver.dat" }, { " -copySetUri=local://./moduleException6/copysets" }, { " -raftSnapshotUri=curve://./moduleException6/copysets" }, + { " -raftLogUri=curve://./moduleException6/copysets" }, { " -recycleUri=local://./moduleException6/recycler" }, { " -chunkFilePoolDir=./moduleException6/chunkfilepool/" }, { " -chunkFilePoolMetaPath=./moduleException6/chunkfilepool.meta" }, @@ -104,7 +113,10 @@ const std::vector chunkserverConf6{ { " -chunkServerIp=127.0.0.1" }, { " -chunkServerPort=22127" }, { " -enableChunkfilepool=false" }, - { " -mdsListenAddr=127.0.0.1:22122,127.0.0.1:22123,127.0.0.1:22124" } + { " 
-mdsListenAddr=127.0.0.1:22122,127.0.0.1:22123,127.0.0.1:22124" }, + { " -enableWalfilepool=false" }, + { " -walFilePoolDir=./moduleException6/walfilepool/" }, + { " -walFilePoolMetaPath=./moduleException6/walfilepool.meta" } }; std::string mdsaddr = // NOLINT diff --git a/test/integration/client/mds_exception_test.cpp b/test/integration/client/mds_exception_test.cpp index 40fc2a4920..df5faa90b9 100644 --- a/test/integration/client/mds_exception_test.cpp +++ b/test/integration/client/mds_exception_test.cpp @@ -61,6 +61,7 @@ const std::vector chunkserverConf1{ { " -chunkServerMetaUri=local://./moduleException1/chunkserver.dat" }, { " -copySetUri=local://./moduleException1/copysets" }, { " -raftSnapshotUri=curve://./moduleException1/copysets" }, + { " -raftLogUri=curve://./moduleException1/copysets" }, { " -recycleUri=local://./moduleException1/recycler" }, { " -chunkFilePoolDir=./moduleException1/chunkfilepool/" }, { " -chunkFilePoolMetaPath=./moduleException1/chunkfilepool.meta" }, @@ -71,7 +72,10 @@ const std::vector chunkserverConf1{ { " -chunkServerIp=127.0.0.1" }, { " -chunkServerPort=22225" }, { " -enableChunkfilepool=false" }, - { " -mdsListenAddr=127.0.0.1:22222,127.0.0.1:22223,127.0.0.1:22224" } + { " -mdsListenAddr=127.0.0.1:22222,127.0.0.1:22223,127.0.0.1:22224" }, + { " -enableWalfilepool=false" }, + { " -walFilePoolDir=./moduleException1/walfilepool/" }, + { " -walFilePoolMetaPath=./moduleException1/walfilepool.meta" } }; const std::vector chunkserverConf2{ @@ -79,6 +83,7 @@ const std::vector chunkserverConf2{ { " -chunkServerMetaUri=local://./moduleException2/chunkserver.dat" }, { " -copySetUri=local://./moduleException2/copysets" }, { " -raftSnapshotUri=curve://./moduleException2/copysets" }, + { " -raftLogUri=curve://./moduleException2/copysets" }, { " -recycleUri=local://./moduleException2/recycler" }, { " -chunkFilePoolDir=./moduleException2/chunkfilepool/" }, { " -chunkFilePoolMetaPath=./moduleException2/chunkfilepool.meta" }, @@ -89,7 +94,10 @@ const std::vector chunkserverConf2{ { " -chunkServerIp=127.0.0.1" }, { " -chunkServerPort=22226" }, { " -enableChunkfilepool=false" }, - { " -mdsListenAddr=127.0.0.1:22222,127.0.0.1:22223,127.0.0.1:22224" } + { " -mdsListenAddr=127.0.0.1:22222,127.0.0.1:22223,127.0.0.1:22224" }, + { " -enableWalfilepool=false" }, + { " -walFilePoolDir=./moduleException2/walfilepool/" }, + { " -walFilePoolMetaPath=./moduleException2/walfilepool.meta" } }; const std::vector chunkserverConf3{ @@ -97,6 +105,7 @@ const std::vector chunkserverConf3{ { " -chunkServerMetaUri=local://./moduleException3/chunkserver.dat" }, { " -copySetUri=local://./moduleException3/copysets" }, { " -raftSnapshotUri=curve://./moduleException3/copysets" }, + { " -raftLogUri=curve://./moduleException3/copysets" }, { " -recycleUri=local://./moduleException3/recycler" }, { " -chunkFilePoolDir=./moduleException3/chunkfilepool/" }, { " -chunkFilePoolMetaPath=./moduleException3/chunkfilepool.meta" }, @@ -107,7 +116,10 @@ const std::vector chunkserverConf3{ { " -chunkServerIp=127.0.0.1" }, { " -chunkServerPort=22227" }, { " -enableChunkfilepool=false" }, - { " -mdsListenAddr=127.0.0.1:22222,127.0.0.1:22223,127.0.0.1:22224" } + { " -mdsListenAddr=127.0.0.1:22222,127.0.0.1:22223,127.0.0.1:22224" }, + { " -enableWalfilepool=false" }, + { " -walFilePoolDir=./moduleException3/walfilepool/" }, + { " -walFilePoolMetaPath=./moduleException3/walfilepool.meta" } }; std::string mdsaddr = // NOLINT diff --git a/test/integration/client/unstable_chunkserver_exception_test.cpp 
b/test/integration/client/unstable_chunkserver_exception_test.cpp index bd9b8204fd..e40729e556 100644 --- a/test/integration/client/unstable_chunkserver_exception_test.cpp +++ b/test/integration/client/unstable_chunkserver_exception_test.cpp @@ -61,7 +61,8 @@ curve::client::PerSecondMetric iops("test", "iops"); std::atomic running{ false }; const std::vector chunkserverConfigOpts{ - "chunkfilepool.enable_get_chunk_from_pool=false" + "chunkfilepool.enable_get_chunk_from_pool=false", + "walfilepool.enable_get_segment_from_pool=false" }; const std::vector mdsConfigOpts{ @@ -92,9 +93,12 @@ const std::vector chunkserverConfTemplate{ { " -chunkServerMetaUri=local://./ttt/%d/chunkserver.dat" }, { " -copySetUri=local://./ttt/%d/copysets" }, { " -raftSnapshotUri=curve://./ttt/%d/copysets" }, + { " -raftLogUri=curve://./ttt/%d/copysets" }, { " -recycleUri=local://./ttt/%d/recycler" }, - { " -chunkFilePoolDir=./ttt/%d/" }, + { " -chunkFilePoolDir=./ttt/%d/chunkfilepool/" }, { " -chunkFilePoolMetaPath=./ttt/%d/chunkfilepool.meta" }, + { " -walFilePoolDir=./ttt/%d/walfilepool/" }, + { " -walFilePoolMetaPath=./ttt/%d/walfilepool.meta" }, { " -mdsListenAddr=127.0.0.1:30010,127.0.0.1:30011,127.0.0.1:30012" }, { " -log_dir=./runlog/cs_%d" }, { " --stderrthreshold=3" } @@ -120,7 +124,11 @@ std::vector GenChunkserverConf(int port) { conf[6] = formatter(chunkserverConfTemplate[6], port); conf[7] = formatter(chunkserverConfTemplate[7], port); conf[8] = formatter(chunkserverConfTemplate[8], port); + conf[9] = formatter(chunkserverConfTemplate[9], port); conf[10] = formatter(chunkserverConfTemplate[10], port); + conf[11] = formatter(chunkserverConfTemplate[11], port); + conf[12] = formatter(chunkserverConfTemplate[12], port); + conf[14] = formatter(chunkserverConfTemplate[14], port); std::string rmcmd = "rm -rf ./runlog/cs_" + std::to_string(port); std::string mkcmd = "mkdir -p ./runlog/cs_" + std::to_string(port); diff --git a/test/integration/cluster_common/cluster.cpp b/test/integration/cluster_common/cluster.cpp index 954189cef5..ad05844e6e 100644 --- a/test/integration/cluster_common/cluster.cpp +++ b/test/integration/cluster_common/cluster.cpp @@ -432,23 +432,23 @@ int CurveCluster::StopAllEtcd() { return ret; } -int CurveCluster::FormatChunkFilePool(const std::string &chunkfilepooldir, - const std::string &chunkfilepoolmetapath, - const std::string &filesystempath, - uint32_t size) { - LOG(INFO) << "FormatChunkFilePool begin..."; +int CurveCluster::FormatFilePool(const std::string &filePooldir, + const std::string &filePoolmetapath, + const std::string &filesystempath, + uint32_t size) { + LOG(INFO) << "FormatFilePool begin..."; std::string cmd = std::string("./bazel-bin/src/tools/curve_format") + - " -chunkfilepool_dir=" + chunkfilepooldir + - " -chunkfilepool_metapath=" + chunkfilepoolmetapath + - " -filesystem_path=" + filesystempath + - " -allocateByPercent=false -preallocateNum=" + + " -filePoolDir=" + filePooldir + + " -filePoolMetaPath=" + filePoolmetapath + + " -fileSystemPath=" + filesystempath + + " -allocateByPercent=false -preAllocateNum=" + std::to_string(size * 64) + - " -needWriteZero=false"; // 1G = 64 chunk + " -needWriteZero=false"; RETURN_IF_NOT_ZERO(system(cmd.c_str())); - LOG(INFO) << "FormatChunkFilePool end."; + LOG(INFO) << "FormatFilePool end."; return 0; } diff --git a/test/integration/cluster_common/cluster.h b/test/integration/cluster_common/cluster.h index 151aa36af6..e85bbdefa5 100644 --- a/test/integration/cluster_common/cluster.h +++ 
b/test/integration/cluster_common/cluster.h @@ -214,17 +214,17 @@ class CurveCluster { int StopAllEtcd(); /** - * @brief Format the chunkfilepool + * @brief Format the FilePool * - * @param chunkfilepooldir chunkfilepool directory - * @param chunkfilepoolmetapath chunkfilepool metadata path - * @param filesystem_path filesystem directory - * @param size chunkfilepool size (GB) + * @param filePooldir FilePool directory + * @param filePoolmetapath FilePool metadata path + * @param filesystemPath filesystem directory + * @param size FilePool size (GB) * @return 0 on success, -1 on failure */ - int FormatChunkFilePool(const std::string &chunkfilepooldir, - const std::string &chunkfilepoolmetapath, - const std::string &filesystem_path, uint32_t size); + int FormatFilePool(const std::string &filePooldir, + const std::string &filePoolmetapath, + const std::string &filesystemPath, uint32_t size); /** * StartSingleChunkServer starts a single chunkserver node diff --git a/test/integration/cluster_common/cluster_basic_test.cpp b/test/integration/cluster_common/cluster_basic_test.cpp index 20be4b8787..3c1e27e5d8 100644 --- a/test/integration/cluster_common/cluster_basic_test.cpp +++ b/test/integration/cluster_common/cluster_basic_test.cpp @@ -42,12 +42,16 @@ const std::vector chunkserverConf1{ { " -chunkServerMetaUri=local://./basic1/chunkserver.dat" }, { " -copySetUri=local://./basic1/copysets" }, { " -raftSnapshotUri=curve://./basic1/copysets" }, + { " -raftLogUri=curve://./basic1/copysets" }, { " -recycleUri=local://./basic1/recycler" }, { " -chunkFilePoolDir=./basic1/chunkfilepool/" }, { " -chunkFilePoolMetaPath=./basic1/chunkfilepool.meta" }, { " -conf=./conf/chunkserver.conf.example" }, { " -raft_sync_segments=true" }, - { " -enableChunkfilepool=false" } + { " -enableChunkfilepool=false" }, + { " -enableWalfilepool=false" }, + { " -walFilePoolDir=./basic1/walfilepool/" }, + { " -walFilePoolMetaPath=./basic1/walfilepool.meta" } }; const std::vector chunkserverConf2{ @@ -56,12 +60,16 @@ const std::vector chunkserverConf2{ { " -chunkServerMetaUri=local://./basic2/chunkserver.dat" }, { " -copySetUri=local://./basic2/copysets" }, { " -raftSnapshotUri=curve://./basic2/copysets" }, + { " -raftLogUri=curve://./basic2/copysets" }, { " -recycleUri=local://./basic2/recycler" }, { " -chunkFilePoolDir=./basic2/chunkfilepool/" }, { " -chunkFilePoolMetaPath=./basic2/chunkfilepool.meta" }, { " -conf=./conf/chunkserver.conf.example" }, { " -raft_sync_segments=true" }, - { " -enableChunkfilepool=false" } + { " -enableChunkfilepool=false" }, + { " -enableWalfilepool=false" }, + { " -walFilePoolDir=./basic2/walfilepool/" }, + { " -walFilePoolMetaPath=./basic2/walfilepool.meta" } }; const std::vector chunkserverConf3{ @@ -70,12 +78,16 @@ const std::vector chunkserverConf3{ { " -chunkServerMetaUri=local://./basic3/chunkserver.dat" }, { " -copySetUri=local://./basic3/copysets" }, { " -raftSnapshotUri=curve://./basic3/copysets" }, + { " -raftLogUri=curve://./basic3/copysets" }, { " -recycleUri=local://./basic3/recycler" }, { " -chunkFilePoolDir=./basic3/chunkfilepool/" }, { " -chunkFilePoolMetaPath=./basic3/chunkfilepool.meta" }, { " -conf=./conf/chunkserver.conf.example" }, { " -raft_sync_segments=true" }, - { " -enableChunkfilepool=false" } + { " -enableChunkfilepool=false" }, + { " -enableWalfilepool=false" }, + { " -walFilePoolDir=./basic3/walfilepool/" }, + { " -walFilePoolMetaPath=./basic3/walfilepool.meta" } }; class ClusterBasicTest : public ::testing::Test { diff --git a/test/integration/common/config_generator.h b/test/integration/common/config_generator.h index 6b2ad7fec4..d330177792 100644 --- 
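For reference, a hypothetical sketch of the renamed helper being called with its four arguments, driven from a std::thread the way the fixtures in this patch do. It assumes cluster.h declares CurveCluster in namespace curve; the "./example/" paths, the function wrapper, and the 1 GB size are placeholders, not values taken from any real test:

```cpp
// Hypothetical usage sketch (not part of the patch) of the renamed
// CurveCluster::FormatFilePool; paths, wrapper name, and size are placeholders.
#include <thread>
#include "test/integration/cluster_common/cluster.h"

void FormatOnePoolForTest(curve::CurveCluster* cluster) {
    // Format a single pool in a worker thread, as the test fixtures do.
    std::thread worker(&curve::CurveCluster::FormatFilePool, cluster,
                       "./example/chunkfilepool/",      // filePooldir
                       "./example/chunkfilepool.meta",  // filePoolmetapath
                       "./example/chunkfilepool/",      // filesystemPath
                       1);                              // size (GB)
    worker.join();
}
```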
a/test/integration/common/config_generator.h +++ b/test/integration/common/config_generator.h @@ -65,6 +65,11 @@ class CSTConfigGenerator : public ConfigGenerator { SetKV("chunkfilepool.meta_path", cfpoolMetaPath); SetKV("chunkfilepool.enable_get_chunk_from_pool", "false"); + std::string walPoolDir = "./" + port + "/walfilepool/"; + std::string walPoolMetaPath = "./" + port + "/walfilepool.meta"; + SetKV("walfilepool.file_pool_dir", walPoolDir); + SetKV("walfilepool.meta_path", walPoolMetaPath); + SetKV("walfilepool.enable_get_segment_from_pool", "false"); SetKV("chunkserver.common.logDir", DEFAULT_LOG_DIR); diff --git a/test/integration/common/peer_cluster.h b/test/integration/common/peer_cluster.h index 1b8d301416..83053882a7 100644 --- a/test/integration/common/peer_cluster.h +++ b/test/integration/common/peer_cluster.h @@ -35,7 +35,7 @@ #include #include -#include "src/chunkserver/datastore/chunkfile_pool.h" +#include "src/chunkserver/datastore/file_pool.h" #include "include/chunkserver/chunkserver_common.h" #include "src/fs/local_filesystem.h" #include "src/chunkserver/copyset_node.h" diff --git a/test/integration/raft/raft_config_change_test.cpp b/test/integration/raft/raft_config_change_test.cpp index 7cfff65b1a..b24546061e 100644 --- a/test/integration/raft/raft_config_change_test.cpp +++ b/test/integration/raft/raft_config_change_test.cpp @@ -40,7 +40,7 @@ using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; using curve::fs::FileSystemType; -static char* raftConfigParam[5][13] = { +static char* raftConfigParam[5][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", @@ -49,9 +49,12 @@ static char* raftConfigParam[5][13] = { "-chunkServerMetaUri=local://./9081/chunkserver.dat", "-copySetUri=local://./9081/copysets", "-raftSnapshotUri=curve://./9081/copysets", + "-raftLogUri=curve://./9081/copysets", "-recycleUri=local://./9081/recycler", "-chunkFilePoolDir=./9081/chunkfilepool/", "-chunkFilePoolMetaPath=./9081/chunkfilepool.meta", + "-walFilePoolDir=./9081/walfilepool/", + "-walFilePoolMetaPath=./9081/walfilepool.meta", "-conf=./9081/chunkserver.conf", "-raft_sync_segments=true", NULL @@ -64,9 +67,12 @@ static char* raftConfigParam[5][13] = { "-chunkServerMetaUri=local://./9082/chunkserver.dat", "-copySetUri=local://./9082/copysets", "-raftSnapshotUri=curve://./9082/copysets", + "-raftLogUri=curve://./9082/copysets", "-recycleUri=local://./9082/recycler", "-chunkFilePoolDir=./9082/chunkfilepool/", "-chunkFilePoolMetaPath=./9082/chunkfilepool.meta", + "-walFilePoolDir=./9082/walfilepool/", + "-walFilePoolMetaPath=./9082/walfilepool.meta", "-conf=./9082/chunkserver.conf", "-raft_sync_segments=true", NULL @@ -79,9 +85,12 @@ static char* raftConfigParam[5][13] = { "-chunkServerMetaUri=local://./9083/chunkserver.dat", "-copySetUri=local://./9083/copysets", "-raftSnapshotUri=curve://./9083/copysets", + "-raftLogUri=curve://./9083/copysets", "-recycleUri=local://./9083/recycler", "-chunkFilePoolDir=./9083/chunkfilepool/", "-chunkFilePoolMetaPath=./9083/chunkfilepool.meta", + "-walFilePoolDir=./9083/walfilepool/", + "-walFilePoolMetaPath=./9083/walfilepool.meta", "-conf=./9083/chunkserver.conf", "-raft_sync_segments=true", NULL @@ -94,9 +103,12 @@ static char* raftConfigParam[5][13] = { "-chunkServerMetaUri=local://./9084/chunkserver.dat", "-copySetUri=local://./9084/copysets", "-raftSnapshotUri=curve://./9084/copysets", + "-raftLogUri=curve://./9084/copysets", "-recycleUri=local://./9084/recycler", "-chunkFilePoolDir=./9084/chunkfilepool/", 
"-chunkFilePoolMetaPath=./9084/chunkfilepool.meta", + "-walFilePoolDir=./9084/walfilepool/", + "-walFilePoolMetaPath=./9084/walfilepool.meta", "-conf=./9084/chunkserver.conf", "-raft_sync_segments=true", NULL @@ -109,9 +121,12 @@ static char* raftConfigParam[5][13] = { "-chunkServerMetaUri=local://./9085/chunkserver.dat", "-copySetUri=local://./9085/copysets", "-raftSnapshotUri=curve://./9085/copysets", + "-raftLogUri=curve://./9085/copysets", "-recycleUri=local://./9085/recycler", "-chunkFilePoolDir=./9085/chunkfilepool/", "-chunkFilePoolMetaPath=./9085/chunkfilepool.meta", + "-walFilePoolDir=./9085/walfilepool/", + "-walFilePoolMetaPath=./9085/walfilepool.meta", "-conf=./9085/chunkserver.conf", "-raft_sync_segments=true", NULL diff --git a/test/integration/raft/raft_log_replication_test.cpp b/test/integration/raft/raft_log_replication_test.cpp index 21e6dfac74..8053fea3e4 100644 --- a/test/integration/raft/raft_log_replication_test.cpp +++ b/test/integration/raft/raft_log_replication_test.cpp @@ -41,7 +41,7 @@ using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; using curve::fs::FileSystemType; -static char* raftLogParam[5][13] = { +static char* raftLogParam[5][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", @@ -50,9 +50,12 @@ static char* raftLogParam[5][13] = { "-chunkServerMetaUri=local://./9071/chunkserver.dat", "-copySetUri=local://./9071/copysets", "-raftSnapshotUri=curve://./9071/copysets", + "-raftLogUri=curve://./9071/copysets", "-recycleUri=local://./9071/recycler", "-chunkFilePoolDir=./9071/chunkfilepool/", "-chunkFilePoolMetaPath=./9071/chunkfilepool.meta", + "-walFilePoolDir=./9071/walfilepool/", + "-walFilePoolMetaPath=./9071/walfilepool.meta", "-conf=./9071/chunkserver.conf", "-raft_sync_segments=true", NULL @@ -65,9 +68,12 @@ static char* raftLogParam[5][13] = { "-chunkServerMetaUri=local://./9072/chunkserver.dat", "-copySetUri=local://./9072/copysets", "-raftSnapshotUri=curve://./9072/copysets", + "-raftLogUri=curve://./9072/copysets", "-recycleUri=local://./9072/recycler", "-chunkFilePoolDir=./9072/chunkfilepool/", "-chunkFilePoolMetaPath=./9072/chunkfilepool.meta", + "-walFilePoolDir=./9072/walfilepool/", + "-walFilePoolMetaPath=./9072/walfilepool.meta", "-conf=./9072/chunkserver.conf", "-raft_sync_segments=true", NULL @@ -80,9 +86,12 @@ static char* raftLogParam[5][13] = { "-chunkServerMetaUri=local://./9073/chunkserver.dat", "-copySetUri=local://./9073/copysets", "-raftSnapshotUri=curve://./9073/copysets", + "-raftLogUri=curve://./9073/copysets", "-recycleUri=local://./9073/recycler", "-chunkFilePoolDir=./9073/chunkfilepool/", "-chunkFilePoolMetaPath=./9073/chunkfilepool.meta", + "-walFilePoolDir=./9073/walfilepool/", + "-walFilePoolMetaPath=./9073/walfilepool.meta", "-conf=./9073/chunkserver.conf", "-raft_sync_segments=true", NULL @@ -95,9 +104,12 @@ static char* raftLogParam[5][13] = { "-chunkServerMetaUri=local://./9074/chunkserver.dat", "-copySetUri=local://./9074/copysets", "-raftSnapshotUri=curve://./9074/copysets", + "-raftLogUri=curve://./9074/copysets", "-recycleUri=local://./9074/recycler", "-chunkFilePoolDir=./9074/chunkfilepool/", "-chunkFilePoolMetaPath=./9074/chunkfilepool.meta", + "-walFilePoolDir=./9074/walfilepool/", + "-walFilePoolMetaPath=./9074/walfilepool.meta", "-conf=./9074/chunkserver.conf", "-raft_sync_segments=true", NULL @@ -110,9 +122,12 @@ static char* raftLogParam[5][13] = { "-chunkServerMetaUri=local://./9075/chunkserver.dat", "-copySetUri=local://./9075/copysets", "-raftSnapshotUri=curve://./9075/copysets", + 
"-raftLogUri=curve://./9075/copysets", "-recycleUri=local://./9075/recycler", "-chunkFilePoolDir=./9075/chunkfilepool/", "-chunkFilePoolMetaPath=./9075/chunkfilepool.meta", + "-walFilePoolDir=./9075/walfilepool/", + "-walFilePoolMetaPath=./9075/walfilepool.meta", "-conf=./9075/chunkserver.conf", "-raft_sync_segments=true", NULL diff --git a/test/integration/raft/raft_snapshot_test.cpp b/test/integration/raft/raft_snapshot_test.cpp index 7e3a3bbd52..175240c29d 100644 --- a/test/integration/raft/raft_snapshot_test.cpp +++ b/test/integration/raft/raft_snapshot_test.cpp @@ -40,7 +40,7 @@ using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; using curve::fs::FileSystemType; -static char *raftVoteParam[4][13] = { +static char *raftVoteParam[4][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", @@ -54,6 +54,9 @@ static char *raftVoteParam[4][13] = { "-chunkFilePoolMetaPath=./9321/chunkfilepool.meta", "-conf=./9321/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9321/copysets", + "-walFilePoolDir=./9321/walfilepool/", + "-walFilePoolMetaPath=./9321/walfilepool.meta", NULL }, { @@ -69,6 +72,9 @@ static char *raftVoteParam[4][13] = { "-chunkFilePoolMetaPath=./9322/chunkfilepool.meta", "-conf=./9322/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9322/copysets", + "-walFilePoolDir=./9322/walfilepool/", + "-walFilePoolMetaPath=./9322/walfilepool.meta", NULL }, { @@ -84,6 +90,9 @@ static char *raftVoteParam[4][13] = { "-chunkFilePoolMetaPath=./9323/chunkfilepool.meta", "-conf=./9323/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9323/copysets", + "-walFilePoolDir=./9323/walfilepool/", + "-walFilePoolMetaPath=./9323/walfilepool.meta", NULL }, { @@ -99,6 +108,9 @@ static char *raftVoteParam[4][13] = { "-chunkFilePoolMetaPath=./9324/chunkfilepool.meta", "-conf=./9324/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9324/copysets", + "-walFilePoolDir=./9324/walfilepool/", + "-walFilePoolMetaPath=./9324/walfilepool.meta", NULL }, }; diff --git a/test/integration/raft/raft_vote_test.cpp b/test/integration/raft/raft_vote_test.cpp index 3b85856366..57f3240572 100644 --- a/test/integration/raft/raft_vote_test.cpp +++ b/test/integration/raft/raft_vote_test.cpp @@ -40,7 +40,7 @@ using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; using curve::fs::FileSystemType; -static char* raftVoteParam[3][13] = { +static char* raftVoteParam[3][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", @@ -54,6 +54,9 @@ static char* raftVoteParam[3][13] = { "-chunkFilePoolMetaPath=./9091/chunkfilepool.meta", "-conf=./9091/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9091/copysets", + "-walFilePoolDir=./9091/walfilepool/", + "-walFilePoolMetaPath=./9091/walfilepool.meta", NULL }, { @@ -69,6 +72,9 @@ static char* raftVoteParam[3][13] = { "-chunkFilePoolMetaPath=./9092/chunkfilepool.meta", "-conf=./9092/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9092/copysets", + "-walFilePoolDir=./9092/walfilepool/", + "-walFilePoolMetaPath=./9092/walfilepool.meta", NULL }, { @@ -84,6 +90,9 @@ static char* raftVoteParam[3][13] = { "-chunkFilePoolMetaPath=./9093/chunkfilepool.meta", "-conf=./9093/chunkserver.conf", "-raft_sync_segments=true", + "-raftLogUri=curve://./9093/copysets", + "-walFilePoolDir=./9093/walfilepool/", + "-walFilePoolMetaPath=./9093/walfilepool.meta", NULL }, }; diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp 
b/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp index 70ce5e79cd..c1f19939d9 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp @@ -105,6 +105,7 @@ const std::vector chunkserverConfigOptions{ std::string("mds.listen.addr=") + kMdsIpPort, std::string("curve.config_path=") + kCsClientConfigPath, std::string("s3.config_path=") + kS3ConfigPath, + "walfilepool.enable_get_segment_from_pool=false" }; const std::vector csClientConfigOptions{ @@ -132,6 +133,10 @@ const std::vector chunkserverConf1{ { " -raft_sync_segments=true" }, std::string(" --log_dir=") + kLogPath, { " --stderrthreshold=3" }, + { " -raftLogUri=curve://./" + kTestPrefix + "1/copysets" }, + { " -walFilePoolDir=./" + kTestPrefix + "1/walfilepool/" }, + { " -walFilePoolMetaPath=./" + kTestPrefix + + "1/walfilepool.meta" }, }; const std::vector chunkserverConf2{ @@ -149,6 +154,10 @@ const std::vector chunkserverConf2{ { " -raft_sync_segments=true" }, std::string(" --log_dir=") + kLogPath, { " --stderrthreshold=3" }, + { " -raftLogUri=curve://./" + kTestPrefix + "2/copysets" }, + { " -walFilePoolDir=./" + kTestPrefix + "2/walfilepool/" }, + { " -walFilePoolMetaPath=./" + kTestPrefix + + "2/walfilepool.meta" }, }; const std::vector chunkserverConf3{ @@ -166,6 +175,10 @@ const std::vector chunkserverConf3{ { " -raft_sync_segments=true" }, std::string(" --log_dir=") + kLogPath, { " --stderrthreshold=3" }, + { " -raftLogUri=curve://./" + kTestPrefix + "3/copysets" }, + { " -walFilePoolDir=./" + kTestPrefix + "3/walfilepool/" }, + { " -walFilePoolMetaPath=./" + kTestPrefix + + "3/walfilepool.meta" }, }; const std::vector snapshotcloneserverConfigOptions{ @@ -245,25 +258,24 @@ class SnapshotCloneServerTest : public ::testing::Test { "./test/integration/snapshotcloneserver/" "config/topo.json")); // NOLINT - // 格式化chunkfilepool + // format chunkfilepool and walfilepool std::vector threadpool(3); threadpool[0] = - std::thread(&CurveCluster::FormatChunkFilePool, cluster_, + std::thread(&CurveCluster::FormatFilePool, cluster_, "./" + kTestPrefix + "1/chunkfilepool/", "./" + kTestPrefix + "1/chunkfilepool.meta", - "./" + kTestPrefix + "1/", 1); + "./" + kTestPrefix + "1/chunkfilepool/", 1); threadpool[1] = - std::thread(&CurveCluster::FormatChunkFilePool, cluster_, + std::thread(&CurveCluster::FormatFilePool, cluster_, "./" + kTestPrefix + "2/chunkfilepool/", "./" + kTestPrefix + "2/chunkfilepool.meta", - "./" + kTestPrefix + "2/", 1); + "./" + kTestPrefix + "2/chunkfilepool/", 1); threadpool[2] = - std::thread(&CurveCluster::FormatChunkFilePool, cluster_, + std::thread(&CurveCluster::FormatFilePool, cluster_, "./" + kTestPrefix + "3/chunkfilepool/", "./" + kTestPrefix + "3/chunkfilepool.meta", - "./" + kTestPrefix + "3/", 1); - + "./" + kTestPrefix + "3/chunkfilepool/", 1); for (int i = 0; i < 3; i++) { threadpool[i].join(); } diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp index 13162e1730..a998a92538 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp @@ -104,6 +104,7 @@ const std::vector chunkserverConfigOptions{ std::string("mds.listen.addr=") + kMdsIpPort, std::string("curve.config_path=") + kCsClientConfigPath, std::string("s3.config_path=") + kS3ConfigPath, + 
"walfilepool.enable_get_segment_from_pool=false" }; const std::vector csClientConfigOptions{ @@ -131,6 +132,10 @@ const std::vector chunkserverConf1{ { " -raft_sync_segments=true" }, std::string(" --log_dir=") + kLogPath, { " --stderrthreshold=3" }, + { " -raftLogUri=curve://./" + kTestPrefix + "1/copysets" }, + { " -walFilePoolDir=./" + kTestPrefix + "1/walfilepool/" }, + { " -walFilePoolMetaPath=./" + kTestPrefix + + "1/walfilepool.meta" }, }; const std::vector chunkserverConf2{ @@ -148,6 +153,10 @@ const std::vector chunkserverConf2{ { " -raft_sync_segments=true" }, std::string(" --log_dir=") + kLogPath, { " --stderrthreshold=3" }, + { " -raftLogUri=curve://./" + kTestPrefix + "2/copysets" }, + { " -walFilePoolDir=./" + kTestPrefix + "2/walfilepool/" }, + { " -walFilePoolMetaPath=./" + kTestPrefix + + "2/walfilepool.meta" }, }; const std::vector chunkserverConf3{ @@ -165,6 +174,10 @@ const std::vector chunkserverConf3{ { " -raft_sync_segments=true" }, std::string(" --log_dir=") + kLogPath, { " --stderrthreshold=3" }, + { " -raftLogUri=curve://./" + kTestPrefix + "3/copysets" }, + { " -walFilePoolDir=./" + kTestPrefix + "3/walfilepool/" }, + { " -walFilePoolMetaPath=./" + kTestPrefix + + "3/walfilepool.meta" }, }; const std::vector snapshotcloneserverConfigOptions{ @@ -245,25 +258,24 @@ class SnapshotCloneServerTest : public ::testing::Test { "./test/integration/snapshotcloneserver/" "config/topo2.json")); - // 格式化chunkfilepool + // format chunkfilepool and walfilepool std::vector threadpool(3); threadpool[0] = - std::thread(&CurveCluster::FormatChunkFilePool, cluster_, + std::thread(&CurveCluster::FormatFilePool, cluster_, "./" + kTestPrefix + "1/chunkfilepool/", "./" + kTestPrefix + "1/chunkfilepool.meta", - "./" + kTestPrefix + "1/", 1); + "./" + kTestPrefix + "1/chunkfilepool/", 1); threadpool[1] = - std::thread(&CurveCluster::FormatChunkFilePool, cluster_, + std::thread(&CurveCluster::FormatFilePool, cluster_, "./" + kTestPrefix + "2/chunkfilepool/", "./" + kTestPrefix + "2/chunkfilepool.meta", - "./" + kTestPrefix + "2/", 1); + "./" + kTestPrefix + "2/chunkfilepool/", 1); threadpool[2] = - std::thread(&CurveCluster::FormatChunkFilePool, cluster_, + std::thread(&CurveCluster::FormatFilePool, cluster_, "./" + kTestPrefix + "3/chunkfilepool/", "./" + kTestPrefix + "3/chunkfilepool.meta", - "./" + kTestPrefix + "3/", 1); - + "./" + kTestPrefix + "3/chunkfilepool/", 1); for (int i = 0; i < 3; i++) { threadpool[i].join(); } diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp index f101bd35f0..182943b9c7 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp @@ -130,6 +130,7 @@ const std::vector chunkserverConfigOptions{ std::string("s3.config_path=") + kS3ConfigPath, std::string("curve.root_username") + mdsRootUser_, std::string("curve.root_password") + mdsRootPassword_, + "walfilepool.enable_get_segment_from_pool=false" }; const std::vector csClientConfigOptions{ @@ -157,6 +158,10 @@ const std::vector chunkserverConf1{ { " -raft_sync_segments=true" }, std::string(" --log_dir=") + kLogPath, { " --stderrthreshold=3" }, + { " -raftLogUri=curve://./" + kTestPrefix + "1/copysets" }, + { " -walFilePoolDir=./" + kTestPrefix + "1/walfilepool/" }, + { " -walFilePoolMetaPath=./" + kTestPrefix + + "1/walfilepool.meta" }, }; const std::vector chunkserverConf2{ @@ -174,6 +179,10 
@@ const std::vector chunkserverConf2{ { " -raft_sync_segments=true" }, std::string(" --log_dir=") + kLogPath, { " --stderrthreshold=3" }, + { " -raftLogUri=curve://./" + kTestPrefix + "2/copysets" }, + { " -walFilePoolDir=./" + kTestPrefix + "2/walfilepool/" }, + { " -walFilePoolMetaPath=./" + kTestPrefix + + "2/walfilepool.meta" }, }; const std::vector chunkserverConf3{ @@ -191,6 +200,10 @@ const std::vector chunkserverConf3{ { " -raft_sync_segments=true" }, std::string(" --log_dir=") + kLogPath, { " --stderrthreshold=3" }, + { " -raftLogUri=curve://./" + kTestPrefix + "3/copysets" }, + { " -walFilePoolDir=./" + kTestPrefix + "3/walfilepool/" }, + { " -walFilePoolMetaPath=./" + kTestPrefix + + "3/walfilepool.meta" }, }; const std::vector snapshotcloneserverConfigOptions{ @@ -269,25 +282,24 @@ class SnapshotCloneServerTest : public ::testing::Test { "./test/integration/snapshotcloneserver/" "config/topo3.json")); // NOLINT - // 格式化chunkfilepool + // format chunkfilepool and walfilepool std::vector threadpool(3); threadpool[0] = - std::thread(&CurveCluster::FormatChunkFilePool, cluster_, + std::thread(&CurveCluster::FormatFilePool, cluster_, "./" + kTestPrefix + "1/chunkfilepool/", "./" + kTestPrefix + "1/chunkfilepool.meta", - "./" + kTestPrefix + "1/", 2); + "./" + kTestPrefix + "1/chunkfilepool/", 2); threadpool[1] = - std::thread(&CurveCluster::FormatChunkFilePool, cluster_, + std::thread(&CurveCluster::FormatFilePool, cluster_, "./" + kTestPrefix + "2/chunkfilepool/", "./" + kTestPrefix + "2/chunkfilepool.meta", - "./" + kTestPrefix + "2/", 2); + "./" + kTestPrefix + "2/chunkfilepool/", 2); threadpool[2] = - std::thread(&CurveCluster::FormatChunkFilePool, cluster_, + std::thread(&CurveCluster::FormatFilePool, cluster_, "./" + kTestPrefix + "3/chunkfilepool/", "./" + kTestPrefix + "3/chunkfilepool.meta", - "./" + kTestPrefix + "3/", 2); - + "./" + kTestPrefix + "3/chunkfilepool/", 2); for (int i = 0; i < 3; i++) { threadpool[i].join(); } diff --git a/test/tools/status_tool_test.cpp b/test/tools/status_tool_test.cpp index ec8ff0d5dd..2389977f40 100644 --- a/test/tools/status_tool_test.cpp +++ b/test/tools/status_tool_test.cpp @@ -533,7 +533,7 @@ TEST_F(StatusToolTest, StatusCmdCommon) { .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), Return(0))); EXPECT_CALL(*metricClient_, GetMetricUint(_, _, _)) - .Times(3) + .Times(6) .WillRepeatedly(DoAll(SetArgPointee<2>(1000), Return(MetricRet::kOK))); EXPECT_CALL(*copysetCheck_, CheckChunkServerOnline(_))
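All of the snapshotclone suites disable pool preallocation in the generated chunkserver.conf in the same way, by adding the walfilepool switch next to the existing chunkfilepool one. A minimal sketch of that override list (the two conf keys are the ones used in these hunks; the vector name and surrounding file are hypothetical):

```cpp
// Illustrative only: these conf keys appear in the test overrides above;
// the vector name is hypothetical.
#include <string>
#include <vector>

// With both switches off, chunks and WAL segments are allocated on demand,
// so the tests do not need a preformatted chunkfilepool or walfilepool.
const std::vector<std::string> examplePoolOverrides{
    "chunkfilepool.enable_get_chunk_from_pool=false",
    "walfilepool.enable_get_segment_from_pool=false",
};
```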