[feat] tools-v2: fix conflict
Signed-off-by: victorseptember <[email protected]>
victorseptember committed Oct 26, 2023
2 parents dc021a1 + aff0ebf commit 5dc1cbe
Showing 283 changed files with 23,217 additions and 3,390 deletions.
1 change: 1 addition & 0 deletions .clang-format
@@ -4,6 +4,7 @@ BasedOnStyle: Google
AccessModifierOffset: -3
DerivePointerAlignment: false
IndentWidth: 4
AlignAfterOpenBracket: DontAlign
---
Language: Proto
BasedOnStyle: Google
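
For context, AlignAfterOpenBracket: DontAlign tells clang-format not to line wrapped arguments up under the opening bracket and to use a plain continuation indent instead. A minimal C++ sketch of the effect, with made-up names purely for illustration:

    #include <string>

    // Hypothetical declaration, shown only to illustrate the new wrapping style.
    // With the Google default (Align), the wrapped parameter would sit directly
    // under the opening parenthesis; with DontAlign it gets a normal 4-space
    // continuation indent.
    void CopyChunk(const std::string& source_path, const std::string& target_path,
        int retry_times);
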
2 changes: 1 addition & 1 deletion .github/workflows/clang-format.yml
@@ -14,7 +14,7 @@ jobs:
# fetch everything to be able to compare with any ref
fetch-depth: 0

- name: Check
- name: Check
env:
LANG: "C.UTF-8"
LC_ALL: "C.UTF-8"
8 changes: 8 additions & 0 deletions .gitignore
@@ -169,3 +169,11 @@ test/integration/snapshotcloneserver/config/*.conf

*.deb
*.whl

*.class
curvefs/sdk/java/target/
curvefs/sdk/java/native/build
curvefs/sdk/libcurvefs/examples/bin
curvefs/sdk/output/
hadoop-test/
nnbench-test/
4 changes: 2 additions & 2 deletions .obm.cfg
@@ -1,2 +1,2 @@
container_name: curve-build-playground-master
container_image: opencurvedocker/curve-base:build-debian9
container_name: curve-build-playground.master
container_image: opencurvedocker/curve-base:build-debian11
36 changes: 32 additions & 4 deletions Makefile
@@ -1,6 +1,6 @@
# Copyright (C) 2021 Jingli Chen (Wine93), NetEase Inc.

.PHONY: list build dep install image playground check test docker
.PHONY: list build dep ci-list ci-build ci-dep install image playground check test docker format

stor?=""
prefix?= "$(PWD)/projects"
@@ -13,6 +13,14 @@ os?= "debian11"
ci?=0
commit_id="HEAD^"
define help_msg
## build curvebs
make build stor=bs dep=1
make dep stor=bs && make build stor=bs

## build curvefs
make build stor=fs dep=1
make dep stor=fs && make build stor=fs

## list
Usage:
make list stor=bs/fs
@@ -25,17 +33,21 @@ Usage:
Examples:
make build stor=bs only=//src/chunkserver:chunkserver
make build stor=bs only=src/*,test/* dep=0
make build stor=fs only=test/* os=debian9
make build stor=fs only=test/* os=debian11

make build stor=fs release=1
Note:
Extra build options can be specified using BUILD_OPTS environment variable, which will be passed to bazel build command.

## dep
## configure dependency(before build)
Usage:
make dep stor=bs/fs
Examples:
make dep stor=bs

## ci-list/build/dep
## use the same way above, but in the container

## install
Usage:
@@ -50,7 +62,7 @@ Examples:
Usage:
make image stor=bs/fs tag=TAG os=OS
Examples:
make image stor=bs tag=opencurvedocker/curvebs:v1.2 os=debian9
make image stor=bs tag=opencurvedocker/curvebs:v1.2 os=debian11


## package
@@ -59,6 +71,12 @@ Usage:
Examples:
make deb
make tar release=1 dep=1 os=debian11


## playground
## create/run a container, changes outside will be mapped into the container
Usage/Example:
make playground
endef
export help_msg

@@ -74,6 +92,9 @@ build:
dep:
@bash util/build.sh --stor=$(stor) --only="" --dep=1

ci-list:
@bash util/build_in_image.sh --stor=$(stor) --list

ci-build:
@bash util/build_in_image.sh --stor=$(stor) --only=$(only) --dep=$(dep) --release=$(release) --ci=$(ci) --os=$(os) --sanitizer=$(sanitizer)

@@ -90,7 +111,7 @@ tar deb:
@RELEASE=$(release) DEP=$(dep) OS=$(os) bash util/package.sh $@

playground:
@bash util/playground.sh
@bash util/playground.sh --version=$(version)

check:
@bash util/check.sh $(stor)
@@ -100,5 +121,12 @@ test:

docker:
@bash util/docker.sh --os=$(os) --ci=$(ci)

format:
@bash util/format.sh $(commit_id)

init-hadoop:
@bash util/init-hadoop.sh

sdk:
@bash util/sdk.sh
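
The new targets appear to follow the same invocation style as the existing ones, e.g. make ci-build stor=bs dep=1 os=debian11 to build inside a container, make format commit_id=HEAD^ to format files touched since a given commit, and make sdk to build the SDK; the exact arguments accepted by format, init-hadoop and sdk are defined in the corresponding util/*.sh scripts, which are not part of this excerpt.
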
25 changes: 23 additions & 2 deletions WORKSPACE
@@ -142,7 +142,7 @@ git_repository(
patches = [
"//:thirdparties/brpc/brpc.patch",
"//:thirdparties/brpc/fix-gcc11.patch",
"//:thirdparties/brpc/0001-bvar-warning-on-conflict-bvar-name.patch",
"//:thirdparties/brpc/0001-bvar-warning-on-conflict-bvar-name.patch",
],
patch_args = ["-p1"],
)
@@ -192,7 +192,6 @@ new_local_repository(
path = "thirdparties/memcache/libmemcached-1.1.2",
)


http_archive(
name = "aws",
urls = ["https://github.com/aws/aws-sdk-cpp/archive/1.7.340.tar.gz"],
@@ -259,6 +258,21 @@ http_archive(
build_file = "//:thirdparties/spdlog.BUILD",
)

# incbin
new_git_repository(
name = "incbin",
remote = "https://github.com/graphitemaster/incbin.git",
commit = "6e576cae5ab5810f25e2631f2e0b80cbe7dc8cbf",
build_file = "//:thirdparties/incbin.BUILD",
)

# config
new_local_repository(
name = "config",
build_file = "//:thirdparties/config.BUILD",
path = "thirdparties/config",
)

# Bazel platform rules.
http_archive(
name = "platforms",
@@ -274,6 +288,13 @@ new_local_repository(
path = "thirdparties/rocksdb",
)

# jni
new_local_repository(
name = "jni",
build_file = "//:thirdparties/jni.BUILD",
path = "thirdparties",
)

# Hedron's Compile Commands Extractor for Bazel
# https://github.com/hedronvision/bazel-compile-commands-extractor
http_archive(
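
Of the new external repositories, incbin is a small C/C++ library for embedding arbitrary files into the compiled binary at build time, while the config and jni entries simply expose local thirdparties/ directories to Bazel. A hedged sketch of how incbin is typically consumed from C++; the embedded path and symbol name below are illustrative, not taken from this commit:

    #include <cstdio>

    #include "incbin.h"

    // INCBIN(DefaultConf, ...) embeds the file's bytes at compile time and
    // exposes gDefaultConfData, gDefaultConfEnd and gDefaultConfSize symbols.
    INCBIN(DefaultConf, "client.conf");

    int main() {
        std::printf("embedded %u bytes of config\n", gDefaultConfSize);
        return 0;
    }
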
18 changes: 17 additions & 1 deletion conf/chunkserver.conf
@@ -194,7 +194,7 @@ rconcurrentapply.queuedepth=1
# 是否开启从chunkfilepool获取chunk,一般是true
chunkfilepool.enable_get_chunk_from_pool=true
# chunkfilepool目录
chunkfilepool.chunk_file_pool_dir=./0/ # __CURVEADM_TEMPLATE__ ${prefix}/data __CURVEADM_TEMPLATE__
chunkfilepool.chunk_file_pool_dir=./0/chunks # __CURVEADM_TEMPLATE__ ${prefix}/data __CURVEADM_TEMPLATE__
# chunkfilepool meta文件路径
chunkfilepool.meta_path=./chunkfilepool.meta # __CURVEADM_TEMPLATE__ ${prefix}/data/chunkfilepool.meta __CURVEADM_TEMPLATE__
# chunkfilepool meta文件大小
@@ -207,6 +207,14 @@ chunkfilepool.clean.enable=true
chunkfilepool.clean.bytes_per_write=4096
# The throttle iops for cleaning chunk (4KB/IO)
chunkfilepool.clean.throttle_iops=500
# Whether allocate filePool by percent of disk size.
chunkfilepool.allocated_by_percent=true
# Preallocate storage percent of total disk
chunkfilepool.allocate_percent=80
# Preallocate storage size of chunkfilepool (None/KB/MB/GB/TB)
chunkfilepool.chunk_file_pool_size=1GB
# The thread num for format chunks
chunkfilepool.thread_num=1

#
# WAL file pool
@@ -229,6 +237,14 @@ walfilepool.metapage_size=4096
walfilepool.meta_file_size=4096
# WAL filepool get chunk最大重试次数
walfilepool.retry_times=5
# Whether allocate filePool by percent of disk size.
walfilepool.allocated_by_percent=true
# Preallocate storage percent of total disk
walfilepool.allocate_percent=90
# Preallocate storage size size of walfilepool (None/KB/MB/GB/TB)
walfilepool.wal_file_pool_size=0
# The thread num for format chunks
walfilepool.thread_num=1

#
# trash settings
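
Read together with the comments above, the new file-pool options appear to work as follows (inferred from the option names and comments, not verified against the implementation): when chunkfilepool.allocated_by_percent=true, the chunk file pool is preallocated as a percentage of the data disk given by chunkfilepool.allocate_percent, so 80 on a 1 TiB disk would reserve roughly 800 GiB; when it is false, the fixed chunkfilepool.chunk_file_pool_size is used instead. The walfilepool.* counterparts behave the same way for the WAL pool, and the *.thread_num options set how many threads format the preallocated chunks.
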
18 changes: 17 additions & 1 deletion conf/chunkserver.conf.example
@@ -186,7 +186,7 @@ rconcurrentapply.queuedepth=1
# 是否开启从chunkfilepool获取chunk,一般是true
chunkfilepool.enable_get_chunk_from_pool=true
# chunkfilepool目录
chunkfilepool.chunk_file_pool_dir=./0/
chunkfilepool.chunk_file_pool_dir=./0/chunks
# chunkfilepool meta文件路径
#chunkfilepool.meta_path=./chunkfilepool.meta
# chunkfilepool meta文件大小
@@ -199,6 +199,14 @@ chunkfilepool.clean.enable=true
chunkfilepool.clean.bytes_per_write=4096
# The throttle iops for cleaning chunk (4KB/IO)
chunkfilepool.clean.throttle_iops=500
# Whether allocate filePool by percent of disk size.
chunkfilepool.allocated_by_percent=true
# Preallocate storage percent of total disk
chunkfilepool.allocate_percent=80
# Preallocate storage size of chunkfilepool (None/KB/MB/GB/TB)
chunkfilepool.chunk_file_pool_size=1GB
# The thread num for format chunks
chunkfilepool.thread_num=1

#
# WAL file pool
@@ -221,6 +229,14 @@ walfilepool.metapage_size=4096
walfilepool.meta_file_size=4096
# WAL filepool get chunk最大重试次数
walfilepool.retry_times=5
# Whether allocate filePool by percent of disk size.
walfilepool.allocated_by_percent=true
# Preallocate storage percent of total disk
walfilepool.allocate_percent=10
# Preallocate storage size size of walfilepool (None/KB/MB/GB/TB)
walfilepool.wal_file_pool_size=0
# The thread num for format chunks
walfilepool.thread_num=1

#
# trash settings
33 changes: 26 additions & 7 deletions curvefs/conf/client.conf
@@ -92,8 +92,6 @@ fuseClient.enableMultiMountPointRename=true
fuseClient.enableSplice=false
# thread number of listDentry when get summary xattr
fuseClient.listDentryThreads=10
# disable xattr on one mountpoint can fast 'ls -l'
fuseClient.disableXattr=false
# default data(s3ChunkInfo/volumeExtent) size in inode, if exceed will eliminate and try to get the merged one
fuseClient.maxDataSize=1024
# default refresh data interval 30s
@@ -128,17 +126,26 @@ fuseClient.throttle.burstReadIops=0
# the times that read burst Iops can continue, default 180s
fuseClient.throttle.burstReadIopsSecs=180

#### vfs (virtual filesystem)
#{
vfs.userPermission.uid=0
vfs.userPermission.gids=0
vfs.userPermission.umask=0022
vfs.entryCache.lruSize=2000000
vfs.attrCache.lruSize=2000000
#}

#### filesystem metadata
# {
# fs.disableXattr:
# fs.disableXAttr:
# if you want to get better metadata performance,
# you can mount fs with |fs.disableXattr| is true
# you can mount fs with |fs.disableXAttr| is true
#
# fs.lookupCache.negativeTimeoutSec:
# entry which not found will be cached if |timeout| > 0
fs.cto=true
fs.maxNameLength=255
fs.disableXattr=false
fs.disableXAttr=true
fs.accessLogging=true
fs.kernelCache.attrTimeoutSec=3600
fs.kernelCache.dirAttrTimeoutSec=3600
@@ -204,6 +211,18 @@ s3.writeCacheMaxByte=838860800
s3.readCacheMaxByte=209715200
# file cache read thread num
s3.readCacheThreads=5

# The data in the cache cluster download to local
s3.memClusterToLocal=true
# The data in the s3 storage download to local
s3.s3ToLocal=true
# read size bigger than this value will read until prefetch is finished
s3.bigIoSize=131072
# retry times when read big io failed
s3.bigIoRetryTimes=100
# retry interval when read big io failed
s3.bigIoRetryIntervalUs=100

# http = 0, https = 1
s3.http_scheme=0
s3.verify_SSL=False
@@ -217,7 +236,7 @@ s3.logLevel=4
s3.logPrefix=/data/logs/curvefs/aws_ # __CURVEADM_TEMPLATE__ /curvefs/client/logs/aws_ __CURVEADM_TEMPLATE__
s3.asyncThreadNum=500
# limit all inflight async requests' bytes, |0| means not limited
s3.maxAsyncRequestInflightBytes=104857600
s3.maxAsyncRequestInflightBytes=1073741824
s3.chunkFlushThreads=5
# throttle
s3.throttle.iopsTotalLimit=0
@@ -247,7 +266,7 @@ diskCache.asyncLoadPeriodMs=5
# ok nearfull full
# |------------|-------------------|----------------------|
# 0 trimRatio*safeRatio safeRatio fullRatio
#
#
# 1. 0<=ok<trimRatio*safeRatio;
# 2. trimRatio*safeRatio<=nearfull<safeRatio
# 3. safeRatio<=full<=fullRatio
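
A few of the curvefs client changes are worth noting (reading only from the option names and comments): fuseClient.disableXattr is dropped and fs.disableXattr is renamed to fs.disableXAttr, which now defaults to true, trading extended-attribute support for faster metadata operations; the new vfs.* block sets default ownership (uid 0, gid 0) and a umask of 0022, i.e. newly created files get mode 644 and directories 755; and reads larger than s3.bigIoSize (128 KiB) wait for the in-flight prefetch to finish, retrying up to s3.bigIoRetryTimes times with s3.bigIoRetryIntervalUs microseconds between attempts.
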
(Diff for the remaining changed files not shown.)
