diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 0a872ccaf35..e673e6dc229 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1 +1 @@
-See [How to contribute](https://github.com/vesoft-inc/nebula-community/blob/master/Contributors/how-to-contribute.md) for details about how to contribute to **Nebula Graph**.
+See [How to contribute](https://github.com/vesoft-inc/nebula-community/blob/master/Contributors/how-to-contribute.md) for details about how to contribute to **NebulaGraph**.
diff --git a/conf/tuned/README.md b/conf/tuned/README.md
index cada719b08e..dfcc0bc2ffc 100644
--- a/conf/tuned/README.md
+++ b/conf/tuned/README.md
@@ -1,6 +1,6 @@
 # Summary
 
-These are tuned profile to configure the system to optimize for the Nebula Graph service.
+These are tuned profile to configure the system to optimize for the NebulaGraph service.
 
 Follow below steps to utilize:
 * Install the tuned service if absent, and enable it with `systemctl`.
diff --git a/conf/tuned/nebula/tuned.conf b/conf/tuned/nebula/tuned.conf
index e9e34e9080c..8951652964a 100644
--- a/conf/tuned/nebula/tuned.conf
+++ b/conf/tuned/nebula/tuned.conf
@@ -1,5 +1,5 @@
 [main]
-summary=Optimize for Nebula Graph DBMS
+summary=Optimize for NebulaGraph DBMS
 
 include=latency-performance
 
diff --git a/docker/README.md b/docker/README.md
index 1e626b9b725..6cd7391c46e 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -1,4 +1,4 @@
-# Docker Files for Nebula Graph Services
+# Docker Files for NebulaGraph Services
 
 Following docker images will be ready in production.
 
diff --git a/package/package.sh b/package/package.sh
index e96e35f662c..086eeed03ec 100755
--- a/package/package.sh
+++ b/package/package.sh
@@ -149,11 +149,11 @@ function _build_graph {
         ${project_dir}
 
     if ! ( make -j ${jobs} ); then
-        echo ">>> build nebula graph failed <<<"
+        echo ">>> build NebulaGraph failed <<<"
        exit 1
     fi
     popd
-    echo ">>> build nebula graph successfully <<<"
+    echo ">>> build NebulaGraph successfully <<<"
 }
 
 # args:
@@ -179,7 +179,7 @@ function package {
     [[ $strip_enable == TRUE ]] && args="-D CPACK_STRIP_FILES=TRUE -D CPACK_RPM_SPEC_MORE_DEFINE="
 
     if ! ( cpack --verbose $args ); then
-        echo ">>> package nebula failed <<<"
+        echo ">>> package NebulaGraph failed <<<"
        exit 1
     else
         # rename package file
diff --git a/resources/README.md b/resources/README.md
index 7b2c47ba251..8c308375281 100644
--- a/resources/README.md
+++ b/resources/README.md
@@ -1,3 +1,3 @@
 # Overview
 
-This directory holds docs and resources shipped with the **Nebula Graph** package.
+This directory holds docs and resources shipped with the **NebulaGraph** package.
diff --git a/scripts/README.md b/scripts/README.md
index 6b733aeb268..902cb2979bb 100644
--- a/scripts/README.md
+++ b/scripts/README.md
@@ -1,6 +1,6 @@
 # Overview
 
-This document handles **Nebula Graph** installation, and all the operations are done on single hosts.
+This document handles **NebulaGraph** installation, and all the operations are done on single hosts.
 
 Firstly, you need to create the config files for the three daemons in the `conf` directory based on their `*.default` counterparts.
 
diff --git a/src/daemons/MetaDaemonInit.cpp b/src/daemons/MetaDaemonInit.cpp
index 25261014255..ef587ec6bda 100644
--- a/src/daemons/MetaDaemonInit.cpp
+++ b/src/daemons/MetaDaemonInit.cpp
@@ -93,7 +93,7 @@ std::unique_ptr initKV(std::vector p
 
   auto engineRet = kvstore->part(nebula::kDefaultSpaceId, nebula::kDefaultPartId);
   if (!nebula::ok(engineRet)) {
-    LOG(ERROR) << "Get nebula store engine failed";
+    LOG(ERROR) << "Get Nebula store engine failed";
     return nullptr;
   }
 
diff --git a/src/daemons/StorageDaemon.cpp b/src/daemons/StorageDaemon.cpp
index 16e2d55e6ec..5269d7d678d 100644
--- a/src/daemons/StorageDaemon.cpp
+++ b/src/daemons/StorageDaemon.cpp
@@ -24,7 +24,7 @@ DEFINE_string(data_path,
               "For rocksdb engine, one path one instance.");
 DEFINE_string(wal_path,
               "",
-              "Nebula wal path. By default, wal will be stored as a sibling of "
+              "NebulaGraph wal path. By default, wal will be stored as a sibling of "
               "rocksdb data.");
 DEFINE_string(listener_path,
               "",
diff --git a/src/graph/service/GraphFlags.cpp b/src/graph/service/GraphFlags.cpp
index be60913a10e..5ea5fbc1ad3 100644
--- a/src/graph/service/GraphFlags.cpp
+++ b/src/graph/service/GraphFlags.cpp
@@ -7,10 +7,10 @@
 
 #include "version/Version.h"
 
-DEFINE_int32(port, 3699, "Nebula Graph daemon's listen port");
+DEFINE_int32(port, 3699, "Graph service daemon's listen port");
 DEFINE_int32(client_idle_timeout_secs,
              28800,
-             "The number of seconds Nebula service waits before closing the idle connections");
+             "The number of seconds NebulaGraph service waits before closing the idle connections");
 DEFINE_int32(session_idle_timeout_secs, 28800, "The number of seconds before idle sessions expire");
 DEFINE_int32(session_reclaim_interval_secs, 10, "Period we try to reclaim expired sessions");
 DEFINE_int32(num_netio_threads,
diff --git a/src/parser/test/fuzzing/README.md b/src/parser/test/fuzzing/README.md
index 6a985bc8f1b..49105e29f7e 100644
--- a/src/parser/test/fuzzing/README.md
+++ b/src/parser/test/fuzzing/README.md
@@ -1,4 +1,4 @@
-# Nebula Graph Database fuzz testing
+# NebulaGraph Database fuzz testing
 
 Nebula-graph uses [libfuzzer](http://llvm.org/docs/LibFuzzer.html) for fuzz test. If you want to use fuzz test, then you need to use the [Clang](https://clang.llvm.org/) to compile Nebula.
 
diff --git a/src/storage/exec/GetPropNode.h b/src/storage/exec/GetPropNode.h
index 7e79d3479df..a8b226f2dcc 100644
--- a/src/storage/exec/GetPropNode.h
+++ b/src/storage/exec/GetPropNode.h
@@ -94,8 +94,13 @@ class GetTagPropNode : public QueryNode {
         return ret;
       }
     }
-    if (filter_ == nullptr || (QueryUtils::vTrue(filter_->eval(*expCtx_)))) {
+    if (filter_ == nullptr) {
       resultDataSet_->rows.emplace_back(std::move(row));
+    } else {
+      auto result = QueryUtils::vTrue(filter_->eval(*expCtx_));
+      if (result.ok() && result.value()) {
+        resultDataSet_->rows.emplace_back(std::move(row));
+      }
     }
     if (expCtx_ != nullptr) {
       expCtx_->clear();
@@ -172,8 +177,13 @@ class GetEdgePropNode : public QueryNode {
         return ret;
       }
     }
-    if (filter_ == nullptr || (QueryUtils::vTrue(filter_->eval(*expCtx_)))) {
+    if (filter_ == nullptr) {
       resultDataSet_->rows.emplace_back(std::move(row));
+    } else {
+      auto result = QueryUtils::vTrue(filter_->eval(*expCtx_));
+      if (result.ok() && result.value()) {
+        resultDataSet_->rows.emplace_back(std::move(row));
+      }
     }
     if (expCtx_ != nullptr) {
       expCtx_->clear();
diff --git a/src/storage/exec/QueryUtils.h b/src/storage/exec/QueryUtils.h
index 4fb86627b12..4a3c1499727 100644
--- a/src/storage/exec/QueryUtils.h
+++ b/src/storage/exec/QueryUtils.h
@@ -18,8 +18,16 @@ namespace storage {
 
 class QueryUtils final {
  public:
-  static inline bool vTrue(const Value& v) {
-    return v.isBool() && v.getBool();
+  // The behavior keep same with filter executor
+  static inline StatusOr<bool> vTrue(const Value& val) {
+    if (val.isBadNull() || (!val.empty() && !val.isBool() && !val.isNull())) {
+      return Status::Error("Wrong type result, the type should be NULL, EMPTY or BOOL");
+    }
+    if (val.empty() || val.isNull() || !val.getBool()) {
+      return false;
+    } else {
+      return true;
+    }
   }
 
   enum class ReturnColType : uint16_t {
diff --git a/src/storage/exec/ScanNode.h b/src/storage/exec/ScanNode.h
index c5c5a9d997e..6c58080c0f2 100644
--- a/src/storage/exec/ScanNode.h
+++ b/src/storage/exec/ScanNode.h
@@ -171,9 +171,15 @@ class ScanVertexPropNode : public QueryNode {
         break;
       }
     }
-    if (ret == nebula::cpp2::ErrorCode::SUCCEEDED &&
-        (filter_ == nullptr || QueryUtils::vTrue(filter_->eval(*expCtx_)))) {
-      resultDataSet_->rows.emplace_back(std::move(row));
+    if (ret == nebula::cpp2::ErrorCode::SUCCEEDED) {
+      if (filter_ == nullptr) {
+        resultDataSet_->rows.emplace_back(std::move(row));
+      } else {
+        auto result = QueryUtils::vTrue(filter_->eval(*expCtx_));
+        if (result.ok() && result.value()) {
+          resultDataSet_->rows.emplace_back(std::move(row));
+        }
+      }
     }
     expCtx_->clear();
     for (auto& tagNode : tagNodes_) {
@@ -323,9 +329,15 @@ class ScanEdgePropNode : public QueryNode {
         break;
       }
     }
-    if (ret == nebula::cpp2::ErrorCode::SUCCEEDED &&
-        (filter_ == nullptr || QueryUtils::vTrue(filter_->eval(*expCtx_)))) {
-      resultDataSet_->rows.emplace_back(std::move(row));
+    if (ret == nebula::cpp2::ErrorCode::SUCCEEDED) {
+      if (filter_ == nullptr) {
+        resultDataSet_->rows.emplace_back(std::move(row));
+      } else {
+        auto result = QueryUtils::vTrue(filter_->eval(*expCtx_));
+        if (result.ok() && result.value()) {
+          resultDataSet_->rows.emplace_back(std::move(row));
+        }
+      }
     }
     expCtx_->clear();
     for (auto& edgeNode : edgeNodes_) {
diff --git a/src/storage/query/GetNeighborsProcessor.cpp b/src/storage/query/GetNeighborsProcessor.cpp
index c978697c468..97d34f85dec 100644
--- a/src/storage/query/GetNeighborsProcessor.cpp
+++ b/src/storage/query/GetNeighborsProcessor.cpp
@@ -130,8 +130,13 @@ void GetNeighborsProcessor::runInMultipleThread(const cpp2::GetNeighborsRequest&
   folly::collectAll(futures).via(executor_).thenTry([this](auto&& t) mutable {
     CHECK(!t.hasException());
     const auto& tries = t.value();
+    size_t sum = 0;
     for (size_t j = 0; j < tries.size(); j++) {
       CHECK(!tries[j].hasException());
+      sum += results_[j].size();
+    }
+    resultDataSet_.rows.reserve(sum);
+    for (size_t j = 0; j < tries.size(); j++) {
       const auto& [code, partId] = tries[j].value();
       if (code != nebula::cpp2::ErrorCode::SUCCEEDED) {
         handleErrorCode(code, spaceId_, partId);
diff --git a/src/storage/query/GetPropProcessor.cpp b/src/storage/query/GetPropProcessor.cpp
index f4c2412a073..c20030002e2 100644
--- a/src/storage/query/GetPropProcessor.cpp
+++ b/src/storage/query/GetPropProcessor.cpp
@@ -129,8 +129,13 @@ void GetPropProcessor::runInMultipleThread(const cpp2::GetPropRequest& req) {
   folly::collectAll(futures).via(executor_).thenTry([this](auto&& t) mutable {
     CHECK(!t.hasException());
     const auto& tries = t.value();
+    size_t sum = 0;
     for (size_t j = 0; j < tries.size(); j++) {
       CHECK(!tries[j].hasException());
+      sum += results_[j].size();
+    }
+    resultDataSet_.rows.reserve(sum);
+    for (size_t j = 0; j < tries.size(); j++) {
       const auto& [code, partId] = tries[j].value();
       if (code != nebula::cpp2::ErrorCode::SUCCEEDED) {
         handleErrorCode(code, spaceId_, partId);
diff --git a/src/storage/query/ScanEdgeProcessor.cpp b/src/storage/query/ScanEdgeProcessor.cpp
index 3e95eccdce6..780f0fbaf27 100644
--- a/src/storage/query/ScanEdgeProcessor.cpp
+++ b/src/storage/query/ScanEdgeProcessor.cpp
@@ -179,8 +179,13 @@ void ScanEdgeProcessor::runInMultipleThread(const cpp2::ScanEdgeRequest& req) {
   folly::collectAll(futures).via(executor_).thenTry([this](auto&& t) mutable {
     CHECK(!t.hasException());
     const auto& tries = t.value();
+    size_t sum = 0;
     for (size_t j = 0; j < tries.size(); j++) {
       CHECK(!tries[j].hasException());
+      sum += results_[j].size();
+    }
+    resultDataSet_.rows.reserve(sum);
+    for (size_t j = 0; j < tries.size(); j++) {
       const auto& [code, partId] = tries[j].value();
       if (code != nebula::cpp2::ErrorCode::SUCCEEDED) {
         handleErrorCode(code, spaceId_, partId);
diff --git a/src/storage/query/ScanVertexProcessor.cpp b/src/storage/query/ScanVertexProcessor.cpp
index 6ebdf7f2134..2c1b611f8f2 100644
--- a/src/storage/query/ScanVertexProcessor.cpp
+++ b/src/storage/query/ScanVertexProcessor.cpp
@@ -184,8 +184,13 @@ void ScanVertexProcessor::runInMultipleThread(const cpp2::ScanVertexRequest& req
   folly::collectAll(futures).via(executor_).thenTry([this](auto&& t) mutable {
     CHECK(!t.hasException());
     const auto& tries = t.value();
+    size_t sum = 0;
     for (size_t j = 0; j < tries.size(); j++) {
       CHECK(!tries[j].hasException());
+      sum += results_[j].size();
+    }
+    resultDataSet_.rows.reserve(sum);
+    for (size_t j = 0; j < tries.size(); j++) {
       const auto& [code, partId] = tries[j].value();
       if (code != nebula::cpp2::ErrorCode::SUCCEEDED) {
         handleErrorCode(code, spaceId_, partId);
diff --git a/tests/README.md b/tests/README.md
index a9c0e045632..7dfb484b761 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -1,4 +1,4 @@
-# Nebula Graph Test Manual
+# NebulaGraph Test Manual
 
 ## Usage
 
@@ -35,7 +35,7 @@ $ make BUILD_DIR=/path/to/nebula/build/directory up
 
 ### Run all test cases
 
-There are two classes of nebula graph test cases, one is built on pytest and another is built on TCK. We split them into different execution methods:
+There are two classes of NebulaGraph test cases, one is built on pytest and another is built on TCK. We split them into different execution methods:
 
 ```shell
 $ make test # run pytest cases
diff --git a/tests/conftest.py b/tests/conftest.py
index da80af10008..3bb15a16f3b 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -58,11 +58,11 @@ def pytest_addoption(parser):
     parser.addoption("--build_dir",
                      dest="build_dir",
                      default=BUILD_DIR,
-                     help="Nebula Graph CMake build directory")
+                     help="NebulaGraph CMake build directory")
     parser.addoption("--src_dir",
                      dest="src_dir",
                      default=NEBULA_HOME,
-                     help="Nebula Graph workspace")
+                     help="NebulaGraph workspace")
 
 
 def pytest_bdd_step_error(request, feature, scenario, step, step_func, step_func_args):
diff --git a/tests/nebula-test-run.py b/tests/nebula-test-run.py
index 6703fa6b0d3..d1d9dd2854c 100755
--- a/tests/nebula-test-run.py
+++ b/tests/nebula-test-run.py
@@ -30,7 +30,7 @@ def init_parser():
         '--build_dir',
         dest='build_dir',
         default=BUILD_DIR,
-        help='Build directory of nebula graph',
+        help='Build directory of NebulaGraph',
     )
     opt_parser.add_option(
         '--rm_dir',
@@ -39,10 +39,10 @@ def init_parser():
         help='Whether to remove the test folder',
     )
     opt_parser.add_option(
-        '--user', dest='user', default='root', help='nebula graph user'
+        '--user', dest='user', default='root', help='NebulaGraph user'
    )
     opt_parser.add_option(
-        '--password', dest='password', default='nebula', help='nebula graph password'
+        '--password', dest='password', default='nebula', help='NebulaGraph password'
    )
     opt_parser.add_option('--cmd', dest='cmd', default='', help='start or stop command')
     opt_parser.add_option(
@@ -114,7 +114,7 @@ def opt_is(val, expect):
 
 def start_nebula(nb, configs):
     if configs.address is not None and configs.address != "":
-        print('test remote nebula graph, address is {}'.format(configs.address))
+        print('test remote NebulaGraph, address is {}'.format(configs.address))
         if len(configs.address.split(':')) != 2:
             raise Exception('Invalid address, address is {}'.format(configs.address))
         address, port = configs.address.split(':')
@@ -161,7 +161,7 @@ def start_nebula(nb, configs):
 
 def start_standalone(nb, configs):
     if configs.address is not None and configs.address != "":
-        print('test remote nebula graph, address is {}'.format(configs.address))
+        print('test remote NebulaGraph, address is {}'.format(configs.address))
         if len(configs.address.split(':')) != 2:
             raise Exception('Invalid address, address is {}'.format(configs.address))
         address, port = configs.address.split(':')
@@ -209,7 +209,7 @@ def start_standalone(nb, configs):
 
 def stop_nebula(nb, configs=None):
     if configs.address is not None and configs.address != "":
-        print('test remote nebula graph, no need to stop nebula.')
+        print('test remote NebulaGraph, no need to stop nebula.')
         return
 
     with open(NB_TMP_PATH, "r") as f:
@@ -235,7 +235,7 @@ def stop_nebula(nb, configs=None):
     graphd_inst = 2
     is_standalone = False
 
-    # Setup nebula graph service
+    # Setup NebulaGraph service
     nebula_svc = NebulaService(
         configs.build_dir,
         NEBULA_HOME,