diff --git a/.github/actions/tagname-action/action.yml b/.github/actions/tagname-action/action.yml index 81c8b1011eb..8132c96f470 100644 --- a/.github/actions/tagname-action/action.yml +++ b/.github/actions/tagname-action/action.yml @@ -13,7 +13,9 @@ runs: - id: tag run: | tag=$(echo ${{ github.ref }} | rev | cut -d/ -f1 | rev) - tagnum=$(echo $tag |sed 's/^v//') + tagnum=$(echo $tag | sed 's/^v//') + majorver=$(echo $tag | cut -d '.' -f 1) echo "::set-output name=tag::$tag" echo "::set-output name=tagnum::$tagnum" + echo "::set-output name=majorver::$majorver" shell: bash diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index cdcd55fd93d..0bb60d43d0f 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -39,7 +39,7 @@ jobs: echo "::set-output name=subdir::$subdir" - uses: actions/upload-artifact@v1 with: - name: ${{ matrix.os }}-v2-nightly + name: ${{ matrix.os }}-nightly path: pkg-build/cpack_output - uses: ./.github/actions/upload-to-oss-action with: diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 54ce3508195..9f9f2d4c40b 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -94,6 +94,7 @@ jobs: -DCMAKE_C_COMPILER=$TOOLSET_CLANG_DIR/bin/gcc \ -DCMAKE_BUILD_TYPE=Debug \ -DENABLE_TESTING=on \ + -DENABLE_COVERAGE=on \ -B build echo "::set-output name=j::10" echo "::set-output name=t::10" @@ -126,7 +127,24 @@ jobs: timeout-minutes: 20 - name: Setup cluster run: | - make up + case ${{ matrix.compiler }} in + gcc-*) + case ${{ matrix.os }} in + centos7) + # normal cluster + make up + ;; + ubuntu2004) + # ssl cluster + make ENABLE_SSL=true CA_SIGNED=true up + ;; + esac + ;; + clang-*) + # graph ssl only cluster + make ENABLE_SSL=false ENABLE_GRAPH_SSL=true up + ;; + esac working-directory: tests/ timeout-minutes: 2 - name: Pytest @@ -144,6 +162,11 @@ jobs: make RM_DIR=false down working-directory: tests/ timeout-minutes: 2 + - name: 
coverage + if: ${{ matrix.compiler == 'gcc-9.2' && matrix.os == 'ubuntu2004' }} + run: | + ~/.local/bin/fastcov -d build -l -o fastcov.info -p --exclude /usr/include --exclude=/opt/vesoft --exclude scanner.lex + bash <(curl -s https://codecov.io/bash) -Z -f fastcov.info - name: Sanitizer if: ${{ always() }} run: | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0456901f7dd..962e76d8330 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -62,6 +62,14 @@ jobs: - uses: actions/checkout@v2 - uses: ./.github/actions/tagname-action id: tagname + - id: docker + run: | + majorver=$(git tag -l --sort=v:refname | tail -n1 | cut -f1 -d'.') + tag="" + if [[ $majorver == ${{ steps.tagname.outputs.majorver }} ]]; then + tag="vesoft/nebula-${{ matrix.service }}:latest" + fi + echo "::set-output name=tag::$tag" - uses: docker/setup-buildx-action@v1 - uses: docker/login-action@v1 with: @@ -71,7 +79,10 @@ jobs: with: context: . file: ./docker/Dockerfile.${{ matrix.service }} - tags: vesoft/nebula-${{ matrix.service }}:${{ steps.tagname.outputs.tag }},vesoft/nebula-${{ matrix.service }}:latest + tags: | + vesoft/nebula-${{ matrix.service }}:${{ steps.tagname.outputs.tag }} + vesoft/nebula-${{ matrix.service }}:${{ steps.tagname.outputs.majorver }} + ${{ steps.docker.outputs.tag }} push: true build-args: | BRANCH=${{ steps.tagname.outputs.tag }} diff --git a/.gitignore b/.gitignore index c9920bfafd5..1a2cf4c78f1 100644 --- a/.gitignore +++ b/.gitignore @@ -52,6 +52,7 @@ cmake-build*/ core.* workspace.* .metals/ +.cproject #py *.egg-info diff --git a/README-CN.md b/README-CN.md index a86cf921a5c..691ae03e97e 100644 --- a/README-CN.md +++ b/README-CN.md @@ -1,5 +1,5 @@

- +
中文 | English
世界上唯一能够容纳千亿个顶点和万亿条边,并提供毫秒级查询延时的图数据库解决方案

@@ -34,7 +34,7 @@ ## 发布通告 -v1.x和v2.5.0之后的版本,Nebula Graph在这个repo管理。如需获取v2.0.0到v2.5.0之间的版本,请访问[Nebula Graph repo](https://github.com/vesoft-inc/nebula-graph)。 +v1.x和v2.5.1之后的版本,Nebula Graph在这个repo管理。如需获取v2.0.0到v2.5.1之间的版本,请访问[Nebula Graph repo](https://github.com/vesoft-inc/nebula-graph)。 Nebula Graph 1.x 后续不再进行功能的更新,请升级到2.0版本中。Nebula Graph内核 1.x 与 2.x数据格式、通信协议、客户端等均双向不兼容,可参照[升级指导](https://docs.nebula-graph.com.cn/2.5.0/4.deployment-and-installation/3.upgrade-nebula-graph/upgrade-nebula-graph-to-250/)进行升级。 diff --git a/README.md b/README.md index 1f9f13e5824..ea5c8e4f23e 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@

- +
English | 中文
A distributed, scalable, lightning-fast graph database

@@ -31,7 +31,7 @@ Compared with other graph database solutions, **Nebula Graph** has the following ## Notice of Release -This repository hosts the source code of Nebula Graph versions before 2.0.0-alpha and after v2.5.0. If you are looking to use the versions between v2.0.0 and v2.5.0, please head to [Nebula Graph repo](https://github.com/vesoft-inc/nebula-graph). +This repository hosts the source code of Nebula Graph versions before 2.0.0-alpha and after v2.5.1. If you are looking to use the versions between v2.0.0 and v2.5.1, please head to [Nebula Graph repo](https://github.com/vesoft-inc/nebula-graph). Nebula Graph 1.x is not actively maintained. Please move to Nebula Graph 2.x. The data format, rpc protocols, clients, etc. are not compatible between Nebula Graph v1.x and v2.x, but we do offer [upgrade guide from 1.x to v2.5.0](https://docs.nebula-graph.io/2.5.0/4.deployment-and-installation/3.upgrade-nebula-graph/upgrade-nebula-graph-to-250/). diff --git a/cmake/nebula/GeneralCompilerConfig.cmake b/cmake/nebula/GeneralCompilerConfig.cmake index 3512c1c0d3b..ca9f70c4e6c 100644 --- a/cmake/nebula/GeneralCompilerConfig.cmake +++ b/cmake/nebula/GeneralCompilerConfig.cmake @@ -51,6 +51,8 @@ if(ENABLE_TESTING AND ENABLE_COVERAGE) add_compile_options(--coverage) add_compile_options(-g) add_compile_options(-O0) + nebula_add_exe_linker_flag(-coverage) + nebula_add_exe_linker_flag(-lgcov) endif() # TODO(doodle) Add option suggest-override for gnu diff --git a/conf/nebula-storaged.conf.default b/conf/nebula-storaged.conf.default index 79c1e8cd48b..4aead99ba67 100644 --- a/conf/nebula-storaged.conf.default +++ b/conf/nebula-storaged.conf.default @@ -90,8 +90,10 @@ # * kAll, Collect all stats --rocksdb_stats_level=kExceptHistogramOrTimers -# Whether or not to enable rocksdb's prefix bloom filter, disabled by default. ---enable_rocksdb_prefix_filtering=false +# Whether or not to enable rocksdb's prefix bloom filter, enabled by default. 
+--enable_rocksdb_prefix_filtering=true +# Whether or not to enable rocksdb's whole key bloom filter, disabled by default. +--enable_rocksdb_whole_key_filtering=false ############## rocksdb Options ############## # rocksdb DBOptions in json, each name and value of option is a string, given as "option_name":"option_value" separated by comma diff --git a/conf/nebula-storaged.conf.production b/conf/nebula-storaged.conf.production index 0ba755fc189..8789ebdd08a 100644 --- a/conf/nebula-storaged.conf.production +++ b/conf/nebula-storaged.conf.production @@ -96,8 +96,10 @@ # * kAll, Collect all stats --rocksdb_stats_level=kExceptHistogramOrTimers -# Whether or not to enable rocksdb's prefix bloom filter, disabled by default. ---enable_rocksdb_prefix_filtering=false +# Whether or not to enable rocksdb's prefix bloom filter, enabled by default. +--enable_rocksdb_prefix_filtering=true +# Whether or not to enable rocksdb's whole key bloom filter, disabled by default. +--enable_rocksdb_whole_key_filtering=false ############### misc #################### --snapshot_part_rate_limit=10485760 diff --git a/src/clients/meta/MetaClient.cpp b/src/clients/meta/MetaClient.cpp index 87e276e5409..0f73cbad02a 100644 --- a/src/clients/meta/MetaClient.cpp +++ b/src/clients/meta/MetaClient.cpp @@ -19,6 +19,7 @@ #include "common/http/HttpClient.h" #include "common/meta/NebulaSchemaProvider.h" #include "common/network/NetworkUtils.h" +#include "common/ssl/SSLConfig.h" #include "common/stats/StatsManager.h" #include "common/time/TimeUtils.h" #include "version/Version.h" @@ -49,7 +50,8 @@ MetaClient::MetaClient(std::shared_ptr ioThreadPool CHECK(ioThreadPool_ != nullptr) << "IOThreadPool is required"; CHECK(!addrs_.empty()) << "No meta server address is specified or can be solved. 
Meta server is required"; - clientsMan_ = std::make_shared>(); + clientsMan_ = std::make_shared>( + FLAGS_enable_ssl || FLAGS_enable_meta_ssl); updateActive(); updateLeader(); bgThread_ = std::make_unique(); @@ -798,6 +800,8 @@ Status MetaClient::handleResponse(const RESP& resp) { return Status::Error("Failed to get meta dir!"); case nebula::cpp2::ErrorCode::E_INVALID_JOB: return Status::Error("No valid job!"); + case nebula::cpp2::ErrorCode::E_JOB_NOT_IN_SPACE: + return Status::Error("Job not in chosen space!"); case nebula::cpp2::ErrorCode::E_BACKUP_EMPTY_TABLE: return Status::Error("Backup empty table!"); case nebula::cpp2::ErrorCode::E_BACKUP_TABLE_FAILED: @@ -1060,6 +1064,21 @@ folly::Future> MetaClient::createSpace(meta::cpp2::SpaceD return future; } +folly::Future> MetaClient::createSpaceAs(const std::string& oldSpaceName, + const std::string& newSpaceName) { + cpp2::CreateSpaceAsReq req; + req.set_old_space_name(oldSpaceName); + req.set_new_space_name(newSpaceName); + folly::Promise> promise; + auto future = promise.getFuture(); + getResponse( + std::move(req), + [](auto client, auto request) { return client->future_createSpaceAs(request); }, + [](cpp2::ExecResp&& resp) -> GraphSpaceID { return resp.get_id().get_space_id(); }, + std::move(promise)); + return future; +} + folly::Future>> MetaClient::listSpaces() { cpp2::ListSpacesReq req; folly::Promise>> promise; diff --git a/src/clients/meta/MetaClient.h b/src/clients/meta/MetaClient.h index 546640b497d..6418e4ceced 100644 --- a/src/clients/meta/MetaClient.h +++ b/src/clients/meta/MetaClient.h @@ -215,6 +215,9 @@ class MetaClient { folly::Future> createSpace(meta::cpp2::SpaceDesc spaceDesc, bool ifNotExists = false); + folly::Future> createSpaceAs(const std::string& oldSpaceName, + const std::string& newSpaceName); + folly::Future>> listSpaces(); folly::Future> getSpace(std::string name); diff --git a/src/clients/meta/test/CMakeLists.txt b/src/clients/meta/test/CMakeLists.txt index b732a4748b6..80fc0bf0434 
100644 --- a/src/clients/meta/test/CMakeLists.txt +++ b/src/clients/meta/test/CMakeLists.txt @@ -10,5 +10,6 @@ nebula_add_test( $ $ $ + $ LIBRARIES gtest ) diff --git a/src/clients/storage/StorageClientBase-inl.h b/src/clients/storage/StorageClientBase-inl.h index 749563a8879..108ff59ea2e 100644 --- a/src/clients/storage/StorageClientBase-inl.h +++ b/src/clients/storage/StorageClientBase-inl.h @@ -8,6 +8,7 @@ #include +#include "common/ssl/SSLConfig.h" #include "common/time/WallClock.h" namespace nebula { @@ -72,7 +73,7 @@ template StorageClientBase::StorageClientBase( std::shared_ptr threadPool, meta::MetaClient* metaClient) : metaClient_(metaClient), ioThreadPool_(threadPool) { - clientsMan_ = std::make_unique>(); + clientsMan_ = std::make_unique>(FLAGS_enable_ssl); } template diff --git a/src/codec/test/CMakeLists.txt b/src/codec/test/CMakeLists.txt index 2ce9788956e..05581db1c94 100644 --- a/src/codec/test/CMakeLists.txt +++ b/src/codec/test/CMakeLists.txt @@ -31,6 +31,7 @@ set(CODEC_TEST_LIBS $ $ $ + $ ) diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index 9122bd1f76f..fe404277c93 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -25,3 +25,4 @@ nebula_add_subdirectory(function) nebula_add_subdirectory(graph) nebula_add_subdirectory(plugin) nebula_add_subdirectory(utils) +nebula_add_subdirectory(ssl) diff --git a/src/common/algorithm/ReservoirSampling.h b/src/common/algorithm/ReservoirSampling.h index 6d846e1ad10..04cc937ad66 100644 --- a/src/common/algorithm/ReservoirSampling.h +++ b/src/common/algorithm/ReservoirSampling.h @@ -37,7 +37,13 @@ class ReservoirSampling final { return false; } - std::vector&& samples() && { return std::move(samples_); } + std::vector samples() { + auto result = std::move(samples_); + samples_.clear(); + samples_.reserve(num_); + cnt_ = 0; + return result; + } private: std::vector samples_; diff --git a/src/common/algorithm/test/ReservoirSamplingTest.cpp 
b/src/common/algorithm/test/ReservoirSamplingTest.cpp index 67e2c22e8ac..1a5d6c32bbb 100644 --- a/src/common/algorithm/test/ReservoirSamplingTest.cpp +++ b/src/common/algorithm/test/ReservoirSamplingTest.cpp @@ -18,7 +18,7 @@ TEST(ReservoirSamplingTest, Sample) { sampler.sampling(std::move(i)); } - auto result = std::move(sampler).samples(); + auto result = sampler.samples(); EXPECT_EQ(5, result.size()); for (auto i : result) { EXPECT_LE(0, i); @@ -27,16 +27,18 @@ TEST(ReservoirSamplingTest, Sample) { } { ReservoirSampling sampler(5); - std::vector sampleSpace = {0, 1, 2}; - for (auto i : sampleSpace) { - sampler.sampling(std::move(i)); - } + for (size_t count = 0; count < 10; count++) { + std::vector sampleSpace = {0, 1, 2}; + for (auto i : sampleSpace) { + sampler.sampling(std::move(i)); + } - auto result = std::move(sampler).samples(); - EXPECT_EQ(3, result.size()); - EXPECT_EQ(0, result[0]); - EXPECT_EQ(1, result[1]); - EXPECT_EQ(2, result[2]); + auto result = sampler.samples(); + EXPECT_EQ(3, result.size()); + EXPECT_EQ(0, result[0]); + EXPECT_EQ(1, result[1]); + EXPECT_EQ(2, result[2]); + } } } } // namespace algorithm diff --git a/src/common/datatypes/DataSet.h b/src/common/datatypes/DataSet.h index b80cec79793..5c42bbc696d 100644 --- a/src/common/datatypes/DataSet.h +++ b/src/common/datatypes/DataSet.h @@ -7,6 +7,8 @@ #ifndef COMMON_DATATYPES_DATASET_H_ #define COMMON_DATATYPES_DATASET_H_ +#include + #include #include #include @@ -153,6 +155,44 @@ struct DataSet { return os.str(); } + // format: + // [ + // { + // "row": [ row-data ], + // "meta": [ metadata ] + // }, + // ] + folly::dynamic toJson() const { + // parse rows to json + auto dataBody = folly::dynamic::array(); + for (auto& row : rows) { + dataBody.push_back(rowToJson(row)); + } + + return dataBody; + } + + // parse Nebula::Row to json + // format: + // { + // "row": [ row-data ], + // "meta": [ metadata ] + // } + folly::dynamic rowToJson(const Row& row) const { + folly::dynamic rowJsonObj = 
folly::dynamic::object(); + auto rowDataList = folly::dynamic::array(); + auto metaDataList = folly::dynamic::array(); + + for (const auto& ele : row.values) { + rowDataList.push_back(ele.toJson()); + metaDataList.push_back(ele.getMetaData()); + } + + rowJsonObj.insert("row", rowDataList); + rowJsonObj.insert("meta", metaDataList); + return rowJsonObj; + } + bool operator==(const DataSet& rhs) const { return colNames == rhs.colNames && rows == rhs.rows; } }; diff --git a/src/common/datatypes/Date.h b/src/common/datatypes/Date.h index 732fc786eda..7afd65f4549 100644 --- a/src/common/datatypes/Date.h +++ b/src/common/datatypes/Date.h @@ -7,6 +7,8 @@ #ifndef COMMON_DATATYPES_DATE_H_ #define COMMON_DATATYPES_DATE_H_ +#include + #include namespace nebula { @@ -62,6 +64,7 @@ struct Date { Date operator-(int64_t days) const; std::string toString() const; + folly::dynamic toJson() const { return toString(); } // Return the number of days since -32768/1/1 int64_t toInt() const; @@ -113,6 +116,8 @@ struct Time { } std::string toString() const; + // 'Z' representing UTC timezone + folly::dynamic toJson() const { return toString() + "Z"; } }; inline std::ostream& operator<<(std::ostream& os, const Time& d) { @@ -203,6 +208,8 @@ struct DateTime { } std::string toString() const; + // 'Z' representing UTC timezone + folly::dynamic toJson() const { return toString() + "Z"; } }; inline std::ostream& operator<<(std::ostream& os, const DateTime& d) { diff --git a/src/common/datatypes/Edge.cpp b/src/common/datatypes/Edge.cpp index 95d04aa41a6..1893fb79fca 100644 --- a/src/common/datatypes/Edge.cpp +++ b/src/common/datatypes/Edge.cpp @@ -32,6 +32,45 @@ std::string Edge::toString() const { return os.str(); } +// format: +// { +// "prop1": val1, +// "prop2": val2, +// } +folly::dynamic Edge::toJson() const { + folly::dynamic propObj = folly::dynamic::object(); + + for (const auto& iter : props) { + propObj.insert(iter.first, iter.second.toJson()); + } + + return propObj; +} + +// Used in 
Json form query result +// format: +// { +// "id": { +// "name": _name, +// "src": srcVID, +// "dst": dstVID, +// "type": _type, +// "ranking": _rankding +// } +// "type": "edge" +// } +folly::dynamic Edge::getMetaData() const { + folly::dynamic edgeMetadataObj = folly::dynamic::object(); + + folly::dynamic edgeIdObj = folly::dynamic::object("name", name)("src", src.toJson())( + "dst", dst.toJson())("type", type)("ranking", ranking); + + edgeMetadataObj.insert("id", edgeIdObj); + edgeMetadataObj.insert("type", "edge"); + + return edgeMetadataObj; +} + bool Edge::contains(const Value& key) const { if (!key.isStr()) { return false; diff --git a/src/common/datatypes/Edge.h b/src/common/datatypes/Edge.h index 32440217eda..b1c3247af84 100644 --- a/src/common/datatypes/Edge.h +++ b/src/common/datatypes/Edge.h @@ -50,6 +50,9 @@ struct Edge { void __clear() { clear(); } std::string toString() const; + folly::dynamic toJson() const; + // Used in Json form query result + folly::dynamic getMetaData() const; bool operator==(const Edge& rhs) const; diff --git a/src/common/datatypes/List.cpp b/src/common/datatypes/List.cpp index ca747dee6b9..ac2af1679ff 100644 --- a/src/common/datatypes/List.cpp +++ b/src/common/datatypes/List.cpp @@ -22,4 +22,24 @@ std::string List::toString() const { return os.str(); } +folly::dynamic List::toJson() const { + auto listJsonObj = folly::dynamic::array(); + + for (const auto& val : values) { + listJsonObj.push_back(val.toJson()); + } + + return listJsonObj; +} + +folly::dynamic List::getMetaData() const { + auto listMetadataObj = folly::dynamic::array(); + + for (const auto& val : values) { + listMetadataObj.push_back(val.getMetaData()); + } + + return listMetadataObj; +} + } // namespace nebula diff --git a/src/common/datatypes/List.h b/src/common/datatypes/List.h index c5622820905..be768a0de08 100644 --- a/src/common/datatypes/List.h +++ b/src/common/datatypes/List.h @@ -65,6 +65,9 @@ struct List { size_t size() const { return values.size(); } 
std::string toString() const; + folly::dynamic toJson() const; + // Extract the metadata of each element + folly::dynamic getMetaData() const; }; inline std::ostream& operator<<(std::ostream& os, const List& l) { return os << l.toString(); } diff --git a/src/common/datatypes/Map.cpp b/src/common/datatypes/Map.cpp index b9839c120ae..7ab73d5c9b7 100644 --- a/src/common/datatypes/Map.cpp +++ b/src/common/datatypes/Map.cpp @@ -14,7 +14,7 @@ namespace nebula { std::string Map::toString() const { std::vector value(kvs.size()); - std::transform(kvs.begin(), kvs.end(), value.begin(), [](const auto &iter) -> std::string { + std::transform(kvs.begin(), kvs.end(), value.begin(), [](const auto& iter) -> std::string { std::stringstream out; out << iter.first << ":" << iter.second; return out.str(); @@ -25,4 +25,24 @@ std::string Map::toString() const { return os.str(); } +folly::dynamic Map::toJson() const { + folly::dynamic mapJsonObj = folly::dynamic::object(); + + for (const auto& iter : kvs) { + mapJsonObj.insert(iter.first, iter.second.toJson()); + } + + return mapJsonObj; +} + +folly::dynamic Map::getMetaData() const { + auto mapMetadataObj = folly::dynamic::array(); + + for (const auto& kv : kvs) { + mapMetadataObj.push_back(kv.second.getMetaData()); + } + + return mapMetadataObj; +} + } // namespace nebula diff --git a/src/common/datatypes/Map.h b/src/common/datatypes/Map.h index 7f7ec9b6a81..333e5d0cb0d 100644 --- a/src/common/datatypes/Map.h +++ b/src/common/datatypes/Map.h @@ -43,6 +43,9 @@ struct Map { // the configs of rocksdb will use the interface, so the value need modify to // string std::string toString() const; + folly::dynamic toJson() const; + // Extract the metadata of the value of each kv pair + folly::dynamic getMetaData() const; bool operator==(const Map& rhs) const { return kvs == rhs.kvs; } diff --git a/src/common/datatypes/Path.h b/src/common/datatypes/Path.h index 1d4e2184ded..17eb77a3cee 100644 --- a/src/common/datatypes/Path.h +++ 
b/src/common/datatypes/Path.h @@ -130,6 +130,55 @@ struct Path { return os.str(); } + folly::dynamic toJson() const { + folly::dynamic pathJsonObj = folly::dynamic::array(); + auto srcVertex = src; + pathJsonObj.push_back(srcVertex.toJson()); + + for (const auto& s : steps) { + folly::dynamic edgeJsonObj = folly::dynamic::object(); + // parse edge props map as json + for (const auto& iter : s.props) { + edgeJsonObj.insert(iter.first, iter.second.toJson()); + } + // add edge json obj to path + pathJsonObj.push_back(edgeJsonObj); + + // reset src vertex and add vertex json obj to path + srcVertex = s.dst; + pathJsonObj.push_back(srcVertex.toJson()); + } + + return pathJsonObj; + } + + // Used in Json form query result + // format: + // [vertex1_metadata, edge1_metadata, vertex2_metadata, edge2_metadata,....] + folly::dynamic getMetaData() const { + auto dynamicObj = folly::dynamic::array(); + auto srcVertex = src; + dynamicObj.push_back(srcVertex.getMetaData()); + + // Construct edge metadata + for (const auto& s : steps) { + folly::dynamic edgeIdObj = folly::dynamic::object(); + edgeIdObj.insert("src", srcVertex.vid.toJson()); + edgeIdObj.insert("dst", s.dst.vid.toJson()); + edgeIdObj.insert("type", s.type); + edgeIdObj.insert("name", s.name); + edgeIdObj.insert("ranking", s.ranking); + + folly::dynamic edgeMetadataObj = folly::dynamic::object("id", edgeIdObj)("type", "edge"); + dynamicObj.push_back(edgeMetadataObj); + dynamicObj.push_back(s.dst.getMetaData()); + // reset src vertex + srcVertex = s.dst; + } + + return dynamicObj; + } + Path& operator=(Path&& rhs) noexcept { if (&rhs != this) { src = std::move(rhs.src); diff --git a/src/common/datatypes/Set.cpp b/src/common/datatypes/Set.cpp index c31ffc1de21..2e0e90a2f5a 100644 --- a/src/common/datatypes/Set.cpp +++ b/src/common/datatypes/Set.cpp @@ -22,4 +22,24 @@ std::string Set::toString() const { return os.str(); } +folly::dynamic Set::toJson() const { + auto setJsonObj = folly::dynamic::array(); + + for (const 
auto& val : values) { + setJsonObj.push_back(val.toJson()); + } + + return setJsonObj; +} + +folly::dynamic Set::getMetaData() const { + auto setMetadataObj = folly::dynamic::array(); + + for (const auto& val : values) { + setMetadataObj.push_back(val.getMetaData()); + } + + return setMetadataObj; +} + } // namespace nebula diff --git a/src/common/datatypes/Set.h b/src/common/datatypes/Set.h index e844aa879eb..dabc33e0d7e 100644 --- a/src/common/datatypes/Set.h +++ b/src/common/datatypes/Set.h @@ -26,6 +26,9 @@ struct Set { void __clear() { clear(); } std::string toString() const; + folly::dynamic toJson() const; + // Extract the metadata of each element + folly::dynamic getMetaData() const; Set& operator=(const Set& rhs) { if (this == &rhs) { diff --git a/src/common/datatypes/Value.cpp b/src/common/datatypes/Value.cpp index ad6e82e4043..19545de8135 100644 --- a/src/common/datatypes/Value.cpp +++ b/src/common/datatypes/Value.cpp @@ -1344,6 +1344,116 @@ void Value::setG(DataSet&& v) { new (std::addressof(value_.gVal)) std::unique_ptr(new DataSet(std::move(v))); } +// Convert Nebula::Value to a value compatible with Json standard +// DATE, TIME, DATETIME will be converted to strings in UTC +// VERTEX, EDGES, PATH will be converted to objects +folly::dynamic Value::toJson() const { + switch (type_) { + case Value::Type::__EMPTY__: { + return "__EMPTY__"; + } + // Json null + case Value::Type::NULLVALUE: { + return folly::dynamic(nullptr); + } + // Json bool + case Value::Type::BOOL: { + return folly::dynamic(getBool()); + } + // Json int + case Value::Type::INT: { + return folly::dynamic(getInt()); + } + // json double + case Value::Type::FLOAT: { + return folly::dynamic(getFloat()); + } + // json string + case Value::Type::STRING: { + return folly::dynamic(getStr()); + } + // Json array + case Value::Type::LIST: { + return getList().toJson(); + } + case Value::Type::SET: { + return getSet().toJson(); + } + // Json object + case Value::Type::MAP: { + return 
getMap().toJson(); + } + case Value::Type::DATE: { + return getDate().toJson(); + } + case Value::Type::TIME: { + return getTime().toJson(); + } + case Value::Type::DATETIME: { + return getDateTime().toJson(); + } + case Value::Type::EDGE: { + return getEdge().toJson(); + } + case Value::Type::VERTEX: { + return getVertex().toJson(); + } + case Value::Type::PATH: { + return getPath().toJson(); + } + case Value::Type::DATASET: { + return getDataSet().toJson(); + } + // no default so the compiler will warning when lack + } + + LOG(FATAL) << "Unknown value type " << static_cast(type_); +} + +folly::dynamic Value::getMetaData() const { + auto dynamicObj = folly::dynamic(); + switch (type_) { + // Privative datatypes has no meta data + case Value::Type::__EMPTY__: + case Value::Type::BOOL: + case Value::Type::INT: + case Value::Type::FLOAT: + case Value::Type::STRING: + case Value::Type::DATASET: + case Value::Type::NULLVALUE: { + return folly::dynamic(nullptr); + } + // Extract the meta info of each element as the metadata of the container + case Value::Type::LIST: { + return getList().getMetaData(); + } + case Value::Type::SET: { + return getSet().getMetaData(); + } + case Value::Type::MAP: { + return getMap().getMetaData(); + } + case Value::Type::DATE: + case Value::Type::TIME: + case Value::Type::DATETIME: { + return folly::dynamic::object("type", typeName()); + } + case Value::Type::VERTEX: { + return getVertex().getMetaData(); + } + case Value::Type::EDGE: { + return getEdge().getMetaData(); + } + case Value::Type::PATH: { + return getPath().getMetaData(); + } + default: + break; + } + + LOG(FATAL) << "Unknown value type " << static_cast(type_); +} + std::string Value::toString() const { switch (type_) { case Value::Type::__EMPTY__: { diff --git a/src/common/datatypes/Value.h b/src/common/datatypes/Value.h index 00c413cb4a9..57dc60df4e4 100644 --- a/src/common/datatypes/Value.h +++ b/src/common/datatypes/Value.h @@ -7,6 +7,8 @@ #ifndef COMMON_DATATYPES_VALUE_H_ 
#define COMMON_DATATYPES_VALUE_H_ +#include + #include #include "common/datatypes/Date.h" @@ -268,6 +270,9 @@ struct Value { static const Value& null() noexcept { return kNullValue; } std::string toString() const; + folly::dynamic toJson() const; + // Used in Json form query result + folly::dynamic getMetaData() const; Value toBool() const; Value toFloat() const; diff --git a/src/common/datatypes/Vertex.cpp b/src/common/datatypes/Vertex.cpp index 1d0f227025e..9f839a26831 100644 --- a/src/common/datatypes/Vertex.cpp +++ b/src/common/datatypes/Vertex.cpp @@ -6,6 +6,7 @@ #include "common/datatypes/Vertex.h" +#include #include #include @@ -24,6 +25,20 @@ std::string Tag::toString() const { return os.str(); } +// { +// "player.name" : "Tim Duncan", +// "player.age" : 42, +// } +folly::dynamic Tag::toJson() const { + folly::dynamic tagJsonObj = folly::dynamic::object(); + + for (const auto& iter : props) { + tagJsonObj.insert(name + "." + iter.first, iter.second.toJson()); + } + + return tagJsonObj; +} + bool Vertex::contains(const Value& key) const { if (!key.isStr()) { return false; @@ -59,6 +74,31 @@ std::string Vertex::toString() const { return os.str(); } +// { +// "player.name" : "Tim Duncan", +// "player.age" : 42, +// "bachelor.name" : "Tim Duncan", +// "bachelor.speciality" : "psychology" +// } +folly::dynamic Vertex::toJson() const { + folly::dynamic propJsonObj = folly::dynamic::object(); + + for (const auto& tag : tags) { + propJsonObj.update(tag.toJson()); + } + return propJsonObj; +} + +// format: +// { +// "id": _vid +// "type": "vertex" +// } +folly::dynamic Vertex::getMetaData() const { + folly::dynamic vertexMetadataObj = folly::dynamic::object("id", vid.toJson())("type", "vertex"); + return vertexMetadataObj; +} + Vertex& Vertex::operator=(Vertex&& rhs) noexcept { if (&rhs != this) { vid = std::move(rhs.vid); diff --git a/src/common/datatypes/Vertex.h b/src/common/datatypes/Vertex.h index a0c60318692..08d50b40f0e 100644 --- 
a/src/common/datatypes/Vertex.h +++ b/src/common/datatypes/Vertex.h @@ -34,6 +34,7 @@ struct Tag { void __clear() { clear(); } std::string toString() const; + folly::dynamic toJson() const; Tag& operator=(Tag&& rhs) noexcept { if (&rhs != this) { @@ -71,6 +72,9 @@ struct Vertex { void __clear() { clear(); } std::string toString() const; + folly::dynamic toJson() const; + // Used in Json form query result + folly::dynamic getMetaData() const; Vertex& operator=(Vertex&& rhs) noexcept; diff --git a/src/common/datatypes/test/CMakeLists.txt b/src/common/datatypes/test/CMakeLists.txt index a45c5d2ffd6..54517fda196 100644 --- a/src/common/datatypes/test/CMakeLists.txt +++ b/src/common/datatypes/test/CMakeLists.txt @@ -79,6 +79,25 @@ nebula_add_test( gtest ) +nebula_add_test( + NAME + value_to_json_test + SOURCES + ValueToJsonTest.cpp + OBJECTS + $ + $ + $ + $ + $ + $ + $ + $ + LIBRARIES + gtest + ${THRIFT_LIBRARIES} +) + nebula_add_executable( NAME edge_bm diff --git a/src/common/datatypes/test/ValueToJsonTest.cpp b/src/common/datatypes/test/ValueToJsonTest.cpp new file mode 100644 index 00000000000..3f729d37a6b --- /dev/null +++ b/src/common/datatypes/test/ValueToJsonTest.cpp @@ -0,0 +1,314 @@ +/* Copyright (c) 2020 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License, + * attached with Common Clause Condition 1.0, found in the LICENSES directory. 
+ */ +#include +#include + +#include "common/base/Base.h" +#include "common/datatypes/CommonCpp2Ops.h" +#include "common/datatypes/DataSet.h" +#include "common/datatypes/Date.h" +#include "common/datatypes/Edge.h" +#include "common/datatypes/List.h" +#include "common/datatypes/Map.h" +#include "common/datatypes/Path.h" +#include "common/datatypes/Set.h" +#include "common/datatypes/Value.h" +#include "common/datatypes/Vertex.h" + +namespace nebula { + +using folly::dynamic; +using serializer = apache::thrift::CompactSerializer; + +TEST(ValueToJson, vertex) { + // Test tag to json + auto tag1 = Tag("tagName", {{"prop", Value(2)}}); + auto tag2 = + Tag("tagName1", + {{"prop1", Value(2)}, {"prop2", Value(NullType::__NULL__)}, {"prop3", Value("123")}}); + { + dynamic expectedTagJson = dynamic::object("tagName.prop", 2); + ASSERT_EQ(expectedTagJson, tag1.toJson()); + } + { + dynamic expectedTagJson = + dynamic::object("tagName1.prop1", 2)("tagName1.prop2", nullptr)("tagName1.prop3", "123"); + ASSERT_EQ(expectedTagJson, tag2.toJson()); + } + + // vertex wtih string vid + auto vertexStrVid = Value(Vertex({"Vid", + { + tag1, + tag2, + }})); + + // integerID vertex + auto vertexIntVid = Value(Vertex({001, + { + tag1, + tag2, + }})); + { + dynamic expectedVeretxJson = dynamic::object("tagName.prop", 2)("tagName1.prop1", 2)( + "tagName1.prop2", nullptr)("tagName1.prop3", "123"); + ASSERT_EQ(expectedVeretxJson, vertexStrVid.toJson()); + + dynamic expectedVeretxMetaJson = dynamic::object("id", "Vid")("type", "vertex"); + ASSERT_EQ(expectedVeretxMetaJson, vertexStrVid.getMetaData()); + } + { + dynamic expectedVeretxJson = dynamic::object("tagName.prop", 2)("tagName1.prop1", 2)( + "tagName1.prop2", nullptr)("tagName1.prop3", "123"); + ASSERT_EQ(expectedVeretxJson, vertexIntVid.toJson()); + + dynamic expectedVeretxMetaJson = dynamic::object("id", 001)("type", "vertex"); + ASSERT_EQ(expectedVeretxMetaJson, vertexIntVid.getMetaData()); + } +} + +TEST(ValueToJson, edge) { + // edge + 
auto edge1 = + Value(Edge("Src", "Dst", 1, "Edge", 233, {{"prop1", Value(233)}, {"prop2", Value(2.3)}})); + // integerID edge + auto edge2 = + Value(Edge(101, 102, 1, "Edge", 233, {{"prop1", Value(233)}, {"prop2", Value(2.3)}})); + { + dynamic expectedEdgeJson = dynamic::object("prop1", 233)("prop2", 2.3); + ASSERT_EQ(expectedEdgeJson, edge1.toJson()); + + dynamic expectedEdgeMetaJson = + dynamic::object("id", + dynamic::object("name", "Edge")("src", "Src")("dst", "Dst")("type", 1)( + "name", "Edge")("ranking", 233))("type", "edge"); + ASSERT_EQ(expectedEdgeMetaJson, edge1.getMetaData()); + } + + { + dynamic expectedEdgeJson = dynamic::object("prop1", 233)("prop2", 2.3); + ASSERT_EQ(expectedEdgeJson, edge2.toJson()); + + dynamic expectedEdgeMetaJson = + dynamic::object("id", + dynamic::object("name", "Edge")("src", 101)("dst", 102)("type", 1)( + "name", "Edge")("ranking", 233))("type", "edge"); + ASSERT_EQ(expectedEdgeMetaJson, edge2.getMetaData()); + } +} + +TEST(ValueToJson, path) { + auto path = Value(Path(Vertex({"v1", {Tag("tagName", {{"prop1", Value(1)}})}}), + {Step(Vertex({"v2", + {Tag("tagName2", + {{"prop1", Value(2)}, + {"prop2", Value(NullType::__NULL__)}, + {"prop3", Value("123")}})}}), + 1, + "edgeName", + 100, + {{"edgeProp", "edgePropVal"}})})); + auto emptyPath = Value(Path()); + + dynamic expectedPathJsonObj = dynamic::array( + dynamic::object("tagName.prop1", 1), + dynamic::object("edgeProp", "edgePropVal"), + dynamic::object("tagName2.prop1", 2)("tagName2.prop2", nullptr)("tagName2.prop3", "123")); + ASSERT_EQ(expectedPathJsonObj, path.toJson()); + + dynamic expectedPathMetaJson = dynamic::array( + dynamic::object("id", "v1")("type", "vertex"), + dynamic::object("id", + dynamic::object("name", "Edge")("src", "v1")("dst", "v2")("type", 1)( + "name", "edgeName")("ranking", 100))("type", "edge"), + dynamic::object("id", "v2")("type", "vertex")); + ASSERT_EQ(expectedPathMetaJson, path.getMetaData()); +} + +TEST(ValueToJson, list) { + auto list1 = 
Value(List({Value(2), // int + Value(2.33), // float + Value(true), // bool + Value("str"), // string + Date(2021, 12, 21), // date + Time(13, 30, 15, 0), // time + DateTime(2021, 12, 21, 13, 30, 15, 0)})); // datetime + dynamic expectedListJsonObj = dynamic::array( + 2, 2.33, true, "str", "2021-12-21", "13:30:15.000000Z", "2021-12-21T13:30:15.0Z"); + ASSERT_EQ(expectedListJsonObj, list1.toJson()); + + dynamic expectedListMetaObj = dynamic::array(nullptr, + nullptr, + nullptr, + nullptr, + dynamic::object("type", "date"), + dynamic::object("type", "time"), + dynamic::object("type", "datetime")); + ASSERT_EQ(expectedListMetaObj, list1.getMetaData()); +} + +TEST(ValueToJson, Set) { + auto set = Value(Set({Value(2), // int + Value(2.33), // float + Value(true), // bool + Value("str"), // string + Date(2021, 12, 21), // date + Time(13, 30, 15, 0), // time + DateTime(2021, 12, 21, 13, 30, 15, 0)})); // datetime + dynamic expectedSetJsonObj = dynamic::array( + 2, 2.33, true, "str", "2021-12-21", "13:30:15.000000Z", "2021-12-21T13:30:15.0Z"); + // The underlying data strcuture is unordered_set, so sort before the comparison + auto actualJson = set.toJson(); + std::sort(actualJson.begin(), actualJson.end()); + std::sort(expectedSetJsonObj.begin(), expectedSetJsonObj.end()); + ASSERT_EQ(expectedSetJsonObj, actualJson); + + // Skip meta json comparison since nested dynamic objects cannot be sorted. i.g. 
dynamic::object + // inside dynamic::array +} + +TEST(ValueToJson, map) { + auto map = Value(Map({{"key1", Value(2)}, // int + {"key2", Value(2.33)}, // float + {"key3", Value(true)}, // bool + {"key4", Value("str")}, // string + {"key5", Date(2021, 12, 21)}, // date + {"key6", Time(13, 30, 15, 0)}, // time + {"key7", DateTime(2021, 12, 21, 13, 30, 15, 0)}})); // datetime + dynamic expectedMapJsonObj = + dynamic::object("key1", 2)("key2", 2.33)("key3", true)("key4", "str")("key5", "2021-12-21")( + "key6", "13:30:15.000000Z")("key7", "2021-12-21T13:30:15.0Z"); + ASSERT_EQ(expectedMapJsonObj, map.toJson()); + // Skip meta json comparison since nested dynamic objects cannot be sorted. i.g. dynamic::object + // inside dynamic::array +} + +TEST(ValueToJson, dataset) { + DataSet dataset = DataSet({"col1", "col2", "col3", "col4", "col5", "col6", "col7"}); + dataset.emplace_back(List({Value(2), // int + Value(2.33), // float + Value(true), // bool + Value("str"), // string + Date(2021, 12, 21), // date + Time(13, 30, 15, 0), // time + DateTime(2021, 12, 21, 13, 30, 15, 0)})); + dynamic expectedDatasetJsonObj = dynamic::array(dynamic::object( + "row", + dynamic::array( + 2, 2.33, true, "str", "2021-12-21", "13:30:15.000000Z", "2021-12-21T13:30:15.0Z"))( + "meta", + dynamic::array(nullptr, + nullptr, + nullptr, + nullptr, + dynamic::object("type", "date"), + dynamic::object("type", "time"), + dynamic::object("type", "datetime")))); + ASSERT_EQ(expectedDatasetJsonObj, dataset.toJson()); +} + +TEST(ValueToJson, DecodeEncode) { + std::vector values{ + // empty + Value(), + + // null + Value(NullType::__NULL__), + Value(NullType::DIV_BY_ZERO), + Value(NullType::BAD_DATA), + Value(NullType::ERR_OVERFLOW), + Value(NullType::OUT_OF_RANGE), + Value(NullType::UNKNOWN_PROP), + + // int + Value(0), + Value(1), + Value(2), + + // float + Value(3.14), + Value(2.67), + + // string + Value("Hello "), + Value("World"), + + // bool + Value(false), + Value(true), + + // date + 
Value(Date(2020, 1, 1)), + Value(Date(2019, 12, 1)), + + // time + Value(Time{1, 2, 3, 4}), + + // datatime + Value(DateTime{1, 2, 3, 4, 5, 6, 7}), + + // vertex + Value( + Vertex({"Vid", + { + Tag("tagName", {{"prop", Value(2)}}), + Tag("tagName1", {{"prop1", Value(2)}, {"prop2", Value(NullType::__NULL__)}}), + }})), + + // integerID vertex + Value( + Vertex({001, + { + Tag("tagName", {{"prop", Value(2)}}), + Tag("tagName1", {{"prop1", Value(2)}, {"prop2", Value(NullType::__NULL__)}}), + }})), + + // edge + Value(Edge("Src", "Dst", 1, "Edge", 233, {{"prop1", Value(233)}, {"prop2", Value(2.3)}})), + + // integerID edge + Value(Edge(001, 002, 1, "Edge", 233, {{"prop1", Value(233)}, {"prop2", Value(2.3)}})), + + // Path + Value(Path( + Vertex({"1", {Tag("tagName", {{"prop", Value(2)}})}}), + {Step(Vertex({"1", {Tag("tagName", {{"prop", Value(2)}})}}), 1, "1", 1, {{"1", 1}})})), + Value(Path()), + + // List + Value(List({Value(2), Value(true), Value(2.33)})), + + // Set + Value(Set({Value(2), Value(true), Value(2.33)})), + + // Map + Value(Map({{"Key1", Value(2)}, {"Key2", Value(true)}, {"Key3", Value(2.33)}})), + + // DataSet + Value(DataSet({"col1", "col2"})), + }; + for (const auto& val : values) { + std::string buf; + buf.reserve(128); + folly::dynamic jsonObj = val.toJson(); + auto jsonString = folly::toJson(jsonObj); + serializer::serialize(jsonString, &buf); + std::string valCopy; + std::size_t s = serializer::deserialize(buf, valCopy); + ASSERT_EQ(s, buf.size()); + EXPECT_EQ(jsonString, valCopy); + } +} + +} // namespace nebula + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + folly::init(&argc, &argv, true); + google::SetStderrLogging(google::INFO); + + return RUN_ALL_TESTS(); +} diff --git a/src/common/function/test/FunctionManagerTest.cpp b/src/common/function/test/FunctionManagerTest.cpp index 817eb6ee15e..a02bc86a609 100644 --- a/src/common/function/test/FunctionManagerTest.cpp +++ 
b/src/common/function/test/FunctionManagerTest.cpp @@ -37,10 +37,12 @@ class FunctionManagerTest : public ::testing::Test { } auto res = result.value()(argsRef); if (res.type() != expect.type()) { - return ::testing::AssertionFailure() << "function return type check failed: " << expr; + return ::testing::AssertionFailure() << "function return type check failed, expect " + << expect.type() << ", got " << res.type(); } if (res != expect) { - return ::testing::AssertionFailure() << "function return value check failed: " << expr; + return ::testing::AssertionFailure() + << "function return value check failed, expect " << expect << ", got " << res; } return ::testing::AssertionSuccess(); } @@ -519,6 +521,42 @@ TEST_F(FunctionManagerTest, functionCall) { {Map({{"hour", 20}, {"minute", 9}, {"second", 15}})}, Value(time::TimeUtils::timeToUTC(Time(20, 9, 15, 0)))); } + { + TEST_FUNCTION(time, + {Map({{"hour", 20}, + {"minute", 9}, + {"second", 15}, + {"millisecond", 10}, + {"microsecond", 15}})}, + Value(time::TimeUtils::timeToUTC(Time(20, 9, 15, 10015)))); + } + { + TEST_FUNCTION(time, + {Map({{"hour", 20}, + {"minute", 9}, + {"second", 15}, + {"millisecond", 999}, + {"microsecond", 999}})}, + Value(time::TimeUtils::timeToUTC(Time(20, 9, 15, 999999)))); + } + { + TEST_FUNCTION(time, + {Map({{"hour", 20}, + {"minute", 9}, + {"second", 15}, + {"millisecond", 1000}, + {"microsecond", 999}})}, + Value::kNullBadData); + } + { + TEST_FUNCTION(time, + {Map({{"hour", 20}, + {"minute", 9}, + {"second", 15}, + {"millisecond", 999}, + {"microsecond", 1000}})}, + Value::kNullBadData); + } // range [(0, 0, 0, 0), (23, 59, 59, 999999)] { TEST_FUNCTION(time, @@ -566,8 +604,46 @@ TEST_F(FunctionManagerTest, functionCall) { {"day", 15}, {"hour", 20}, {"minute", 9}, - {"second", 15}})}, - Value(time::TimeUtils::dateTimeToUTC(DateTime(2020, 9, 15, 20, 9, 15, 0)))); + {"second", 15}, + {"millisecond", 10}, + {"microsecond", 15}})}, + Value(time::TimeUtils::dateTimeToUTC(DateTime(2020, 9, 15, 
20, 9, 15, 10015)))); + } + { + TEST_FUNCTION(datetime, + {Map({{"year", 2020}, + {"month", 9}, + {"day", 15}, + {"hour", 20}, + {"minute", 9}, + {"second", 15}, + {"millisecond", 999}, + {"microsecond", 999}})}, + Value(time::TimeUtils::dateTimeToUTC(DateTime(2020, 9, 15, 20, 9, 15, 999999)))); + } + { + TEST_FUNCTION(datetime, + {Map({{"year", 2020}, + {"month", 9}, + {"day", 15}, + {"hour", 20}, + {"minute", 9}, + {"second", 15}, + {"millisecond", 1000}, + {"microsecond", 999}})}, + Value::kNullBadData); + } + { + TEST_FUNCTION(datetime, + {Map({{"year", 2020}, + {"month", 9}, + {"day", 15}, + {"hour", 20}, + {"minute", 9}, + {"second", 15}, + {"millisecond", 999}, + {"microsecond", 1000}})}, + Value::kNullBadData); } { TEST_FUNCTION(datetime, diff --git a/src/common/graph/Response.cpp b/src/common/graph/Response.cpp index a03885bd1fa..82f3def31c7 100644 --- a/src/common/graph/Response.cpp +++ b/src/common/graph/Response.cpp @@ -31,7 +31,7 @@ bool PlanNodeDescription::operator==(const PlanNodeDescription& rhs) const { case ErrorCode::EnumName: \ return #EnumName; -const char* errorCode(ErrorCode code) { +const char* getErrorCode(ErrorCode code) { switch (code) { ErrorCodeEnums } return "Unknown error"; } diff --git a/src/common/graph/Response.h b/src/common/graph/Response.h index 0adfd2b52cf..3d7ee8b88c1 100644 --- a/src/common/graph/Response.h +++ b/src/common/graph/Response.h @@ -7,6 +7,9 @@ #ifndef COMMON_GRAPH_RESPONSE_H #define COMMON_GRAPH_RESPONSE_H +#include +#include + #include #include #include @@ -183,10 +186,10 @@ enum class ErrorCode { ErrorCodeEnums }; #undef X -const char *errorCode(ErrorCode code); +const char *getErrorCode(ErrorCode code); static inline std::ostream &operator<<(std::ostream &os, ErrorCode code) { - os << errorCode(code); + os << getErrorCode(code); return os; } @@ -266,6 +269,16 @@ struct ProfilingStats { int64_t totalDurationInUs{0}; // Other profiling stats data map std::unique_ptr> otherStats; + + folly::dynamic toJson() 
const { + folly::dynamic ProfilingStatsObj = folly::dynamic::object(); + ProfilingStatsObj.insert("rows", rows); + ProfilingStatsObj.insert("execDurationInUs", execDurationInUs); + ProfilingStatsObj.insert("totalDurationInUs", totalDurationInUs); + ProfilingStatsObj.insert("otherStats", folly::toDynamic(*otherStats)); + + return ProfilingStatsObj; + } }; // The info used for select/loop. @@ -285,6 +298,14 @@ struct PlanNodeBranchInfo { bool isDoBranch{0}; // select/loop node id int64_t conditionNodeId{-1}; + + folly::dynamic toJson() const { + folly::dynamic PlanNodeBranchInfoObj = folly::dynamic::object(); + PlanNodeBranchInfoObj.insert("isDoBranch", isDoBranch); + PlanNodeBranchInfoObj.insert("conditionNodeId", conditionNodeId); + + return PlanNodeBranchInfoObj; + } }; struct Pair { @@ -299,6 +320,11 @@ struct Pair { std::string key; std::string value; + + folly::dynamic toJson() const { + folly::dynamic pairObj = folly::dynamic::object(key, value); + return pairObj; + } }; struct PlanNodeDescription { @@ -326,6 +352,32 @@ struct PlanNodeDescription { std::unique_ptr> profiles{nullptr}; std::unique_ptr branchInfo{nullptr}; std::unique_ptr> dependencies{nullptr}; + + folly::dynamic toJson() const { + folly::dynamic planNodeDescObj = folly::dynamic::object(); + planNodeDescObj.insert("name", name); + planNodeDescObj.insert("id", id); + planNodeDescObj.insert("outputVar", outputVar); + + auto descriptionObj = folly::dynamic::array(); + descriptionObj.resize(description->size()); + std::transform( + description->begin(), description->end(), descriptionObj.begin(), [](const auto &ele) { + return ele.toJson(); + }); + planNodeDescObj.insert("description", descriptionObj); + + auto profilesObj = folly::dynamic::array(); + profilesObj.resize(profiles->size()); + std::transform(profiles->begin(), profiles->end(), profilesObj.begin(), [](const auto &ele) { + return ele.toJson(); + }); + planNodeDescObj.insert("profiles", profilesObj); + planNodeDescObj.insert("branchInfo", 
branchInfo->toJson()); + planNodeDescObj.insert("dependencies", folly::toDynamic(*dependencies)); + + return planNodeDescObj; + } }; struct PlanDescription { @@ -350,6 +402,29 @@ struct PlanDescription { std::string format; // the optimization spent time int32_t optimize_time_in_us{0}; + + folly::dynamic toJson() const { + folly::dynamic PlanDescObj = folly::dynamic::object(); + + auto planNodeDescsObj = folly::dynamic::array(); + planNodeDescsObj.resize(planNodeDescs.size()); + std::transform(planNodeDescs.begin(), + planNodeDescs.end(), + planNodeDescsObj.begin(), + [](const PlanNodeDescription &ele) { return ele.toJson(); }); + PlanDescObj.insert("planNodeDescs", planNodeDescsObj); + // nodeIndexMap uses int as the key of the map, but strict json format only accepts string as + // the key, so convert the int to string here. + folly::dynamic nodeIndexMapObj = folly::dynamic::object(); + for (const auto &kv : nodeIndexMap) { + nodeIndexMapObj.insert(folly::to(kv.first), kv.second); + } + PlanDescObj.insert("nodeIndexMap", nodeIndexMapObj); + PlanDescObj.insert("format", format); + PlanDescObj.insert("optimize_time_in_us", optimize_time_in_us); + + return PlanDescObj; + } }; struct ExecutionResponse { @@ -397,6 +472,83 @@ struct ExecutionResponse { std::unique_ptr errorMsg{nullptr}; std::unique_ptr planDesc{nullptr}; std::unique_ptr comment{nullptr}; + + // Return the response as a json string + // format + // "results": [ + // { + // "columns": [], + // "data": [ + // { + // "row": [ row-data ], + // "meta": [ metadata ] + // }, + // ], + // "latencyInUs" : 0, + // "spaceName": "", + // "planDesc ": { + // "planNodeDescs": [ { + // "name" : "", + // "id" : 0, + // "outputVar" : "", + // "description" : {"key" : ""}, + // "profiles" : [{ + // "rows" : 1, + // "execDurationInUs" : 0, + // "totalDurationInUs" : 0, + // "otherStats" : {}, // map + // }], + // "branchInfo" : { + // "isDoBranch" : false, + // "conditionNodeId" : -1, + // }, + // "dependencies" : [] // 
vector of ints + // } + // ], + // "nodeIndexMap" : {}, + // "format" : "", + // "optimize_time_in_us" : 0, + // }, + // "comment ": "", + // "errors" : "" // errorMsg + // } + // ] + // } + folly::dynamic toJson() const { + folly::dynamic respJsonObj = folly::dynamic::object(); + folly::dynamic resultBody = folly::dynamic::object(); + + // required fields + folly::dynamic errorsBody = folly::dynamic::object(); + errorsBody.insert("errorCode", getErrorCode(errorCode)); + resultBody.insert("latencyInUs", latencyInUs); + + // optional fields + if (errorMsg) { + errorsBody.insert("errorMsg", *errorMsg); + } + resultBody.insert("errors", errorsBody); + + if (data) { + resultBody.insert("columns", folly::toDynamic(data->keys())); + resultBody.insert("data", data->toJson()); + } + if (spaceName) { + resultBody.insert("spaceName", *spaceName); + } + if (planDesc) { + resultBody.insert("planDesc", planDesc->toJson()); + } + if (comment) { + resultBody.insert("comment", *comment); + } + + auto resultArray = folly::dynamic::array(); + resultArray.push_back(resultBody); + respJsonObj.insert("results", resultArray); + + return respJsonObj; + } }; } // namespace nebula diff --git a/src/common/graph/tests/ResponseEncodeDecodeTest.cpp b/src/common/graph/tests/ResponseEncodeDecodeTest.cpp index 378525e0617..ec0a024df8c 100644 --- a/src/common/graph/tests/ResponseEncodeDecodeTest.cpp +++ b/src/common/graph/tests/ResponseEncodeDecodeTest.cpp @@ -89,4 +89,57 @@ TEST(ResponseEncodDecodeTest, Basic) { } } +TEST(ResponseEncodDecodeTest, ToJson) { + // plan description + { + std::vector pds; + pds.emplace_back(PlanDescription{}); + pds.emplace_back(PlanDescription{std::vector{}, + std::unordered_map{{1, 2}, {4, 7}}, + "format"}); + for (const auto &pd : pds) { + std::string buf; + buf.reserve(128); + folly::dynamic jsonObj = pd.toJson(); + auto jsonString = folly::toJson(jsonObj); + serializer::serialize(jsonString, &buf); + std::string copy; + std::size_t s = 
serializer::deserialize(buf, copy); + ASSERT_EQ(s, buf.size()); + EXPECT_EQ(jsonString, copy); + } + } + // response + { + std::vector resps; + resps.emplace_back(ExecutionResponse{}); + resps.emplace_back(ExecutionResponse{ErrorCode::SUCCEEDED, 233}); + resps.emplace_back(ExecutionResponse{ErrorCode::SUCCEEDED, + 233, + std::make_unique(), + std::make_unique("test_space")}); + resps.emplace_back(ExecutionResponse{ErrorCode::SUCCEEDED, + 233, + nullptr, + std::make_unique("test_space"), + nullptr, + std::make_unique()}); + resps.emplace_back(ExecutionResponse{ErrorCode::E_SYNTAX_ERROR, + 233, + nullptr, + std::make_unique("test_space"), + std::make_unique("Error Msg.")}); + for (const auto &resp : resps) { + std::string buf; + buf.reserve(128); + folly::dynamic jsonObj = resp.toJson(); + auto jsonString = folly::toJson(jsonObj); + serializer::serialize(jsonString, &buf); + std::string copy; + std::size_t s = serializer::deserialize(buf, copy); + ASSERT_EQ(s, buf.size()); + EXPECT_EQ(jsonString, copy); + } + } +} } // namespace nebula diff --git a/src/common/ssl/CMakeLists.txt b/src/common/ssl/CMakeLists.txt new file mode 100644 index 00000000000..eb19ddc3442 --- /dev/null +++ b/src/common/ssl/CMakeLists.txt @@ -0,0 +1,10 @@ +# Copyright (c) 2021 vesoft inc. All rights reserved. +# +# This source code is licensed under Apache 2.0 License, +# attached with Common Clause Condition 1.0, found in the LICENSES directory. + +nebula_add_library( + ssl_obj + OBJECT + SSLConfig.cpp +) diff --git a/src/common/ssl/SSLConfig.cpp b/src/common/ssl/SSLConfig.cpp new file mode 100644 index 00000000000..5f39f26c5ac --- /dev/null +++ b/src/common/ssl/SSLConfig.cpp @@ -0,0 +1,38 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License, + * attached with Common Clause Condition 1.0, found in the LICENSES directory. 
+ */ + +#include "common/ssl/SSLConfig.h" + +DEFINE_string(cert_path, "", "Path to cert pem."); +DEFINE_string(key_path, "", "Path to cert key."); +DEFINE_string(ca_path, "", "Path to trusted CA file."); +DEFINE_bool(enable_ssl, false, "Whether to enable ssl."); +DEFINE_bool(enable_graph_ssl, false, "Whether to enable ssl of graph server."); +DEFINE_bool(enable_meta_ssl, false, "Whether to enable ssl of meta server."); + +namespace nebula { + +std::shared_ptr sslContextConfig() { + auto sslCfg = std::make_shared(); + sslCfg->addCertificate(FLAGS_cert_path, FLAGS_key_path, ""); + sslCfg->isDefault = true; + return sslCfg; +} + +std::shared_ptr createSSLContext() { + auto context = std::make_shared(); + if (!FLAGS_ca_path.empty()) { + context->loadTrustedCertificates(FLAGS_ca_path.c_str()); + // don't do peer name validation + context->authenticate(true, false); + // verify the server cert + context->setVerificationOption(folly::SSLContext::SSLVerifyPeerEnum::VERIFY); + } + folly::ssl::setSignatureAlgorithms(*context); + return context; +} + +} // namespace nebula diff --git a/src/common/ssl/SSLConfig.h b/src/common/ssl/SSLConfig.h new file mode 100644 index 00000000000..45889b86b27 --- /dev/null +++ b/src/common/ssl/SSLConfig.h @@ -0,0 +1,26 @@ + +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License, + * attached with Common Clause Condition 1.0, found in the LICENSES directory. 
+ */ + +#pragma once + +#include +#include +#include + +#include + +DECLARE_bool(enable_ssl); +DECLARE_bool(enable_graph_ssl); +DECLARE_bool(enable_meta_ssl); + +namespace nebula { + +extern std::shared_ptr sslContextConfig(); + +extern std::shared_ptr createSSLContext(); + +} // namespace nebula diff --git a/src/common/thrift/ThriftClientManager-inl.h b/src/common/thrift/ThriftClientManager-inl.h index fe907bcc7bb..caf0a27f252 100644 --- a/src/common/thrift/ThriftClientManager-inl.h +++ b/src/common/thrift/ThriftClientManager-inl.h @@ -6,11 +6,13 @@ #pragma once +#include #include #include #include #include "common/network/NetworkUtils.h" +#include "common/ssl/SSLConfig.h" DECLARE_int32(conn_timeout_ms); @@ -71,9 +73,14 @@ std::shared_ptr ThriftClientManager::client(const HostAd VLOG(2) << "Connecting to " << host << " for " << ++connectionCount << " times"; std::shared_ptr socket; - evb->runImmediatelyOrRunInEventBaseThreadAndWait([&socket, evb, resolved]() { - socket = - folly::AsyncSocket::newSocket(evb, resolved.host, resolved.port, FLAGS_conn_timeout_ms); + evb->runImmediatelyOrRunInEventBaseThreadAndWait([this, &socket, evb, resolved]() { + if (enableSSL_) { + socket = folly::AsyncSSLSocket::newSocket(nebula::createSSLContext(), evb); + socket->connect(nullptr, resolved.host, resolved.port, FLAGS_conn_timeout_ms); + } else { + socket = + folly::AsyncSocket::newSocket(evb, resolved.host, resolved.port, FLAGS_conn_timeout_ms); + } }); auto headerClientChannel = apache::thrift::HeaderClientChannel::newChannel(socket); if (timeout > 0) { diff --git a/src/common/thrift/ThriftClientManager.h b/src/common/thrift/ThriftClientManager.h index da5c16dcaf4..fa23b3678f3 100644 --- a/src/common/thrift/ThriftClientManager.h +++ b/src/common/thrift/ThriftClientManager.h @@ -7,6 +7,7 @@ #ifndef COMMON_THRIFT_THRIFTCLIENTMANAGER_H_ #define COMMON_THRIFT_THRIFTCLIENTMANAGER_H_ +#include #include #include "common/base/Base.h" @@ -25,7 +26,9 @@ class ThriftClientManager final { 
~ThriftClientManager() { VLOG(3) << "~ThriftClientManager"; } - ThriftClientManager() { VLOG(3) << "ThriftClientManager"; } + explicit ThriftClientManager(bool enableSSL = false) : enableSSL_(enableSSL) { + VLOG(3) << "ThriftClientManager"; + } private: using ClientMap = std::unordered_map, // @@ -34,6 +37,8 @@ class ThriftClientManager final { >; folly::ThreadLocal clientMap_; + // whether enable ssl + bool enableSSL_{false}; }; } // namespace thrift diff --git a/src/common/time/TimeUtils.cpp b/src/common/time/TimeUtils.cpp index 694ab6600a5..01045531ee1 100644 --- a/src/common/time/TimeUtils.cpp +++ b/src/common/time/TimeUtils.cpp @@ -55,11 +55,16 @@ constexpr int64_t kMaxTimestamp = std::numeric_limits::max() / 10000000 return Status::Error("Invalid second number `%ld'.", kv.second.getInt()); } dt.sec = kv.second.getInt(); + } else if (kv.first == "millisecond") { + if (kv.second.getInt() < 0 || kv.second.getInt() > 999) { + return Status::Error("Invalid millisecond number `%ld'.", kv.second.getInt()); + } + dt.microsec += kv.second.getInt() * 1000; } else if (kv.first == "microsecond") { - if (kv.second.getInt() < 0 || kv.second.getInt() > 999999) { + if (kv.second.getInt() < 0 || kv.second.getInt() > 999) { return Status::Error("Invalid microsecond number `%ld'.", kv.second.getInt()); } - dt.microsec = kv.second.getInt(); + dt.microsec += kv.second.getInt(); } else { return Status::Error("Invlaid parameter `%s'.", kv.first.c_str()); } @@ -125,11 +130,16 @@ constexpr int64_t kMaxTimestamp = std::numeric_limits::max() / 10000000 return Status::Error("Invalid second number `%ld'.", kv.second.getInt()); } t.sec = kv.second.getInt(); + } else if (kv.first == "millisecond") { + if (kv.second.getInt() < 0 || kv.second.getInt() > 999) { + return Status::Error("Invalid millisecond number `%ld'.", kv.second.getInt()); + } + t.microsec += kv.second.getInt() * 1000; } else if (kv.first == "microsecond") { - if (kv.second.getInt() < 0 || kv.second.getInt() > 999999) { + if 
(kv.second.getInt() < 0 || kv.second.getInt() > 999) { return Status::Error("Invalid microsecond number `%ld'.", kv.second.getInt()); } - t.microsec = kv.second.getInt(); + t.microsec += kv.second.getInt(); } else { return Status::Error("Invlaid parameter `%s'.", kv.first.c_str()); } diff --git a/src/common/utils/NebulaKeyUtils.cpp b/src/common/utils/NebulaKeyUtils.cpp index 5b3ad287f3e..2bac2e2b7ee 100644 --- a/src/common/utils/NebulaKeyUtils.cpp +++ b/src/common/utils/NebulaKeyUtils.cpp @@ -240,4 +240,13 @@ std::string NebulaKeyUtils::toEdgeKey(const folly::StringPiece& lockKey) { return ret; } +std::string NebulaKeyUtils::adminTaskKey(int32_t seqId, JobID jobId, TaskID taskId) { + std::string key; + key.reserve(sizeof(int32_t) + sizeof(JobID) + sizeof(TaskID)); + key.append(reinterpret_cast(&seqId), sizeof(int32_t)); + key.append(reinterpret_cast(&jobId), sizeof(JobID)); + key.append(reinterpret_cast(&taskId), sizeof(TaskID)); + return key; +} + } // namespace nebula diff --git a/src/common/utils/NebulaKeyUtils.h b/src/common/utils/NebulaKeyUtils.h index 6ff51baa9da..adbdea7c32e 100644 --- a/src/common/utils/NebulaKeyUtils.h +++ b/src/common/utils/NebulaKeyUtils.h @@ -269,6 +269,8 @@ class NebulaKeyUtils final { LOG(FATAL) << msg.str(); } + static std::string adminTaskKey(int32_t seqId, JobID jobId, TaskID taskId); + static_assert(sizeof(NebulaKeyType) == sizeof(PartitionID)); private: diff --git a/src/daemons/CMakeLists.txt b/src/daemons/CMakeLists.txt index ce7ec27cb95..f69da66c10f 100644 --- a/src/daemons/CMakeLists.txt +++ b/src/daemons/CMakeLists.txt @@ -29,6 +29,7 @@ set(common_deps $ $ $ + $ ) set(storage_meta_deps diff --git a/src/daemons/GraphDaemon.cpp b/src/daemons/GraphDaemon.cpp index dccf4a0cae9..4bfee7617e2 100644 --- a/src/daemons/GraphDaemon.cpp +++ b/src/daemons/GraphDaemon.cpp @@ -5,6 +5,7 @@ */ #include +#include #include #include #include @@ -15,6 +16,7 @@ #include "common/fs/FileUtils.h" #include "common/network/NetworkUtils.h" #include 
"common/process/ProcessUtils.h" +#include "common/ssl/SSLConfig.h" #include "common/time/TimezoneInfo.h" #include "graph/service/GraphFlags.h" #include "graph/service/GraphService.h" @@ -52,6 +54,9 @@ int main(int argc, char *argv[]) { } folly::init(&argc, &argv, true); + if (FLAGS_enable_ssl || FLAGS_enable_graph_ssl || FLAGS_enable_meta_ssl) { + folly::ssl::init(); + } nebula::initCounters(); if (FLAGS_flagfile.empty()) { @@ -149,6 +154,9 @@ int main(int argc, char *argv[]) { gServer->setIdleTimeout(std::chrono::seconds(FLAGS_client_idle_timeout_secs)); gServer->setNumAcceptThreads(FLAGS_num_accept_threads); gServer->setListenBacklog(FLAGS_listen_backlog); + if (FLAGS_enable_ssl || FLAGS_enable_graph_ssl) { + gServer->setSSLConfig(nebula::sslContextConfig()); + } setupThreadManager(); // Setup the signal handlers diff --git a/src/daemons/MetaDaemon.cpp b/src/daemons/MetaDaemon.cpp index e2c176a8083..9c534951f43 100644 --- a/src/daemons/MetaDaemon.cpp +++ b/src/daemons/MetaDaemon.cpp @@ -4,6 +4,7 @@ * attached with Common Clause Condition 1.0, found in the LICENSES directory. 
*/ +#include #include #include "common/base/Base.h" @@ -12,6 +13,7 @@ #include "common/hdfs/HdfsHelper.h" #include "common/network/NetworkUtils.h" #include "common/process/ProcessUtils.h" +#include "common/ssl/SSLConfig.h" #include "common/thread/GenericThreadPool.h" #include "common/time/TimezoneInfo.h" #include "kvstore/NebulaStore.h" @@ -204,6 +206,9 @@ int main(int argc, char* argv[]) { } folly::init(&argc, &argv, true); + if (FLAGS_enable_ssl || FLAGS_enable_meta_ssl) { + folly::ssl::init(); + } if (FLAGS_data_path.empty()) { LOG(ERROR) << "Meta Data Path should not empty"; return EXIT_FAILURE; @@ -307,6 +312,9 @@ int main(int argc, char* argv[]) { gServer->setPort(FLAGS_port); gServer->setIdleTimeout(std::chrono::seconds(0)); // No idle timeout on client connection gServer->setInterface(std::move(handler)); + if (FLAGS_enable_ssl || FLAGS_enable_meta_ssl) { + gServer->setSSLConfig(nebula::sslContextConfig()); + } gServer->serve(); // Will wait until the server shuts down } catch (const std::exception& e) { LOG(ERROR) << "Exception thrown: " << e.what(); diff --git a/src/daemons/StorageDaemon.cpp b/src/daemons/StorageDaemon.cpp index 70b2930cb84..b8f7f933be3 100644 --- a/src/daemons/StorageDaemon.cpp +++ b/src/daemons/StorageDaemon.cpp @@ -4,6 +4,7 @@ * attached with Common Clause Condition 1.0, found in the LICENSES directory. 
*/ +#include #include #include "common/base/Base.h" @@ -69,6 +70,9 @@ int main(int argc, char *argv[]) { } folly::init(&argc, &argv, true); + if (FLAGS_enable_ssl || FLAGS_enable_meta_ssl) { + folly::ssl::init(); + } if (FLAGS_daemonize) { google::SetStderrLogging(google::FATAL); } else { diff --git a/src/graph/context/ast/QueryAstContext.h b/src/graph/context/ast/QueryAstContext.h index 6cc3e4e975e..9e994d745a7 100644 --- a/src/graph/context/ast/QueryAstContext.h +++ b/src/graph/context/ast/QueryAstContext.h @@ -128,6 +128,16 @@ struct SubgraphContext final : public AstContext { bool getEdgeProp{false}; }; +struct FetchVerticesContext final : public AstContext { + Starts from; + bool distinct{false}; + YieldColumns* yieldExpr{nullptr}; + ExpressionProps exprProps; + + // store the result of the previous sentence + std::string inputVarName; +}; + } // namespace graph } // namespace nebula #endif // GRAPH_CONTEXT_AST_QUERYASTCONTEXT_H_ diff --git a/src/graph/context/test/CMakeLists.txt b/src/graph/context/test/CMakeLists.txt index fc579f8a74a..02d05be1d42 100644 --- a/src/graph/context/test/CMakeLists.txt +++ b/src/graph/context/test/CMakeLists.txt @@ -40,6 +40,7 @@ SET(CONTEXT_TEST_LIBS $ $ $ + $ ) nebula_add_test( diff --git a/src/graph/executor/Executor.cpp b/src/graph/executor/Executor.cpp index 647a29b96a2..de79bda805b 100644 --- a/src/graph/executor/Executor.cpp +++ b/src/graph/executor/Executor.cpp @@ -226,6 +226,9 @@ Executor *Executor::makeExecutor(QueryContext *qctx, const PlanNode *node) { case PlanNode::Kind::kCreateSpace: { return pool->add(new CreateSpaceExecutor(node, qctx)); } + case PlanNode::Kind::kCreateSpaceAs: { + return pool->add(new CreateSpaceAsExecutor(node, qctx)); + } case PlanNode::Kind::kDescSpace: { return pool->add(new DescSpaceExecutor(node, qctx)); } diff --git a/src/graph/executor/admin/ListRolesExecutor.cpp b/src/graph/executor/admin/ListRolesExecutor.cpp index f72fddadb95..bd5b4ee808f 100644 --- 
a/src/graph/executor/admin/ListRolesExecutor.cpp +++ b/src/graph/executor/admin/ListRolesExecutor.cpp @@ -38,10 +38,17 @@ folly::Future ListRolesExecutor::listRoles() { auto foundItem = std::find_if(items.begin(), items.end(), [&account](const auto &item) { return item.get_user_id() == account; }); - if (foundItem != items.end() && foundItem->get_role_type() != meta::cpp2::RoleType::ADMIN) { - v.emplace_back(Row({foundItem->get_user_id(), - apache::thrift::util::enumNameSafe(foundItem->get_role_type())})); - } else { + if (foundItem != items.end()) { + if (foundItem->get_role_type() != meta::cpp2::RoleType::ADMIN) { + v.emplace_back(Row({foundItem->get_user_id(), + apache::thrift::util::enumNameSafe(foundItem->get_role_type())})); + } else { + for (const auto &item : items) { + v.emplace_back(nebula::Row( + {item.get_user_id(), apache::thrift::util::enumNameSafe(item.get_role_type())})); + } + } + } else if (qctx_->rctx()->session()->isGod()) { for (const auto &item : items) { v.emplace_back(nebula::Row( {item.get_user_id(), apache::thrift::util::enumNameSafe(item.get_role_type())})); diff --git a/src/graph/executor/admin/SpaceExecutor.cpp b/src/graph/executor/admin/SpaceExecutor.cpp index 52f89905a43..59d03ed1699 100644 --- a/src/graph/executor/admin/SpaceExecutor.cpp +++ b/src/graph/executor/admin/SpaceExecutor.cpp @@ -32,6 +32,25 @@ folly::Future CreateSpaceExecutor::execute() { }); } +folly::Future CreateSpaceAsExecutor::execute() { + SCOPED_TIMER(&execTime_); + + auto *csaNode = asNode(node()); + auto oldSpace = csaNode->getOldSpaceName(); + auto newSpace = csaNode->getNewSpaceName(); + return qctx() + ->getMetaClient() + ->createSpaceAs(oldSpace, newSpace) + .via(runner()) + .thenValue([](StatusOr resp) { + if (!resp.ok()) { + LOG(ERROR) << resp.status(); + return resp.status(); + } + return Status::OK(); + }); +} + folly::Future DescSpaceExecutor::execute() { SCOPED_TIMER(&execTime_); diff --git a/src/graph/executor/admin/SpaceExecutor.h 
b/src/graph/executor/admin/SpaceExecutor.h index accd84e8b5e..d3a339bb18a 100644 --- a/src/graph/executor/admin/SpaceExecutor.h +++ b/src/graph/executor/admin/SpaceExecutor.h @@ -20,6 +20,14 @@ class CreateSpaceExecutor final : public Executor { folly::Future execute() override; }; +class CreateSpaceAsExecutor final : public Executor { + public: + CreateSpaceAsExecutor(const PlanNode *node, QueryContext *qctx) + : Executor("CreateSpaceAsExecutor", node, qctx) {} + + folly::Future execute() override; +}; + class DescSpaceExecutor final : public Executor { public: DescSpaceExecutor(const PlanNode *node, QueryContext *qctx) diff --git a/src/graph/executor/test/CMakeLists.txt b/src/graph/executor/test/CMakeLists.txt index c0a8925a452..f78ff4ed0c0 100644 --- a/src/graph/executor/test/CMakeLists.txt +++ b/src/graph/executor/test/CMakeLists.txt @@ -47,6 +47,7 @@ SET(EXEC_QUERY_TEST_OBJS $ $ $ + $ ) SET(EXEC_QUERY_TEST_LIBS diff --git a/src/graph/optimizer/test/CMakeLists.txt b/src/graph/optimizer/test/CMakeLists.txt index 7a5ed87dcb6..a1fa426ac72 100644 --- a/src/graph/optimizer/test/CMakeLists.txt +++ b/src/graph/optimizer/test/CMakeLists.txt @@ -43,6 +43,7 @@ set(OPTIMIZER_TEST_LIB $ $ $ + $ ) nebula_add_test( diff --git a/src/graph/planner/CMakeLists.txt b/src/graph/planner/CMakeLists.txt index 2262a2e2049..9f5791f2f3f 100644 --- a/src/graph/planner/CMakeLists.txt +++ b/src/graph/planner/CMakeLists.txt @@ -40,4 +40,5 @@ nebula_add_library( ngql/GoPlanner.cpp ngql/SubgraphPlanner.cpp ngql/LookupPlanner.cpp + ngql/FetchVerticesPlanner.cpp ) diff --git a/src/graph/planner/PlannersRegister.cpp b/src/graph/planner/PlannersRegister.cpp index c67f5ea9d8c..0166d711c8f 100644 --- a/src/graph/planner/PlannersRegister.cpp +++ b/src/graph/planner/PlannersRegister.cpp @@ -13,6 +13,7 @@ #include "graph/planner/match/PropIndexSeek.h" #include "graph/planner/match/StartVidFinder.h" #include "graph/planner/match/VertexIdSeek.h" +#include "graph/planner/ngql/FetchVerticesPlanner.h" 
#include "graph/planner/ngql/GoPlanner.h" #include "graph/planner/ngql/LookupPlanner.h" #include "graph/planner/ngql/PathPlanner.h" @@ -47,6 +48,10 @@ void PlannersRegister::registSequential() { auto& planners = Planner::plannersMap()[Sentence::Kind::kGetSubgraph]; planners.emplace_back(&SubgraphPlanner::match, &SubgraphPlanner::make); } + { + auto& planners = Planner::plannersMap()[Sentence::Kind::kFetchVertices]; + planners.emplace_back(&FetchVerticesPlanner::match, &FetchVerticesPlanner::make); + } } void PlannersRegister::registMatch() { diff --git a/src/graph/planner/ngql/FetchVerticesPlanner.cpp b/src/graph/planner/ngql/FetchVerticesPlanner.cpp new file mode 100644 index 00000000000..2782ab622e6 --- /dev/null +++ b/src/graph/planner/ngql/FetchVerticesPlanner.cpp @@ -0,0 +1,69 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License, + * attached with Common Clause Condition 1.0, found in the LICENSES directory. + */ + +#include "graph/planner/ngql/FetchVerticesPlanner.h" + +#include "graph/planner/plan/Query.h" +#include "graph/util/PlannerUtil.h" + +namespace nebula { +namespace graph { + +std::unique_ptr FetchVerticesPlanner::buildVertexProps( + const ExpressionProps::TagIDPropsMap& propsMap) { + if (propsMap.empty()) { + return nullptr; + } + auto vertexProps = std::make_unique(propsMap.size()); + auto fun = [](auto& tag) { + VertexProp vp; + vp.set_tag(tag.first); + std::vector props(tag.second.begin(), tag.second.end()); + vp.set_props(std::move(props)); + return vp; + }; + std::transform(propsMap.begin(), propsMap.end(), vertexProps->begin(), fun); + return vertexProps; +} + +StatusOr FetchVerticesPlanner::transform(AstContext* astCtx) { + fetchCtx_ = static_cast(astCtx); + auto qctx = fetchCtx_->qctx; + auto space = fetchCtx_->space; + auto& starts = fetchCtx_->from; + + std::string vidsVar; + if (!starts.vids.empty() && starts.originalSrc == nullptr) { + 
PlannerUtil::buildConstantInput(qctx, starts, vidsVar); + } else { + starts.src = starts.originalSrc; + if (starts.fromType == kVariable) { + vidsVar = starts.userDefinedVarName; + } else { + vidsVar = fetchCtx_->inputVarName; + } + } + + SubPlan subPlan; + auto* getVertices = GetVertices::make(qctx, + nullptr, + space.id, + starts.src, + buildVertexProps(fetchCtx_->exprProps.tagProps()), + {}, + fetchCtx_->distinct); + getVertices->setInputVar(vidsVar); + + subPlan.root = Project::make(qctx, getVertices, fetchCtx_->yieldExpr); + if (fetchCtx_->distinct) { + subPlan.root = Dedup::make(qctx, subPlan.root); + } + subPlan.tail = getVertices; + return subPlan; +} + +} // namespace graph +} // namespace nebula diff --git a/src/graph/planner/ngql/FetchVerticesPlanner.h b/src/graph/planner/ngql/FetchVerticesPlanner.h new file mode 100644 index 00000000000..7bbd9e34b91 --- /dev/null +++ b/src/graph/planner/ngql/FetchVerticesPlanner.h @@ -0,0 +1,43 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License, + * attached with Common Clause Condition 1.0, found in the LICENSES directory. 
+ */ + +#ifndef GRAPH_PLANNER_NGQL_FETCH_VERTICES_PLANNER_H_ +#define GRAPH_PLANNER_NGQL_FETCH_VERTICES_PLANNER_H_ + +#include "common/base/Base.h" +#include "graph/context/ast/QueryAstContext.h" +#include "graph/planner/Planner.h" +#include "graph/planner/plan/PlanNode.h" + +namespace nebula { +namespace graph { +class FetchVerticesPlanner final : public Planner { + public: + using VertexProp = nebula::storage::cpp2::VertexProp; + using VertexProps = std::vector; + + static std::unique_ptr make() { + return std::unique_ptr(new FetchVerticesPlanner()); + } + + static bool match(AstContext* astCtx) { + return astCtx->sentence->kind() == Sentence::Kind::kFetchVertices; + } + + StatusOr transform(AstContext* astCtx) override; + + private: + std::unique_ptr buildVertexProps(const ExpressionProps::TagIDPropsMap& propsMap); + + private: + FetchVerticesPlanner() = default; + + FetchVerticesContext* fetchCtx_{nullptr}; +}; +} // namespace graph +} // namespace nebula + +#endif // GRAPH_PLANNER_NGQL_FETCH_VERTICES_PLANNER_H diff --git a/src/graph/planner/ngql/GoPlanner.cpp b/src/graph/planner/ngql/GoPlanner.cpp index 926f8f10f8d..c12278417a1 100644 --- a/src/graph/planner/ngql/GoPlanner.cpp +++ b/src/graph/planner/ngql/GoPlanner.cpp @@ -6,12 +6,9 @@ #include "graph/planner/ngql/GoPlanner.h" -#include "graph/planner/plan/Algo.h" #include "graph/planner/plan/Logic.h" #include "graph/util/ExpressionUtils.h" -#include "graph/util/QueryUtil.h" -#include "graph/util/SchemaUtil.h" -#include "graph/validator/Validator.h" +#include "graph/util/PlannerUtil.h" namespace nebula { namespace graph { @@ -395,7 +392,7 @@ SubPlan GoPlanner::nStepsPlan(SubPlan& startVidPlan) { gn->setEdgeProps(buildEdgeProps(true)); gn->setInputVar(goCtx_->vidsVar); - auto* getDst = QueryUtil::extractDstFromGN(qctx, gn, goCtx_->vidsVar); + auto* getDst = PlannerUtil::extractDstFromGN(qctx, gn, goCtx_->vidsVar); PlanNode* loopBody = getDst; PlanNode* loopDep = nullptr; @@ -429,7 +426,7 @@ SubPlan 
GoPlanner::mToNStepsPlan(SubPlan& startVidPlan) { gn->setEdgeProps(buildEdgeProps(false)); gn->setInputVar(goCtx_->vidsVar); - auto* getDst = QueryUtil::extractDstFromGN(qctx, gn, goCtx_->vidsVar); + auto* getDst = PlannerUtil::extractDstFromGN(qctx, gn, goCtx_->vidsVar); auto* loopBody = getDst; auto* loopDep = startVidPlan.root; @@ -487,7 +484,7 @@ StatusOr GoPlanner::transform(AstContext* astCtx) { goCtx_->joinInput = goCtx_->from.fromType != FromType::kInstantExpr; goCtx_->joinDst = !goCtx_->exprProps.dstTagProps().empty(); - SubPlan startPlan = QueryUtil::buildStart(qctx, goCtx_->from, goCtx_->vidsVar); + SubPlan startPlan = PlannerUtil::buildStart(qctx, goCtx_->from, goCtx_->vidsVar); auto& steps = goCtx_->steps; if (steps.isMToN()) { diff --git a/src/graph/planner/ngql/GoPlanner.h b/src/graph/planner/ngql/GoPlanner.h index 4a7343bd6fb..19f7e5dbf93 100644 --- a/src/graph/planner/ngql/GoPlanner.h +++ b/src/graph/planner/ngql/GoPlanner.h @@ -12,7 +12,6 @@ #include "graph/planner/Planner.h" #include "graph/planner/plan/PlanNode.h" #include "graph/planner/plan/Query.h" -#include "graph/util/ExpressionUtils.h" namespace nebula { namespace graph { diff --git a/src/graph/planner/ngql/PathPlanner.cpp b/src/graph/planner/ngql/PathPlanner.cpp index 4509e20e4b8..3e2dab8bca2 100644 --- a/src/graph/planner/ngql/PathPlanner.cpp +++ b/src/graph/planner/ngql/PathPlanner.cpp @@ -8,7 +8,7 @@ #include "graph/planner/plan/Algo.h" #include "graph/planner/plan/Logic.h" #include "graph/util/ExpressionUtils.h" -#include "graph/util/QueryUtil.h" +#include "graph/util/PlannerUtil.h" #include "graph/util/SchemaUtil.h" #include "graph/validator/Validator.h" @@ -62,15 +62,15 @@ void PathPlanner::doBuildEdgeProps(std::unique_ptr>& edgeP void PathPlanner::buildStart(Starts& starts, std::string& vidsVar, bool reverse) { auto qctx = pathCtx_->qctx; if (!starts.vids.empty() && starts.originalSrc == nullptr) { - QueryUtil::buildConstantInput(qctx, starts, vidsVar); + 
PlannerUtil::buildConstantInput(qctx, starts, vidsVar); } else { if (reverse) { - auto subPlan = QueryUtil::buildRuntimeInput(qctx, starts); + auto subPlan = PlannerUtil::buildRuntimeInput(qctx, starts); pathCtx_->runtimeToProject = subPlan.tail; pathCtx_->runtimeToDedup = subPlan.root; vidsVar = pathCtx_->runtimeToDedup->outputVar(); } else { - auto subPlan = QueryUtil::buildRuntimeInput(qctx, starts); + auto subPlan = PlannerUtil::buildRuntimeInput(qctx, starts); pathCtx_->runtimeFromProject = subPlan.tail; pathCtx_->runtimeFromDedup = subPlan.root; vidsVar = pathCtx_->runtimeFromDedup->outputVar(); diff --git a/src/graph/planner/ngql/SubgraphPlanner.cpp b/src/graph/planner/ngql/SubgraphPlanner.cpp index 1621ff1a573..71220a2a2a3 100644 --- a/src/graph/planner/ngql/SubgraphPlanner.cpp +++ b/src/graph/planner/ngql/SubgraphPlanner.cpp @@ -8,7 +8,7 @@ #include "graph/planner/plan/Algo.h" #include "graph/planner/plan/Logic.h" #include "graph/util/ExpressionUtils.h" -#include "graph/util/QueryUtil.h" +#include "graph/util/PlannerUtil.h" #include "graph/util/SchemaUtil.h" #include "graph/validator/Validator.h" @@ -122,7 +122,7 @@ StatusOr SubgraphPlanner::transform(AstContext* astCtx) { auto qctx = subgraphCtx_->qctx; std::string vidsVar; - SubPlan startPlan = QueryUtil::buildStart(qctx, subgraphCtx_->from, vidsVar); + SubPlan startPlan = PlannerUtil::buildStart(qctx, subgraphCtx_->from, vidsVar); if (subgraphCtx_->steps.steps() == 0) { return zeroStep(startPlan, vidsVar); } diff --git a/src/graph/planner/plan/Admin.cpp b/src/graph/planner/plan/Admin.cpp index fba72ace130..2ae4d0cac35 100644 --- a/src/graph/planner/plan/Admin.cpp +++ b/src/graph/planner/plan/Admin.cpp @@ -21,6 +21,13 @@ std::unique_ptr CreateSpace::explain() const { return desc; } +std::unique_ptr CreateSpaceAsNode::explain() const { + auto desc = SingleDependencyNode::explain(); + addDescription("oldSpaceName", oldSpaceName_, desc.get()); + addDescription("newSpaceName", newSpaceName_, desc.get()); + 
return desc; +} + std::unique_ptr DropSpace::explain() const { auto desc = SingleDependencyNode::explain(); addDescription("spaceName", spaceName_, desc.get()); diff --git a/src/graph/planner/plan/Admin.h b/src/graph/planner/plan/Admin.h index d2925c93ba6..8d06377c010 100644 --- a/src/graph/planner/plan/Admin.h +++ b/src/graph/planner/plan/Admin.h @@ -107,6 +107,33 @@ class CreateSpace final : public SingleDependencyNode { bool ifNotExists_{false}; }; +class CreateSpaceAsNode final : public SingleDependencyNode { + public: + static CreateSpaceAsNode* make(QueryContext* qctx, + PlanNode* input, + const std::string& oldSpaceName, + const std::string& newSpaceName) { + return qctx->objPool()->add(new CreateSpaceAsNode(qctx, input, oldSpaceName, newSpaceName)); + } + + std::unique_ptr explain() const override; + + public: + std::string getOldSpaceName() const { return oldSpaceName_; } + + std::string getNewSpaceName() const { return newSpaceName_; } + + private: + CreateSpaceAsNode(QueryContext* qctx, PlanNode* input, std::string oldName, std::string newName) + : SingleDependencyNode(qctx, Kind::kCreateSpaceAs, input), + oldSpaceName_(std::move(oldName)), + newSpaceName_(std::move(newName)) {} + + private: + std::string oldSpaceName_; + std::string newSpaceName_; +}; + class DropSpace final : public SingleDependencyNode { public: static DropSpace* make(QueryContext* qctx, diff --git a/src/graph/planner/plan/PlanNode.cpp b/src/graph/planner/plan/PlanNode.cpp index 336a9faf083..6da34fa3093 100644 --- a/src/graph/planner/plan/PlanNode.cpp +++ b/src/graph/planner/plan/PlanNode.cpp @@ -92,6 +92,8 @@ const char* PlanNode::toString(PlanNode::Kind kind) { return "RegisterSpaceToSession"; case Kind::kCreateSpace: return "CreateSpace"; + case Kind::kCreateSpaceAs: + return "CreateSpaceAs"; case Kind::kCreateTag: return "CreateTag"; case Kind::kCreateEdge: diff --git a/src/graph/planner/plan/PlanNode.h b/src/graph/planner/plan/PlanNode.h index fe458cb2497..c969947e406 100644 --- 
a/src/graph/planner/plan/PlanNode.h +++ b/src/graph/planner/plan/PlanNode.h @@ -69,6 +69,7 @@ class PlanNode { // schema related kCreateSpace, + kCreateSpaceAs, kCreateTag, kCreateEdge, kDescSpace, diff --git a/src/graph/service/GraphService.cpp b/src/graph/service/GraphService.cpp index 6b217fb0c1f..5bed12c6187 100644 --- a/src/graph/service/GraphService.cpp +++ b/src/graph/service/GraphService.cpp @@ -153,6 +153,13 @@ folly::Future GraphService::future_execute(int64_t sessionId, return future; } +folly::Future GraphService::future_executeJson(int64_t sessionId, + const std::string& query) { + auto rawResp = future_execute(sessionId, query).get(); + auto respJsonObj = rawResp.toJson(); + return folly::toJson(respJsonObj); +} + bool GraphService::auth(const std::string& username, const std::string& password) { if (!FLAGS_enable_authorize) { return true; diff --git a/src/graph/service/GraphService.h b/src/graph/service/GraphService.h index 0e15ef0e873..f72c36bb39e 100644 --- a/src/graph/service/GraphService.h +++ b/src/graph/service/GraphService.h @@ -36,6 +36,9 @@ class GraphService final : public cpp2::GraphServiceSvIf { folly::Future future_execute(int64_t sessionId, const std::string& stmt) override; + folly::Future future_executeJson(int64_t sessionId, + const std::string& stmt) override; + private: bool auth(const std::string& username, const std::string& password); diff --git a/src/graph/service/PermissionCheck.cpp b/src/graph/service/PermissionCheck.cpp index 0cadb22c436..8cdc16008cf 100644 --- a/src/graph/service/PermissionCheck.cpp +++ b/src/graph/service/PermissionCheck.cpp @@ -52,6 +52,7 @@ Status PermissionCheck::permissionCheck(ClientSession *session, return Status::OK(); } case Sentence::Kind::kCreateSpace: + case Sentence::Kind::kCreateSpaceAs: case Sentence::Kind::kDropSpace: case Sentence::Kind::kCreateSnapshot: case Sentence::Kind::kDropSnapshot: diff --git a/src/graph/session/ClientSession.cpp b/src/graph/session/ClientSession.cpp index 
39875b8887a..3c3fa758abe 100644 --- a/src/graph/session/ClientSession.cpp +++ b/src/graph/session/ClientSession.cpp @@ -55,7 +55,7 @@ void ClientSession::deleteQuery(QueryContext* qctx) { session_.queries_ref()->erase(epId); } -bool ClientSession::findQuery(nebula::ExecutionPlanID epId) { +bool ClientSession::findQuery(nebula::ExecutionPlanID epId) const { folly::RWSpinLock::ReadHolder rHolder(rwSpinLock_); auto context = contexts_.find(epId); if (context != contexts_.end()) { diff --git a/src/graph/session/ClientSession.h b/src/graph/session/ClientSession.h index 31154bc7cf8..6432d511147 100644 --- a/src/graph/session/ClientSession.h +++ b/src/graph/session/ClientSession.h @@ -29,12 +29,12 @@ class ClientSession final { static std::shared_ptr create(meta::cpp2::Session&& session, meta::MetaClient* metaClient); - int64_t id() { + int64_t id() const { folly::RWSpinLock::ReadHolder rHolder(rwSpinLock_); return session_.get_session_id(); } - const SpaceInfo space() { + const SpaceInfo& space() const { folly::RWSpinLock::ReadHolder rHolder(rwSpinLock_); return space_; } @@ -47,22 +47,22 @@ class ClientSession final { } } - const std::string spaceName() { + const std::string& spaceName() const { folly::RWSpinLock::ReadHolder rHolder(rwSpinLock_); return session_.get_space_name(); } - const std::string user() { + const std::string& user() const { folly::RWSpinLock::ReadHolder rHolder(rwSpinLock_); return session_.get_user_name(); } - std::unordered_map roles() { + const std::unordered_map& roles() const { folly::RWSpinLock::ReadHolder rHolder(rwSpinLock_); return roles_; } - StatusOr roleWithSpace(GraphSpaceID space) { + StatusOr roleWithSpace(GraphSpaceID space) const { folly::RWSpinLock::ReadHolder rHolder(rwSpinLock_); auto ret = roles_.find(space); if (ret == roles_.end()) { @@ -71,7 +71,7 @@ class ClientSession final { return ret->second; } - bool isGod() { + bool isGod() const { folly::RWSpinLock::ReadHolder rHolder(rwSpinLock_); // Cloud may have multiple God 
accounts for (auto& role : roles_) { @@ -91,12 +91,12 @@ class ClientSession final { void charge(); - int32_t getTimezone() { + int32_t getTimezone() const { folly::RWSpinLock::ReadHolder rHolder(rwSpinLock_); return session_.get_timezone(); } - HostAddr getGraphAddr() { + const HostAddr& getGraphAddr() const { folly::RWSpinLock::ReadHolder rHolder(rwSpinLock_); return session_.get_graph_addr(); } @@ -120,7 +120,7 @@ class ClientSession final { } } - const meta::cpp2::Session getSession() { + const meta::cpp2::Session& getSession() const { folly::RWSpinLock::ReadHolder rHolder(rwSpinLock_); return session_; } @@ -134,7 +134,7 @@ class ClientSession final { void deleteQuery(QueryContext* qctx); - bool findQuery(nebula::ExecutionPlanID epId); + bool findQuery(nebula::ExecutionPlanID epId) const; void markQueryKilled(nebula::ExecutionPlanID epId); @@ -150,7 +150,7 @@ class ClientSession final { time::Duration idleDuration_; meta::cpp2::Session session_; meta::MetaClient* metaClient_{nullptr}; - folly::RWSpinLock rwSpinLock_; + mutable folly::RWSpinLock rwSpinLock_; /* * map * One user can have roles in multiple spaces diff --git a/src/graph/util/CMakeLists.txt b/src/graph/util/CMakeLists.txt index a9feb25f5fa..da7bdf95a2e 100644 --- a/src/graph/util/CMakeLists.txt +++ b/src/graph/util/CMakeLists.txt @@ -13,7 +13,8 @@ nebula_add_library( ZoneUtil.cpp ToJson.cpp ParserUtil.cpp - QueryUtil.cpp + PlannerUtil.cpp + ValidateUtil.cpp ) nebula_add_library( diff --git a/src/graph/util/QueryUtil.cpp b/src/graph/util/PlannerUtil.cpp similarity index 81% rename from src/graph/util/QueryUtil.cpp rename to src/graph/util/PlannerUtil.cpp index 7c542aaa19a..f6613f55a06 100644 --- a/src/graph/util/QueryUtil.cpp +++ b/src/graph/util/PlannerUtil.cpp @@ -4,7 +4,7 @@ * attached with Common Clause Condition 1.0, found in the LICENSES directory. 
*/ -#include "graph/util/QueryUtil.h" +#include "graph/util/PlannerUtil.h" #include "common/base/Base.h" #include "common/expression/ColumnExpression.h" @@ -17,7 +17,7 @@ namespace nebula { namespace graph { // static -void QueryUtil::buildConstantInput(QueryContext* qctx, Starts& starts, std::string& vidsVar) { +void PlannerUtil::buildConstantInput(QueryContext* qctx, Starts& starts, std::string& vidsVar) { vidsVar = qctx->vctx()->anonVarGen()->getVar(); DataSet ds; ds.colNames.emplace_back(kVid); @@ -33,7 +33,7 @@ void QueryUtil::buildConstantInput(QueryContext* qctx, Starts& starts, std::stri } // static -SubPlan QueryUtil::buildRuntimeInput(QueryContext* qctx, Starts& starts) { +SubPlan PlannerUtil::buildRuntimeInput(QueryContext* qctx, Starts& starts) { auto pool = qctx->objPool(); auto* columns = pool->add(new YieldColumns()); auto* column = new YieldColumn(starts.originalSrc->clone(), kVid); @@ -54,7 +54,7 @@ SubPlan QueryUtil::buildRuntimeInput(QueryContext* qctx, Starts& starts) { } // static -SubPlan QueryUtil::buildStart(QueryContext* qctx, Starts& starts, std::string& vidsVar) { +SubPlan PlannerUtil::buildStart(QueryContext* qctx, Starts& starts, std::string& vidsVar) { SubPlan subPlan; if (!starts.vids.empty() && starts.originalSrc == nullptr) { buildConstantInput(qctx, starts, vidsVar); @@ -65,7 +65,9 @@ SubPlan QueryUtil::buildStart(QueryContext* qctx, Starts& starts, std::string& v return subPlan; } -PlanNode* QueryUtil::extractDstFromGN(QueryContext* qctx, PlanNode* gn, const std::string& output) { +PlanNode* PlannerUtil::extractDstFromGN(QueryContext* qctx, + PlanNode* gn, + const std::string& output) { auto pool = qctx->objPool(); auto* columns = pool->add(new YieldColumns()); auto* column = new YieldColumn(EdgePropertyExpression::make(pool, "*", kDst), kVid); @@ -77,5 +79,6 @@ PlanNode* QueryUtil::extractDstFromGN(QueryContext* qctx, PlanNode* gn, const st dedup->setOutputVar(output); return dedup; } + } // namespace graph } // namespace nebula 
diff --git a/src/graph/util/QueryUtil.h b/src/graph/util/PlannerUtil.h similarity index 82% rename from src/graph/util/QueryUtil.h rename to src/graph/util/PlannerUtil.h index 3ff1ea0e951..7eb0accbec6 100644 --- a/src/graph/util/QueryUtil.h +++ b/src/graph/util/PlannerUtil.h @@ -4,8 +4,8 @@ * attached with Common Clause Condition 1.0, found in the LICENSES directory. */ -#ifndef GRAPH_UTIL_QUERYUTIL_H_ -#define GRAPH_UTIL_QUERYUTIL_H_ +#ifndef GRAPH_UTIL_PLANNER_UTIL_H_ +#define GRAPH_UTIL_PLANNER_UTIL_H_ #include "common/base/Base.h" namespace nebula { @@ -14,9 +14,9 @@ class QueryContext; struct Starts; struct SubPlan; class PlanNode; -class QueryUtil final { +class PlannerUtil final { public: - QueryUtil() = delete; + PlannerUtil() = delete; static void buildConstantInput(QueryContext* qctx, Starts& starts, std::string& vidsVar); @@ -29,4 +29,4 @@ class QueryUtil final { } // namespace graph } // namespace nebula -#endif // GRAPH_UTIL_ZONEUTIL_H_ +#endif // GRAPH_UTIL_PLANNER_UTIL_H_ diff --git a/src/graph/util/ValidateUtil.cpp b/src/graph/util/ValidateUtil.cpp new file mode 100644 index 00000000000..953c7b75ab2 --- /dev/null +++ b/src/graph/util/ValidateUtil.cpp @@ -0,0 +1,93 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License, + * attached with Common Clause Condition 1.0, found in the LICENSES directory. 
+ */ + +#include "graph/util/ValidateUtil.h" + +#include "common/base/Base.h" +#include "common/expression/ColumnExpression.h" +#include "graph/context/QueryContext.h" +#include "graph/context/ast/QueryAstContext.h" +#include "graph/planner/Planner.h" +#include "graph/planner/plan/Query.h" +#include "graph/util/ExpressionUtils.h" + +namespace nebula { +namespace graph { + +Status ValidateUtil::validateStep(const StepClause* clause, StepClause& step) { + if (clause == nullptr) { + return Status::SemanticError("Step clause nullptr."); + } + step = *clause; + if (clause->isMToN()) { + if (step.mSteps() == 0) { + step.setMSteps(1); + } + if (step.nSteps() < step.mSteps()) { + return Status::SemanticError("`%s', upper bound steps should be greater than lower bound.", + step.toString().c_str()); + } + } + return Status::OK(); +} + +Status ValidateUtil::validateOver(QueryContext* qctx, const OverClause* clause, Over& over) { + if (clause == nullptr) { + return Status::SemanticError("Over clause nullptr."); + } + auto space = qctx->vctx()->whichSpace(); + + over.direction = clause->direction(); + auto* schemaMng = qctx->schemaMng(); + if (clause->isOverAll()) { + auto edgeStatus = schemaMng->getAllEdge(space.id); + NG_RETURN_IF_ERROR(edgeStatus); + auto edges = std::move(edgeStatus).value(); + if (edges.empty()) { + return Status::SemanticError("No edge type found in space `%s'", space.name.c_str()); + } + for (auto edge : edges) { + auto edgeType = schemaMng->toEdgeType(space.id, edge); + if (!edgeType.ok()) { + return Status::SemanticError( + "`%s' not found in space [`%s'].", edge.c_str(), space.name.c_str()); + } + over.edgeTypes.emplace_back(edgeType.value()); + } + over.allEdges = std::move(edges); + over.isOverAll = true; + } else { + auto edges = clause->edges(); + for (auto* edge : edges) { + auto edgeName = *edge->edge(); + auto edgeType = schemaMng->toEdgeType(space.id, edgeName); + if (!edgeType.ok()) { + return Status::SemanticError( + "%s not found in space 
[%s].", edgeName.c_str(), space.name.c_str()); + } + over.edgeTypes.emplace_back(edgeType.value()); + } + } + return Status::OK(); +} + +Status ValidateUtil::invalidLabelIdentifiers(const Expression* expr) { + auto labelExprs = ExpressionUtils::collectAll(expr, {Expression::Kind::kLabel}); + if (!labelExprs.empty()) { + std::stringstream ss; + ss << "Invalid label identifiers: "; + for (auto* label : labelExprs) { + ss << label->toString() << ","; + } + auto errMsg = ss.str(); + errMsg.pop_back(); + return Status::SemanticError(std::move(errMsg)); + } + return Status::OK(); +} + +} // namespace graph +} // namespace nebula diff --git a/src/graph/util/ValidateUtil.h b/src/graph/util/ValidateUtil.h new file mode 100644 index 00000000000..ea532e166ad --- /dev/null +++ b/src/graph/util/ValidateUtil.h @@ -0,0 +1,33 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License, + * attached with Common Clause Condition 1.0, found in the LICENSES directory. 
+ */ + +#ifndef GRAPH_UTIL_VALIDATE_UTIL_H_ +#define GRAPH_UTIL_VALIDATE_UTIL_H_ +#include "common/base/Base.h" +#include "common/base/StatusOr.h" +#include "common/expression/Expression.h" +#include "parser/Clauses.h" + +namespace nebula { +namespace graph { +class QueryContext; +class PlanNode; +struct Over; + +class ValidateUtil final { + public: + ValidateUtil() = delete; + + static Status validateStep(const StepClause* clause, StepClause& step); + + static Status validateOver(QueryContext* qctx, const OverClause* clause, Over& over); + + static Status invalidLabelIdentifiers(const Expression* expr); +}; + +} // namespace graph +} // namespace nebula +#endif // GRAPH_UTIL_VALIDATE_UTIL_H_ diff --git a/src/graph/util/test/CMakeLists.txt b/src/graph/util/test/CMakeLists.txt index f87291477e9..745ab224efe 100644 --- a/src/graph/util/test/CMakeLists.txt +++ b/src/graph/util/test/CMakeLists.txt @@ -32,6 +32,7 @@ nebula_add_test( $ $ $ + $ $ $ $ diff --git a/src/graph/validator/AdminJobValidator.cpp b/src/graph/validator/AdminJobValidator.cpp index e174699b6d6..c2d9cfd7793 100644 --- a/src/graph/validator/AdminJobValidator.cpp +++ b/src/graph/validator/AdminJobValidator.cpp @@ -49,8 +49,9 @@ Status AdminJobValidator::validateImpl() { } } } + } else { + sentence_->addPara(qctx()->rctx()->session()->space().name); } - return Status::OK(); } diff --git a/src/graph/validator/AdminJobValidator.h b/src/graph/validator/AdminJobValidator.h index 37a650f6c1d..ea96710e10a 100644 --- a/src/graph/validator/AdminJobValidator.h +++ b/src/graph/validator/AdminJobValidator.h @@ -38,6 +38,7 @@ class AdminJobValidator final : public Validator { case meta::cpp2::AdminCmd::COMPACT: case meta::cpp2::AdminCmd::FLUSH: return true; + // TODO: Also space related, but not available in CreateJobExcutor now. 
case meta::cpp2::AdminCmd::DATA_BALANCE: case meta::cpp2::AdminCmd::DOWNLOAD: case meta::cpp2::AdminCmd::INGEST: @@ -49,7 +50,7 @@ class AdminJobValidator final : public Validator { case meta::cpp2::AdminJobOp::SHOW: case meta::cpp2::AdminJobOp::STOP: case meta::cpp2::AdminJobOp::RECOVER: - return false; + return true; } return false; } diff --git a/src/graph/validator/AdminValidator.cpp b/src/graph/validator/AdminValidator.cpp index f797e4e08ca..1f64b1f5171 100644 --- a/src/graph/validator/AdminValidator.cpp +++ b/src/graph/validator/AdminValidator.cpp @@ -160,6 +160,20 @@ Status CreateSpaceValidator::toPlan() { return Status::OK(); } +Status CreateSpaceAsValidator::validateImpl() { + auto sentence = static_cast(sentence_); + oldSpaceName_ = sentence->getOldSpaceName(); + newSpaceName_ = sentence->getNewSpaceName(); + return Status::OK(); +} + +Status CreateSpaceAsValidator::toPlan() { + auto *doNode = CreateSpaceAsNode::make(qctx_, nullptr, oldSpaceName_, newSpaceName_); + root_ = doNode; + tail_ = root_; + return Status::OK(); +} + Status DescSpaceValidator::validateImpl() { return Status::OK(); } Status DescSpaceValidator::toPlan() { diff --git a/src/graph/validator/AdminValidator.h b/src/graph/validator/AdminValidator.h index bbd647436db..9eadf9c09fc 100644 --- a/src/graph/validator/AdminValidator.h +++ b/src/graph/validator/AdminValidator.h @@ -34,6 +34,22 @@ class CreateSpaceValidator final : public Validator { bool ifNotExist_; }; +class CreateSpaceAsValidator final : public Validator { + public: + CreateSpaceAsValidator(Sentence* sentence, QueryContext* context) : Validator(sentence, context) { + setNoSpaceRequired(); + } + + private: + Status validateImpl() override; + + Status toPlan() override; + + private: + std::string oldSpaceName_; + std::string newSpaceName_; +}; + class DescSpaceValidator final : public Validator { public: DescSpaceValidator(Sentence* sentence, QueryContext* context) : Validator(sentence, context) { diff --git 
a/src/graph/validator/CMakeLists.txt b/src/graph/validator/CMakeLists.txt index 9138284b442..0c84e7c290a 100644 --- a/src/graph/validator/CMakeLists.txt +++ b/src/graph/validator/CMakeLists.txt @@ -24,7 +24,6 @@ nebula_add_library( LimitValidator.cpp OrderByValidator.cpp YieldValidator.cpp - TraversalValidator.cpp ExplainValidator.cpp GroupByValidator.cpp FindPathValidator.cpp diff --git a/src/graph/validator/FetchEdgesValidator.cpp b/src/graph/validator/FetchEdgesValidator.cpp index 60c5636389f..8158906b940 100644 --- a/src/graph/validator/FetchEdgesValidator.cpp +++ b/src/graph/validator/FetchEdgesValidator.cpp @@ -9,6 +9,7 @@ #include "graph/planner/plan/Query.h" #include "graph/util/ExpressionUtils.h" #include "graph/util/SchemaUtil.h" +#include "graph/util/ValidateUtil.h" namespace nebula { namespace graph { @@ -184,7 +185,7 @@ Status FetchEdgesValidator::preparePropertiesWithYield(const YieldClause *yield) dedup_ = newYield_->isDistinct(); for (auto col : newYield_->columns()) { col->setExpr(ExpressionUtils::rewriteLabelAttr2EdgeProp(col->expr())); - NG_RETURN_IF_ERROR(invalidLabelIdentifiers(col->expr())); + NG_RETURN_IF_ERROR(ValidateUtil::invalidLabelIdentifiers(col->expr())); const auto *invalidExpr = findInvalidYieldExpression(col->expr()); if (invalidExpr != nullptr) { return Status::SemanticError("Invalid newYield_ expression `%s'.", diff --git a/src/graph/validator/FetchVerticesValidator.cpp b/src/graph/validator/FetchVerticesValidator.cpp index 10f870ea231..114c983a281 100644 --- a/src/graph/validator/FetchVerticesValidator.cpp +++ b/src/graph/validator/FetchVerticesValidator.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. +/* Copyright (c) 2021 vesoft inc. All rights reserved. * * This source code is licensed under Apache 2.0 License, * attached with Common Clause Condition 1.0, found in the LICENSES directory. 
@@ -7,7 +7,7 @@ #include "graph/planner/plan/Query.h" #include "graph/util/ExpressionUtils.h" -#include "graph/util/SchemaUtil.h" +#include "graph/util/ValidateUtil.h" #include "graph/visitor/DeducePropsVisitor.h" namespace nebula { @@ -16,238 +16,136 @@ namespace graph { static constexpr char VertexID[] = "VertexID"; Status FetchVerticesValidator::validateImpl() { - props_ = std::make_unique>(); - exprs_ = std::make_unique>(); - NG_RETURN_IF_ERROR(check()); - NG_RETURN_IF_ERROR(prepareVertices()); - NG_RETURN_IF_ERROR(prepareProperties()); - return Status::OK(); -} + auto *fSentence = static_cast(sentence_); + fetchCtx_ = getContext(); + fetchCtx_->inputVarName = inputVarName_; -Status FetchVerticesValidator::toPlan() { - // Start [-> some input] -> GetVertices [-> Project] [-> Dedup] [-> next - // stage] -> End - std::string vidsVar = (srcRef_ == nullptr ? buildConstantInput() : buildRuntimeInput()); - auto *getVerticesNode = GetVertices::make(qctx_, - nullptr, - space_.id, - src_, - std::move(props_), - std::move(exprs_), - dedup_, - std::move(orderBy_), - limit_, - filter_); - getVerticesNode->setInputVar(vidsVar); - getVerticesNode->setColNames(gvColNames_); - // pipe will set the input variable - PlanNode *current = getVerticesNode; - - if (withYield_) { - current = Project::make(qctx_, current, newYieldColumns_); - - // Project select properties then dedup - if (dedup_) { - current = Dedup::make(qctx_, current); - - // the framework will add data collect to collect the result - // if the result is required - } - } else { - auto *pool = qctx_->objPool(); - auto *columns = pool->add(new YieldColumns()); - columns->addColumn(new YieldColumn(VertexExpression::make(pool), "vertices_")); - current = Project::make(qctx_, current, columns); - } - root_ = current; - tail_ = getVerticesNode; + NG_RETURN_IF_ERROR(validateTag(fSentence->tags())); + NG_RETURN_IF_ERROR(validateStarts(fSentence->vertices(), fetchCtx_->from)); + 
NG_RETURN_IF_ERROR(validateYield(fSentence->yieldClause())); return Status::OK(); } -Status FetchVerticesValidator::check() { - auto *sentence = static_cast(sentence_); - - if (!sentence->isAllTagProps()) { - onStar_ = false; - auto tags = sentence->tags()->labels(); - for (const auto &tag : tags) { - auto tagStatus = qctx_->schemaMng()->toTagID(space_.id, *tag); - NG_RETURN_IF_ERROR(tagStatus); - auto tagId = tagStatus.value(); +Expression *FetchVerticesValidator::rewriteIDVertex2Vid(const Expression *expr) { + auto *pool = qctx_->objPool(); + auto matcher = [](const Expression *e) -> bool { + std::string lowerStr = e->toString(); + folly::toLowerAscii(lowerStr); + return e->kind() == Expression::Kind::kFunctionCall && lowerStr == "id(vertex)"; + }; + auto rewriter = [pool](const Expression *e) -> Expression * { + UNUSED(e); + return InputPropertyExpression::make(pool, nebula::kVid); + }; + + return RewriteVisitor::transform(expr, std::move(matcher), std::move(rewriter)); +} - tags_.emplace(*tag, tagId); - auto schema = qctx_->schemaMng()->getTagSchema(space_.id, tagId); - if (schema == nullptr) { - LOG(ERROR) << "No schema found for " << *tag; - return Status::SemanticError("No schema found for `%s'", tag->c_str()); - } - tagsSchema_.emplace(tagId, schema); - } - } else { - onStar_ = true; - const auto allTagsResult = qctx_->schemaMng()->getAllLatestVerTagSchema(space_.id); - NG_RETURN_IF_ERROR(allTagsResult); - const auto allTags = std::move(allTagsResult).value(); - for (const auto &tag : allTags) { +Status FetchVerticesValidator::validateTag(const NameLabelList *nameLabels) { + if (nameLabels == nullptr) { + // all tag + const auto &tagStatus = qctx_->schemaMng()->getAllLatestVerTagSchema(space_.id); + NG_RETURN_IF_ERROR(tagStatus); + for (const auto &tag : tagStatus.value()) { tagsSchema_.emplace(tag.first, tag.second); } - for (const auto &tagSchema : tagsSchema_) { - auto tagNameResult = qctx_->schemaMng()->toTagName(space_.id, tagSchema.first); - 
NG_RETURN_IF_ERROR(tagNameResult); - tags_.emplace(std::move(tagNameResult).value(), tagSchema.first); + } else { + auto labels = nameLabels->labels(); + auto *schemaMng = qctx_->schemaMng(); + for (const auto &label : labels) { + auto tagStatus = schemaMng->toTagID(space_.id, *label); + NG_RETURN_IF_ERROR(tagStatus); + auto tagID = tagStatus.value(); + auto tagSchema = schemaMng->getTagSchema(space_.id, tagID); + if (tagSchema == nullptr) { + return Status::SemanticError("no schema found for `%s'", label->c_str()); + } + tagsSchema_.emplace(tagID, tagSchema); } } return Status::OK(); } -Status FetchVerticesValidator::prepareVertices() { - auto *sentence = static_cast(sentence_); - // from ref, eval when execute - if (sentence->vertices()->isRef()) { - srcRef_ = sentence->vertices()->ref(); - auto result = checkRef(srcRef_, vidType_); - NG_RETURN_IF_ERROR(result); - inputVar_ = std::move(result).value(); - return Status::OK(); +Status FetchVerticesValidator::validateYield(YieldClause *yield) { + auto pool = qctx_->objPool(); + bool noYield = false; + if (yield == nullptr) { + // TODO: compatible with previous version, this will be deprecated in version 3.0. 
+ auto *yieldColumns = new YieldColumns(); + auto *vertex = new YieldColumn(VertexExpression::make(pool), "vertices_"); + yieldColumns->addColumn(vertex); + yield = pool->add(new YieldClause(yieldColumns)); + noYield = true; + } + fetchCtx_->distinct = yield->isDistinct(); + auto size = yield->columns().size(); + outputs_.reserve(size + 1); + + auto *newCols = pool->add(new YieldColumns()); + if (!noYield) { + outputs_.emplace_back(VertexID, vidType_); + auto *vidCol = new YieldColumn(InputPropertyExpression::make(pool, nebula::kVid), VertexID); + newCols->addColumn(vidCol); } - // from constant, eval now - // TODO(shylock) add eval() method for expression - QueryExpressionContext dummy(nullptr); - auto vids = sentence->vertices()->vidList(); - srcVids_.rows.reserve(vids.size()); - for (const auto vid : vids) { - DCHECK(ExpressionUtils::isConstExpr(vid)); - auto v = vid->eval(dummy); - if (v.type() != vidType_) { - std::stringstream ss; - ss << "`" << vid->toString() << "', the vid should be type of " << vidType_ << ", but was`" - << v.type() << "'"; - return Status::SemanticError(ss.str()); + auto &exprProps = fetchCtx_->exprProps; + for (const auto &col : yield->columns()) { + if (col->expr()->kind() == Expression::Kind::kVertex) { + extractVertexProp(exprProps); + break; } - srcVids_.emplace_back(nebula::Row({std::move(v)})); } - return Status::OK(); -} -// TODO(shylock) select _vid property instead of return always. 
-Status FetchVerticesValidator::prepareProperties() { - auto *sentence = static_cast(sentence_); - auto *yield = sentence->yieldClause(); - if (yield == nullptr) { - return preparePropertiesWithoutYield(); - } else { - return preparePropertiesWithYield(yield); - } -} - -Status FetchVerticesValidator::preparePropertiesWithYield(const YieldClause *yield) { - withYield_ = true; - // outputs - auto yieldSize = yield->columns().size(); - outputs_.reserve(yieldSize + 1); - gvColNames_.emplace_back(nebula::kVid); - outputs_.emplace_back(VertexID, vidType_); // kVid - - dedup_ = yield->isDistinct(); - ExpressionProps exprProps; - DeducePropsVisitor deducePropsVisitor(qctx_, space_.id, &exprProps, &userDefinedVarNameList_); - - auto *pool = qctx_->objPool(); for (auto col : yield->columns()) { - col->setExpr(ExpressionUtils::rewriteLabelAttr2TagProp(col->expr())); - NG_RETURN_IF_ERROR(invalidLabelIdentifiers(col->expr())); - col->expr()->accept(&deducePropsVisitor); - if (!deducePropsVisitor.ok()) { - return std::move(deducePropsVisitor).status(); - } - if (exprProps.hasInputVarProperty()) { - return Status::SemanticError("Unsupported input/variable property expression in yield."); + if (ExpressionUtils::hasAny(col->expr(), + {Expression::Kind::kEdge, Expression::Kind::kPathBuild})) { + return Status::SemanticError("illegal yield clauses `%s'", col->toString().c_str()); } - if (!exprProps.edgeProps().empty()) { - return Status::SemanticError("Unsupported edge property expression in yield."); - } - if (exprProps.hasSrcDstTagProperty()) { - return Status::SemanticError("Unsupported src/dst property expression in yield."); + col->setExpr(ExpressionUtils::rewriteLabelAttr2TagProp(col->expr())); + NG_RETURN_IF_ERROR(ValidateUtil::invalidLabelIdentifiers(col->expr())); + auto colExpr = col->expr(); + auto typeStatus = deduceExprType(colExpr); + NG_RETURN_IF_ERROR(typeStatus); + outputs_.emplace_back(col->name(), typeStatus.value()); + if (colExpr->kind() == 
Expression::Kind::kFunctionCall) { + col->setAlias(col->name()); + col->setExpr(rewriteIDVertex2Vid(colExpr)); } + newCols->addColumn(col->clone().release()); - auto typeResult = deduceExprType(col->expr()); - NG_RETURN_IF_ERROR(typeResult); - outputs_.emplace_back(col->name(), typeResult.value()); - // TODO(shylock) think about the push-down expr + NG_RETURN_IF_ERROR(deduceProps(colExpr, exprProps)); } if (exprProps.tagProps().empty()) { - return Status::SemanticError("Unsupported empty tag property expression in yield."); + for (const auto &tagSchema : tagsSchema_) { + exprProps.insertTagProp(tagSchema.first, nebula::kTag); + } } + fetchCtx_->yieldExpr = newCols; - for (const auto &tag : exprProps.tagNameIds()) { - if (tags_.find(tag.first) == tags_.end()) { - return Status::SemanticError("Mismatched tag: %s", tag.first.c_str()); - } + if (exprProps.hasInputVarProperty()) { + return Status::SemanticError("unsupported input/variable property expression in yield."); } - for (const auto &tagNameId : exprProps.tagNameIds()) { - storage::cpp2::VertexProp vProp; - std::vector propNames; - propNames.reserve(exprProps.tagProps().at(tagNameId.second).size()); - vProp.set_tag(tagNameId.second); - for (const auto &prop : exprProps.tagProps().at(tagNameId.second)) { - propNames.emplace_back(prop.toString()); - gvColNames_.emplace_back(tagNameId.first + "." 
+ prop.toString()); - } - vProp.set_props(std::move(propNames)); - props_->emplace_back(std::move(vProp)); + if (exprProps.hasSrcDstTagProperty()) { + return Status::SemanticError("unsupported src/dst property expression in yield."); } - // insert the reserved properties expression be compatible with 1.0 - // TODO(shylock) select kVid from storage - newYieldColumns_ = qctx_->objPool()->add(new YieldColumns()); - // note eval vid by input expression - newYieldColumns_->addColumn( - new YieldColumn(InputPropertyExpression::make(pool, nebula::kVid), VertexID)); - for (auto col : yield->columns()) { - newYieldColumns_->addColumn(col->clone().release()); + for (const auto &tag : exprProps.tagNameIds()) { + if (tagsSchema_.find(tag.second) == tagsSchema_.end()) { + return Status::SemanticError("mismatched tag `%s'", tag.first.c_str()); + } } return Status::OK(); } -Status FetchVerticesValidator::preparePropertiesWithoutYield() { - props_->clear(); - outputs_.emplace_back("vertices_", Value::Type::VERTEX); - gvColNames_.emplace_back(nebula::kVid); +void FetchVerticesValidator::extractVertexProp(ExpressionProps &exprProps) { for (const auto &tagSchema : tagsSchema_) { - storage::cpp2::VertexProp vProp; - vProp.set_tag(tagSchema.first); - std::vector propNames; - propNames.reserve(tagSchema.second->getNumFields() + 1); - auto tagNameResult = qctx_->schemaMng()->toTagName(space_.id, tagSchema.first); - NG_RETURN_IF_ERROR(tagNameResult); - auto tagName = std::move(tagNameResult).value(); + auto tagID = tagSchema.first; + exprProps.insertTagProp(tagID, nebula::kTag); for (std::size_t i = 0; i < tagSchema.second->getNumFields(); ++i) { const auto propName = tagSchema.second->getFieldName(i); - propNames.emplace_back(propName); - gvColNames_.emplace_back(tagName + "." 
+ propName); + exprProps.insertTagProp(tagID, propName); } - gvColNames_.emplace_back(tagName + "._tag"); - propNames.emplace_back(nebula::kTag); // "_tag" - vProp.set_props(std::move(propNames)); - props_->emplace_back(std::move(vProp)); } - return Status::OK(); -} - -// TODO(shylock) optimize dedup input when distinct given -std::string FetchVerticesValidator::buildConstantInput() { - auto input = vctx_->anonVarGen()->getVar(); - qctx_->ectx()->setResult(input, ResultBuilder().value(Value(std::move(srcVids_))).build()); - - auto *pool = qctx_->objPool(); - src_ = VariablePropertyExpression::make(pool, input, kVid); - return input; -} - -std::string FetchVerticesValidator::buildRuntimeInput() { - src_ = DCHECK_NOTNULL(srcRef_); - return inputVar_; } } // namespace graph diff --git a/src/graph/validator/FetchVerticesValidator.h b/src/graph/validator/FetchVerticesValidator.h index b915a3cc0de..608aa39ffd7 100644 --- a/src/graph/validator/FetchVerticesValidator.h +++ b/src/graph/validator/FetchVerticesValidator.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. +/* Copyright (c) 2021 vesoft inc. All rights reserved. * * This source code is licensed under Apache 2.0 License, * attached with Common Clause Condition 1.0, found in the LICENSES directory. 
@@ -7,8 +7,8 @@ #ifndef _VALIDATOR_FETCH_VERTICES_VALIDATOR_H_ #define _VALIDATOR_FETCH_VERTICES_VALIDATOR_H_ +#include "graph/context/ast/QueryAstContext.h" #include "graph/validator/Validator.h" -#include "interface/gen-cpp2/storage_types.h" #include "parser/TraverseSentences.h" namespace nebula { @@ -16,52 +16,26 @@ namespace graph { class FetchVerticesValidator final : public Validator { public: - using VertexProp = nebula::storage::cpp2::VertexProp; - using Expr = nebula::storage::cpp2::Expr; FetchVerticesValidator(Sentence* sentence, QueryContext* context) : Validator(sentence, context) {} private: Status validateImpl() override; - Status toPlan() override; + Status validateTag(const NameLabelList* nameLables); - Status check(); + Status validateYield(YieldClause* yield); - Status prepareVertices(); + AstContext* getAstContext() override { return fetchCtx_.get(); } - Status preparePropertiesWithYield(const YieldClause* yield); - Status preparePropertiesWithoutYield(); - Status prepareProperties(); + void extractVertexProp(ExpressionProps& exprProps); - // TODO(shylock) merge the code - std::string buildConstantInput(); - std::string buildRuntimeInput(); + Expression* rewriteIDVertex2Vid(const Expression* expr); private: - // src from constant - DataSet srcVids_{{kVid}}; - // src from runtime - Expression* srcRef_{nullptr}; - Expression* src_{nullptr}; - bool onStar_{false}; - std::unordered_map tags_; std::map> tagsSchema_; - std::unique_ptr> props_; - std::unique_ptr> exprs_; - bool dedup_{false}; - std::vector orderBy_{}; - int64_t limit_{std::numeric_limits::max()}; - Expression* filter_{nullptr}; - // valid when yield expression not require storage - // So expression like these will be evaluate in Project Executor - bool withYield_{false}; - // outputs - std::vector gvColNames_; - // new yield to inject reserved properties for compatible with 1.0 - YieldColumns* newYieldColumns_{nullptr}; - // input - std::string inputVar_; // empty when pipe or no input 
in fact + + std::unique_ptr fetchCtx_; }; } // namespace graph diff --git a/src/graph/validator/FindPathValidator.cpp b/src/graph/validator/FindPathValidator.cpp index c7d4c5384d1..2f4975149f9 100644 --- a/src/graph/validator/FindPathValidator.cpp +++ b/src/graph/validator/FindPathValidator.cpp @@ -9,6 +9,7 @@ #include "common/expression/VariableExpression.h" #include "graph/planner/plan/Algo.h" #include "graph/planner/plan/Logic.h" +#include "graph/util/ValidateUtil.h" namespace nebula { namespace graph { @@ -22,9 +23,9 @@ Status FindPathValidator::validateImpl() { NG_RETURN_IF_ERROR(validateStarts(fpSentence->from(), pathCtx_->from)); NG_RETURN_IF_ERROR(validateStarts(fpSentence->to(), pathCtx_->to)); - NG_RETURN_IF_ERROR(validateOver(fpSentence->over(), pathCtx_->over)); + NG_RETURN_IF_ERROR(ValidateUtil::validateOver(qctx_, fpSentence->over(), pathCtx_->over)); NG_RETURN_IF_ERROR(validateWhere(fpSentence->where())); - NG_RETURN_IF_ERROR(validateStep(fpSentence->step(), pathCtx_->steps)); + NG_RETURN_IF_ERROR(ValidateUtil::validateStep(fpSentence->step(), pathCtx_->steps)); outputs_.emplace_back("path", Value::Type::PATH); return Status::OK(); diff --git a/src/graph/validator/FindPathValidator.h b/src/graph/validator/FindPathValidator.h index 2cc367dc197..0f2d03a23ec 100644 --- a/src/graph/validator/FindPathValidator.h +++ b/src/graph/validator/FindPathValidator.h @@ -9,15 +9,14 @@ #include "common/base/Base.h" #include "graph/context/ast/QueryAstContext.h" -#include "graph/validator/TraversalValidator.h" +#include "graph/validator/Validator.h" namespace nebula { namespace graph { -class FindPathValidator final : public TraversalValidator { +class FindPathValidator final : public Validator { public: - FindPathValidator(Sentence* sentence, QueryContext* context) - : TraversalValidator(sentence, context) {} + FindPathValidator(Sentence* sentence, QueryContext* context) : Validator(sentence, context) {} private: Status validateImpl() override; diff --git 
a/src/graph/validator/GetSubgraphValidator.cpp b/src/graph/validator/GetSubgraphValidator.cpp index 2f4fc5f68d3..fd7cf6fb184 100644 --- a/src/graph/validator/GetSubgraphValidator.cpp +++ b/src/graph/validator/GetSubgraphValidator.cpp @@ -14,6 +14,7 @@ #include "graph/context/QueryExpressionContext.h" #include "graph/planner/plan/Logic.h" #include "graph/planner/plan/Query.h" +#include "graph/util/ValidateUtil.h" #include "parser/TraverseSentences.h" namespace nebula { @@ -24,7 +25,7 @@ Status GetSubgraphValidator::validateImpl() { subgraphCtx_ = getContext(); subgraphCtx_->withProp = gsSentence->withProp(); - NG_RETURN_IF_ERROR(validateStep(gsSentence->step(), subgraphCtx_->steps)); + NG_RETURN_IF_ERROR(ValidateUtil::validateStep(gsSentence->step(), subgraphCtx_->steps)); NG_RETURN_IF_ERROR(validateStarts(gsSentence->from(), subgraphCtx_->from)); NG_RETURN_IF_ERROR(validateInBound(gsSentence->in())); NG_RETURN_IF_ERROR(validateOutBound(gsSentence->out())); diff --git a/src/graph/validator/GetSubgraphValidator.h b/src/graph/validator/GetSubgraphValidator.h index 1bcfce43e03..e51bfefc4f1 100644 --- a/src/graph/validator/GetSubgraphValidator.h +++ b/src/graph/validator/GetSubgraphValidator.h @@ -8,15 +8,14 @@ #define GRAPH_VALIDATOR_GETSUBGRAPHVALIDATOR_H_ #include "graph/context/ast/QueryAstContext.h" -#include "graph/validator/TraversalValidator.h" +#include "graph/validator/Validator.h" #include "parser/Clauses.h" namespace nebula { namespace graph { -class GetSubgraphValidator final : public TraversalValidator { +class GetSubgraphValidator final : public Validator { public: - GetSubgraphValidator(Sentence* sentence, QueryContext* context) - : TraversalValidator(sentence, context) {} + GetSubgraphValidator(Sentence* sentence, QueryContext* context) : Validator(sentence, context) {} private: Status validateImpl() override; diff --git a/src/graph/validator/GoValidator.cpp b/src/graph/validator/GoValidator.cpp index a6f6c5f715a..ed43b87d03c 100644 --- 
a/src/graph/validator/GoValidator.cpp +++ b/src/graph/validator/GoValidator.cpp @@ -10,6 +10,7 @@ #include "common/expression/VariableExpression.h" #include "graph/planner/plan/Logic.h" #include "graph/util/ExpressionUtils.h" +#include "graph/util/ValidateUtil.h" #include "graph/visitor/ExtractPropExprVisitor.h" #include "parser/TraverseSentences.h" @@ -20,9 +21,9 @@ Status GoValidator::validateImpl() { goCtx_ = getContext(); goCtx_->inputVarName = inputVarName_; - NG_RETURN_IF_ERROR(validateStep(goSentence->stepClause(), goCtx_->steps)); + NG_RETURN_IF_ERROR(ValidateUtil::validateStep(goSentence->stepClause(), goCtx_->steps)); NG_RETURN_IF_ERROR(validateStarts(goSentence->fromClause(), goCtx_->from)); - NG_RETURN_IF_ERROR(validateOver(goSentence->overClause(), goCtx_->over)); + NG_RETURN_IF_ERROR(ValidateUtil::validateOver(qctx_, goSentence->overClause(), goCtx_->over)); NG_RETURN_IF_ERROR(validateWhere(goSentence->whereClause())); NG_RETURN_IF_ERROR(validateYield(goSentence->yieldClause())); NG_RETURN_IF_ERROR(validateTruncate(goSentence->truncateClause())); @@ -116,9 +117,6 @@ Status GoValidator::validateTruncate(TruncateClause* truncate) { } Status GoValidator::validateYield(YieldClause* yield) { - if (yield == nullptr) { - return Status::SemanticError("Yield clause nullptr."); - } goCtx_->distinct = yield->isDistinct(); const auto& over = goCtx_->over; auto* pool = qctx_->objPool(); @@ -140,7 +138,7 @@ Status GoValidator::validateYield(YieldClause* yield) { for (auto col : cols) { col->setExpr(ExpressionUtils::rewriteLabelAttr2EdgeProp(col->expr())); - NG_RETURN_IF_ERROR(invalidLabelIdentifiers(col->expr())); + NG_RETURN_IF_ERROR(ValidateUtil::invalidLabelIdentifiers(col->expr())); auto* colExpr = col->expr(); if (graph::ExpressionUtils::findAny(colExpr, {Expression::Kind::kAggregate})) { diff --git a/src/graph/validator/GoValidator.h b/src/graph/validator/GoValidator.h index 140bf92530c..e2f97866b17 100644 --- a/src/graph/validator/GoValidator.h +++ 
b/src/graph/validator/GoValidator.h @@ -9,16 +9,16 @@ #include "graph/context/ast/QueryAstContext.h" #include "graph/planner/plan/Query.h" -#include "graph/validator/TraversalValidator.h" +#include "graph/validator/Validator.h" namespace nebula { namespace graph { -class GoValidator final : public TraversalValidator { +class GoValidator final : public Validator { public: using VertexProp = nebula::storage::cpp2::VertexProp; using EdgeProp = nebula::storage::cpp2::EdgeProp; - GoValidator(Sentence* sentence, QueryContext* context) : TraversalValidator(sentence, context) {} + GoValidator(Sentence* sentence, QueryContext* context) : Validator(sentence, context) {} private: Status validateImpl() override; diff --git a/src/graph/validator/MatchValidator.cpp b/src/graph/validator/MatchValidator.cpp index 4d8823e6a33..d5e8a3a92d6 100644 --- a/src/graph/validator/MatchValidator.cpp +++ b/src/graph/validator/MatchValidator.cpp @@ -13,7 +13,7 @@ namespace nebula { namespace graph { MatchValidator::MatchValidator(Sentence *sentence, QueryContext *context) - : TraversalValidator(sentence, context) { + : Validator(sentence, context) { matchCtx_ = getContext(); } @@ -353,6 +353,12 @@ Status MatchValidator::validateReturn(MatchReturn *ret, } if (ret->returnItems()->columns()) { for (auto *column : ret->returnItems()->columns()->columns()) { + if (ExpressionUtils::hasAny(column->expr(), + {Expression::Kind::kVertex, Expression::Kind::kEdge})) { + return Status::SemanticError( + "keywords: vertex and edge are not supported in return clause `%s'", + column->toString().c_str()); + } columns->addColumn(column->clone().release()); } } diff --git a/src/graph/validator/MatchValidator.h b/src/graph/validator/MatchValidator.h index 997744b2107..762589a2dc8 100644 --- a/src/graph/validator/MatchValidator.h +++ b/src/graph/validator/MatchValidator.h @@ -11,14 +11,14 @@ #include "graph/context/ast/CypherAstContext.h" #include "graph/planner/plan/Query.h" #include 
"graph/util/AnonVarGenerator.h" -#include "graph/validator/TraversalValidator.h" +#include "graph/validator/Validator.h" namespace nebula { class MatchStepRange; class ObjectPool; namespace graph { -class MatchValidator final : public TraversalValidator { +class MatchValidator final : public Validator { public: MatchValidator(Sentence *sentence, QueryContext *context); diff --git a/src/graph/validator/MutateValidator.cpp b/src/graph/validator/MutateValidator.cpp index a99edceadb5..d913e3cbad6 100644 --- a/src/graph/validator/MutateValidator.cpp +++ b/src/graph/validator/MutateValidator.cpp @@ -148,7 +148,7 @@ Status InsertEdgesValidator::toPlan() { nullptr, spaceId_, std::move(edges_), - std::move(propNames_), + std::move(entirePropNames_), ifNotExists_, useChainInsert); root_ = doNode; @@ -196,6 +196,11 @@ Status InsertEdgesValidator::prepareEdges() { auto useToss = isoLevel == IsoLevel::TOSS; auto size = useToss ? rows_.size() : rows_.size() * 2; edges_.reserve(size); + + size_t fieldNum = schema_->getNumFields(); + for (size_t j = 0; j < fieldNum; ++j) { + entirePropNames_.emplace_back(schema_->field(j)->name()); + } for (auto i = 0u; i < rows_.size(); i++) { auto *row = rows_[i]; if (propNames_.size() != row->values().size()) { @@ -233,6 +238,34 @@ Status InsertEdgesValidator::prepareEdges() { auto valsRet = SchemaUtil::toValueVec(row->values()); NG_RETURN_IF_ERROR(valsRet); auto props = std::move(valsRet).value(); + + std::vector entirePropValues; + for (size_t j = 0; j < fieldNum; ++j) { + auto *field = schema_->field(j); + auto propName = entirePropNames_[j]; + auto iter = std::find(propNames_.begin(), propNames_.end(), propName); + if (iter == propNames_.end()) { + if (field->hasDefault()) { + auto *defaultValue = field->defaultValue(); + DCHECK(!!defaultValue); + auto v = defaultValue->eval(QueryExpressionContext()(nullptr)); + entirePropValues.emplace_back(v); + } else { + if (!field->nullable()) { + return Status::SemanticError( + "The property `%s' is 
not nullable and has no default value.", field->name()); + } + entirePropValues.emplace_back(Value(NullType::__NULL__)); + } + } else { + auto v = props[std::distance(propNames_.begin(), iter)]; + if (!field->nullable() && v.isNull()) { + return Status::SemanticError("The non-nullable property `%s' could not be NULL.", + field->name()); + } + entirePropValues.emplace_back(v); + } + } storage::cpp2::NewEdge edge; storage::cpp2::EdgeKey key; @@ -241,7 +274,7 @@ Status InsertEdgesValidator::prepareEdges() { key.set_edge_type(edgeType_); key.set_ranking(rank); edge.set_key(key); - edge.set_props(std::move(props)); + edge.set_props(std::move(entirePropValues)); edges_.emplace_back(edge); if (!useToss) { // inbound diff --git a/src/graph/validator/MutateValidator.h b/src/graph/validator/MutateValidator.h index 8ebd2a876df..58464c32b74 100644 --- a/src/graph/validator/MutateValidator.h +++ b/src/graph/validator/MutateValidator.h @@ -58,6 +58,7 @@ class InsertEdgesValidator final : public Validator { EdgeType edgeType_{-1}; std::shared_ptr schema_; std::vector propNames_; + std::vector entirePropNames_; std::vector rows_; std::vector edges_; }; diff --git a/src/graph/validator/TraversalValidator.cpp b/src/graph/validator/TraversalValidator.cpp deleted file mode 100644 index 01cb47f0bb3..00000000000 --- a/src/graph/validator/TraversalValidator.cpp +++ /dev/null @@ -1,127 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License, - * attached with Common Clause Condition 1.0, found in the LICENSES directory. 
- */ - -#include "graph/validator/TraversalValidator.h" - -#include - -#include "common/expression/VariableExpression.h" -#include "graph/util/SchemaUtil.h" - -namespace nebula { -namespace graph { - -Status TraversalValidator::validateStarts(const VerticesClause* clause, Starts& starts) { - if (clause == nullptr) { - return Status::SemanticError("From clause nullptr."); - } - if (clause->isRef()) { - auto* src = clause->ref(); - if (src->kind() != Expression::Kind::kInputProperty && - src->kind() != Expression::Kind::kVarProperty) { - return Status::SemanticError( - "`%s', Only input and variable expression is acceptable" - " when starts are evaluated at runtime.", - src->toString().c_str()); - } - starts.fromType = src->kind() == Expression::Kind::kInputProperty ? kPipe : kVariable; - auto type = deduceExprType(src); - if (!type.ok()) { - return type.status(); - } - auto vidType = space_.spaceDesc.vid_type_ref().value().get_type(); - if (type.value() != SchemaUtil::propTypeToValueType(vidType)) { - std::stringstream ss; - ss << "`" << src->toString() << "', the srcs should be type of " - << apache::thrift::util::enumNameSafe(vidType) << ", but was`" << type.value() << "'"; - return Status::SemanticError(ss.str()); - } - starts.originalSrc = src; - auto* propExpr = static_cast(src); - if (starts.fromType == kVariable) { - starts.userDefinedVarName = propExpr->sym(); - userDefinedVarNameList_.emplace(starts.userDefinedVarName); - } - starts.runtimeVidName = propExpr->prop(); - } else { - auto vidList = clause->vidList(); - QueryExpressionContext ctx; - for (auto* expr : vidList) { - if (!evaluableExpr(expr)) { - return Status::SemanticError("`%s' is not an evaluable expression.", - expr->toString().c_str()); - } - auto vid = expr->eval(ctx(nullptr)); - auto vidType = space_.spaceDesc.vid_type_ref().value().get_type(); - if (!SchemaUtil::isValidVid(vid, vidType)) { - std::stringstream ss; - ss << "Vid should be a " << apache::thrift::util::enumNameSafe(vidType); - 
return Status::SemanticError(ss.str()); - } - starts.vids.emplace_back(std::move(vid)); - } - } - return Status::OK(); -} - -Status TraversalValidator::validateOver(const OverClause* clause, Over& over) { - if (clause == nullptr) { - return Status::SemanticError("Over clause nullptr."); - } - - over.direction = clause->direction(); - auto* schemaMng = qctx_->schemaMng(); - if (clause->isOverAll()) { - auto allEdgeStatus = schemaMng->getAllEdge(space_.id); - NG_RETURN_IF_ERROR(allEdgeStatus); - auto edges = std::move(allEdgeStatus).value(); - if (edges.empty()) { - return Status::SemanticError("No edge type found in space `%s'", space_.name.c_str()); - } - for (auto edge : edges) { - auto edgeType = schemaMng->toEdgeType(space_.id, edge); - if (!edgeType.ok()) { - return Status::SemanticError( - "`%s' not found in space [`%s'].", edge.c_str(), space_.name.c_str()); - } - over.edgeTypes.emplace_back(edgeType.value()); - } - over.allEdges = std::move(edges); - over.isOverAll = true; - } else { - auto edges = clause->edges(); - for (auto* edge : edges) { - auto edgeName = *edge->edge(); - auto edgeType = schemaMng->toEdgeType(space_.id, edgeName); - if (!edgeType.ok()) { - return Status::SemanticError( - "%s not found in space [%s].", edgeName.c_str(), space_.name.c_str()); - } - over.edgeTypes.emplace_back(edgeType.value()); - } - } - return Status::OK(); -} - -Status TraversalValidator::validateStep(const StepClause* clause, StepClause& step) { - if (clause == nullptr) { - return Status::SemanticError("Step clause nullptr."); - } - step = *clause; - if (clause->isMToN()) { - if (step.mSteps() == 0) { - step.setMSteps(1); - } - if (step.nSteps() < step.mSteps()) { - return Status::SemanticError("`%s', upper bound steps should be greater than lower bound.", - step.toString().c_str()); - } - } - return Status::OK(); -} - -} // namespace graph -} // namespace nebula diff --git a/src/graph/validator/TraversalValidator.h b/src/graph/validator/TraversalValidator.h deleted 
file mode 100644 index d6a622c1493..00000000000 --- a/src/graph/validator/TraversalValidator.h +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License, - * attached with Common Clause Condition 1.0, found in the LICENSES directory. - */ - -#ifndef GRAPH_VALIDATOR_TRAVERSALVALIDATOR_H_ -#define GRAPH_VALIDATOR_TRAVERSALVALIDATOR_H_ - -#include "common/base/Base.h" -#include "graph/context/ast/QueryAstContext.h" -#include "graph/planner/plan/Query.h" -#include "graph/util/ExpressionUtils.h" -#include "graph/validator/Validator.h" - -namespace nebula { -namespace graph { - -// some utils for the validator to traverse the graph -class TraversalValidator : public Validator { - protected: - TraversalValidator(Sentence* sentence, QueryContext* qctx) : Validator(sentence, qctx) {} - - Status validateStarts(const VerticesClause* clause, Starts& starts); - - Status validateOver(const OverClause* clause, Over& over); - - Status validateStep(const StepClause* clause, StepClause& step); -}; - -} // namespace graph -} // namespace nebula - -#endif diff --git a/src/graph/validator/Validator.cpp b/src/graph/validator/Validator.cpp index dd5f330a2e3..dea25aba4d5 100644 --- a/src/graph/validator/Validator.cpp +++ b/src/graph/validator/Validator.cpp @@ -6,6 +6,8 @@ #include "graph/validator/Validator.h" +#include + #include "common/function/FunctionManager.h" #include "graph/planner/plan/Query.h" #include "graph/util/ExpressionUtils.h" @@ -78,6 +80,8 @@ std::unique_ptr Validator::makeValidator(Sentence* sentence, QueryCon return std::make_unique(sentence, context); case Sentence::Kind::kCreateSpace: return std::make_unique(sentence, context); + case Sentence::Kind::kCreateSpaceAs: + return std::make_unique(sentence, context); case Sentence::Kind::kCreateTag: return std::make_unique(sentence, context); case Sentence::Kind::kCreateEdge: @@ -449,20 +453,58 @@ Status 
Validator::checkDuplicateColName() { return Status::OK(); } -Status Validator::invalidLabelIdentifiers(const Expression* expr) const { - auto labelExprs = ExpressionUtils::collectAll(expr, {Expression::Kind::kLabel}); - if (!labelExprs.empty()) { - std::stringstream ss; - ss << "Invalid label identifiers: "; - for (auto* label : labelExprs) { - ss << label->toString() << ","; +Status Validator::validateStarts(const VerticesClause* clause, Starts& starts) { + if (clause == nullptr) { + return Status::SemanticError("From clause nullptr."); + } + if (clause->isRef()) { + auto* src = clause->ref(); + if (src->kind() != Expression::Kind::kInputProperty && + src->kind() != Expression::Kind::kVarProperty) { + return Status::SemanticError( + "`%s', Only input and variable expression is acceptable" + " when starts are evaluated at runtime.", + src->toString().c_str()); + } + starts.fromType = src->kind() == Expression::Kind::kInputProperty ? kPipe : kVariable; + auto type = deduceExprType(src); + if (!type.ok()) { + return type.status(); + } + auto vidType = space_.spaceDesc.vid_type_ref().value().get_type(); + if (type.value() != SchemaUtil::propTypeToValueType(vidType)) { + std::stringstream ss; + ss << "`" << src->toString() << "', the srcs should be type of " + << apache::thrift::util::enumNameSafe(vidType) << ", but was`" << type.value() << "'"; + return Status::SemanticError(ss.str()); + } + starts.originalSrc = src; + auto* propExpr = static_cast(src); + if (starts.fromType == kVariable) { + starts.userDefinedVarName = propExpr->sym(); + userDefinedVarNameList_.emplace(starts.userDefinedVarName); + } + starts.runtimeVidName = propExpr->prop(); + } else { + auto vidList = clause->vidList(); + QueryExpressionContext ctx; + for (auto* expr : vidList) { + if (!evaluableExpr(expr)) { + return Status::SemanticError("`%s' is not an evaluable expression.", + expr->toString().c_str()); + } + auto vid = expr->eval(ctx(nullptr)); + auto vidType = 
space_.spaceDesc.vid_type_ref().value().get_type(); + if (!SchemaUtil::isValidVid(vid, vidType)) { + std::stringstream ss; + ss << "Vid should be a " << apache::thrift::util::enumNameSafe(vidType); + return Status::SemanticError(ss.str()); + } + starts.vids.emplace_back(std::move(vid)); } - auto errMsg = ss.str(); - errMsg.pop_back(); - return Status::SemanticError(std::move(errMsg)); } - return Status::OK(); } + } // namespace graph } // namespace nebula diff --git a/src/graph/validator/Validator.h b/src/graph/validator/Validator.h index 69e8bcbf8d1..3b204af51ab 100644 --- a/src/graph/validator/Validator.h +++ b/src/graph/validator/Validator.h @@ -19,7 +19,7 @@ namespace nebula { namespace graph { - +struct Starts; class Validator { public: virtual ~Validator() = default; @@ -127,14 +127,14 @@ class Validator { return Status::OK(); } + // Check the output for duplicate column names + Status checkDuplicateColName(); + // Check the variable or input property reference // return the input variable StatusOr checkRef(const Expression* ref, const Value::Type type); - // Check the output for duplicate column names - Status checkDuplicateColName(); - - Status invalidLabelIdentifiers(const Expression* expr) const; + Status validateStarts(const VerticesClause* clause, Starts& starts); template std::unique_ptr getContext() const { diff --git a/src/graph/validator/YieldValidator.cpp b/src/graph/validator/YieldValidator.cpp index fa61b759658..e36c57f9271 100644 --- a/src/graph/validator/YieldValidator.cpp +++ b/src/graph/validator/YieldValidator.cpp @@ -10,6 +10,7 @@ #include "graph/context/QueryContext.h" #include "graph/planner/plan/Query.h" #include "graph/util/ExpressionUtils.h" +#include "graph/util/ValidateUtil.h" #include "parser/Clauses.h" #include "parser/TraverseSentences.h" @@ -111,7 +112,7 @@ Status YieldValidator::validateYieldAndBuildOutputs(const YieldClause *clause) { columns_ = pool->add(new YieldColumns); for (auto column : columns) { auto expr = 
DCHECK_NOTNULL(column->expr()); - NG_RETURN_IF_ERROR(invalidLabelIdentifiers(expr)); + NG_RETURN_IF_ERROR(ValidateUtil::invalidLabelIdentifiers(expr)); if (expr->kind() == Expression::Kind::kInputProperty) { auto ipe = static_cast(expr); diff --git a/src/graph/validator/test/CMakeLists.txt b/src/graph/validator/test/CMakeLists.txt index d0b9077c44d..487e7791f4a 100644 --- a/src/graph/validator/test/CMakeLists.txt +++ b/src/graph/validator/test/CMakeLists.txt @@ -51,6 +51,7 @@ set(VALIDATOR_TEST_LIBS $ $ $ + $ ) nebula_add_test( diff --git a/src/graph/validator/test/FetchVerticesTest.cpp b/src/graph/validator/test/FetchVerticesTest.cpp index d574882b93c..2d83805a4cb 100644 --- a/src/graph/validator/test/FetchVerticesTest.cpp +++ b/src/graph/validator/test/FetchVerticesTest.cpp @@ -23,7 +23,7 @@ class FetchVerticesValidatorTest : public ValidatorTestBase { }; TEST_F(FetchVerticesValidatorTest, FetchVerticesProp) { - auto src = VariablePropertyExpression::make(pool_.get(), "_VARNAME_", "VertexID"); + auto src = ColumnExpression::make(pool_.get(), 0); { auto qctx = getQCtx("FETCH PROP ON person \"1\""); auto *pool = qctx->objPool(); @@ -42,7 +42,6 @@ TEST_F(FetchVerticesValidatorTest, FetchVerticesProp) { auto yieldColumns = std::make_unique(); yieldColumns->addColumn(new YieldColumn(VertexExpression::make(pool), "vertices_")); auto *project = Project::make(qctx, gv, yieldColumns.get()); - project->setColNames({"vertices_"}); auto result = Eq(qctx->plan()->root(), project); ASSERT_TRUE(result.ok()) << result; } @@ -73,7 +72,6 @@ TEST_F(FetchVerticesValidatorTest, FetchVerticesProp) { auto yieldColumns = std::make_unique(); yieldColumns->addColumn(new YieldColumn(VertexExpression::make(pool), "vertices_")); auto *project = Project::make(qctx, gv, yieldColumns.get()); - project->setColNames({"vertices_"}); auto result = Eq(qctx->plan()->root(), project); ASSERT_TRUE(result.ok()) << result; } @@ -407,7 +405,6 @@ TEST_F(FetchVerticesValidatorTest, FetchVerticesProp) { auto 
yieldColumns = std::make_unique(); yieldColumns->addColumn(new YieldColumn(VertexExpression::make(pool), "vertices_")); auto *project = Project::make(qctx, gv, yieldColumns.get()); - project->setColNames({"vertices_"}); auto result = Eq(qctx->plan()->root(), project); ASSERT_TRUE(result.ok()) << result; } @@ -422,7 +419,6 @@ TEST_F(FetchVerticesValidatorTest, FetchVerticesProp) { auto yieldColumns = std::make_unique(); yieldColumns->addColumn(new YieldColumn(VertexExpression::make(pool), "vertices_")); auto *project = Project::make(qctx, gv, yieldColumns.get()); - project->setColNames({"vertices_"}); auto result = Eq(qctx->plan()->root(), project); ASSERT_TRUE(result.ok()) << result; } @@ -443,7 +439,6 @@ TEST_F(FetchVerticesValidatorTest, FetchVerticesProp) { new YieldColumn(InputPropertyExpression::make(pool, nebula::kVid), "VertexID")); yieldColumns->addColumn(new YieldColumn(TagPropertyExpression::make(pool, "person", "name"))); auto *project = Project::make(qctx, gv, yieldColumns.get()); - project->setColNames(colNames); auto result = Eq(qctx->plan()->root(), project); ASSERT_TRUE(result.ok()) << result; @@ -465,7 +460,6 @@ TEST_F(FetchVerticesValidatorTest, FetchVerticesProp) { yieldColumns->addColumn(new YieldColumn(TagPropertyExpression::make(pool, "person", "name"))); yieldColumns->addColumn(new YieldColumn(TagPropertyExpression::make(pool, "person", "age"))); auto *project = Project::make(qctx, gv, yieldColumns.get()); - project->setColNames(colNames); auto result = Eq(qctx->plan()->root(), project); ASSERT_TRUE(result.ok()) << result; @@ -488,7 +482,6 @@ TEST_F(FetchVerticesValidatorTest, FetchVerticesProp) { yieldColumns->addColumn(new YieldColumn(TagPropertyExpression::make(pool, "person", "name"))); yieldColumns->addColumn(new YieldColumn(TagPropertyExpression::make(pool, "person", "age"))); auto *project = Project::make(qctx, gv, yieldColumns.get()); - project->setColNames({"VertexID", "(1+1)", "person.name", "person.age"}); auto result = 
Eq(qctx->plan()->root(), project); ASSERT_TRUE(result.ok()) << result; diff --git a/src/graph/validator/test/MatchValidatorTest.cpp b/src/graph/validator/test/MatchValidatorTest.cpp index 03947c59366..43bbb9faa6c 100644 --- a/src/graph/validator/test/MatchValidatorTest.cpp +++ b/src/graph/validator/test/MatchValidatorTest.cpp @@ -611,6 +611,34 @@ TEST_F(MatchValidatorTest, validateAlias) { EXPECT_EQ(std::string(result.message()), "SemanticError: Path `p' does not have the type attribute"); } + { + std::string query = "MATCH (v:person) return id(vertex)"; + auto result = checkResult(query); + EXPECT_EQ( + std::string(result.message()), + "SemanticError: keywords: vertex and edge are not supported in return clause `id(VERTEX)'"); + } + { + std::string query = "MATCH (v:person) return vertex as a"; + auto result = checkResult(query); + EXPECT_EQ(std::string(result.message()), + "SemanticError: keywords: vertex and edge are not supported in return clause `VERTEX " + "AS a'"); + } + { + std::string query = "MATCH (v:person)-[e]-(v2) return src(edge)"; + auto result = checkResult(query); + EXPECT_EQ( + std::string(result.message()), + "SemanticError: keywords: vertex and edge are not supported in return clause `src(EDGE)'"); + } + { + std::string query = "MATCH (v:person)-[e]-(v2) return edge as b"; + auto result = checkResult(query); + EXPECT_EQ( + std::string(result.message()), + "SemanticError: keywords: vertex and edge are not supported in return clause `EDGE AS b'"); + } } } // namespace graph diff --git a/src/graph/validator/test/MutateValidatorTest.cpp b/src/graph/validator/test/MutateValidatorTest.cpp index b60a4562b05..a0677381f4b 100644 --- a/src/graph/validator/test/MutateValidatorTest.cpp +++ b/src/graph/validator/test/MutateValidatorTest.cpp @@ -44,11 +44,18 @@ TEST_F(MutateValidatorTest, InsertEdgeTest) { ASSERT_FALSE(checkResult(cmd, {})); } // vid use function call + { + auto cmd = + "INSERT EDGE like(start, end, likeness) VALUES 
lower(\"Lily\")->\"Tom\":(2010, " + "2020, 90);"; + ASSERT_TRUE(checkResult(cmd, {PK::kInsertEdges, PK::kStart})); + } + // vid use function call { auto cmd = "INSERT EDGE like(start, end) VALUES lower(\"Lily\")->\"Tom\":(2010, " "2020);"; - ASSERT_TRUE(checkResult(cmd, {PK::kInsertEdges, PK::kStart})); + ASSERT_FALSE(checkResult(cmd, {PK::kInsertEdges, PK::kStart})); } } diff --git a/src/graph/validator/test/QueryValidatorTest.cpp b/src/graph/validator/test/QueryValidatorTest.cpp index 025e3475f5a..ac931a86420 100644 --- a/src/graph/validator/test/QueryValidatorTest.cpp +++ b/src/graph/validator/test/QueryValidatorTest.cpp @@ -950,6 +950,12 @@ TEST_F(QueryValidatorTest, GoInvalid) { auto result = checkResult(query); EXPECT_EQ(std::string(result.message()), "SemanticError: Duplicate Column Name : `id'"); } + { + std::string query = "GO FROM id(vertex) OVER * "; + auto result = checkResult(query); + EXPECT_EQ(std::string(result.message()), + "SemanticError: `id(VERTEX)' is not an evaluable expression."); + } } TEST_F(QueryValidatorTest, Limit) { diff --git a/src/graph/visitor/test/CMakeLists.txt b/src/graph/visitor/test/CMakeLists.txt index b5b61c02e28..159e50b613a 100644 --- a/src/graph/visitor/test/CMakeLists.txt +++ b/src/graph/visitor/test/CMakeLists.txt @@ -56,6 +56,7 @@ nebula_add_test( $ $ $ + $ LIBRARIES gtest ${THRIFT_LIBRARIES} diff --git a/src/interface/common.thrift b/src/interface/common.thrift index 6f05fb32fbf..001ae49950d 100644 --- a/src/interface/common.thrift +++ b/src/interface/common.thrift @@ -324,6 +324,7 @@ enum ErrorCode { E_BALANCER_FAILURE = -2047, E_JOB_NOT_FINISHED = -2048, E_TASK_REPORT_OUT_DATE = -2049, + E_JOB_NOT_IN_SPACE = -2050, E_INVALID_JOB = -2065, // Backup Failure diff --git a/src/interface/meta.thrift b/src/interface/meta.thrift index 922736f0f30..d4fe48298a6 100644 --- a/src/interface/meta.thrift +++ b/src/interface/meta.thrift @@ -328,6 +328,11 @@ struct CreateSpaceReq { 2: bool if_not_exists, } +struct CreateSpaceAsReq { + 
1: binary old_space_name, + 2: binary new_space_name, +} + struct DropSpaceReq { 1: binary space_name 2: bool if_exists, @@ -1171,6 +1176,8 @@ service MetaService { GetSpaceResp getSpace(1: GetSpaceReq req); ListSpacesResp listSpaces(1: ListSpacesReq req); + ExecResp createSpaceAs(1: CreateSpaceAsReq req); + ExecResp createTag(1: CreateTagReq req); ExecResp alterTag(1: AlterTagReq req); ExecResp dropTag(1: DropTagReq req); diff --git a/src/kvstore/EventListener.h b/src/kvstore/EventListener.h index 2f63b8137ca..f10363552a3 100644 --- a/src/kvstore/EventListener.h +++ b/src/kvstore/EventListener.h @@ -210,8 +210,9 @@ class EventListener : public rocksdb::EventListener { return "PeriodicCompaction"; case rocksdb::CompactionReason::kNumOfReasons: return "NumOfReasons"; + default: + return "Unknown"; } - return "Unknown"; } std::string flushReasonString(const rocksdb::FlushReason& reason) { diff --git a/src/kvstore/KVEngine.h b/src/kvstore/KVEngine.h index dd23f8ccd00..bd485a4333b 100644 --- a/src/kvstore/KVEngine.h +++ b/src/kvstore/KVEngine.h @@ -72,6 +72,8 @@ class KVEngine { const std::string& prefix, std::unique_ptr* iter) = 0; + virtual nebula::cpp2::ErrorCode scan(std::unique_ptr* storageIter) = 0; + // Write a single record virtual nebula::cpp2::ErrorCode put(std::string key, std::string value) = 0; @@ -111,6 +113,10 @@ class KVEngine { virtual nebula::cpp2::ErrorCode setDBOption(const std::string& configKey, const std::string& configValue) = 0; + // Get DB Property + virtual ErrorOr getProperty( + const std::string& property) = 0; + virtual nebula::cpp2::ErrorCode compact() = 0; virtual nebula::cpp2::ErrorCode flush() = 0; diff --git a/src/kvstore/KVStore.h b/src/kvstore/KVStore.h index c4ac838a45a..dcef7e1a9f9 100644 --- a/src/kvstore/KVStore.h +++ b/src/kvstore/KVStore.h @@ -216,6 +216,9 @@ class KVStore { virtual std::vector getDataRoot() const = 0; + virtual ErrorOr getProperty( + GraphSpaceID spaceId, const std::string& property) = 0; + protected: 
KVStore() = default; }; diff --git a/src/kvstore/NebulaSnapshotManager.cpp b/src/kvstore/NebulaSnapshotManager.cpp index 5d31055be5e..26d41f44d6f 100644 --- a/src/kvstore/NebulaSnapshotManager.cpp +++ b/src/kvstore/NebulaSnapshotManager.cpp @@ -24,14 +24,13 @@ NebulaSnapshotManager::NebulaSnapshotManager(NebulaStore* kv) : store_(kv) { // Snapshot rate is limited to FLAGS_snapshot_worker_threads * FLAGS_snapshot_part_rate_limit. // So by default, the total send rate is limited to 4 * 10Mb = 40Mb. LOG(INFO) << "Send snapshot is rate limited to " << FLAGS_snapshot_part_rate_limit - << " for each part"; + << " for each part by default"; } void NebulaSnapshotManager::accessAllRowsInSnapshot(GraphSpaceID spaceId, PartitionID partId, raftex::SnapshotCallback cb) { - auto rateLimiter = std::make_unique(FLAGS_snapshot_part_rate_limit, - FLAGS_snapshot_part_rate_limit); + auto rateLimiter = std::make_unique(); CHECK_NOTNULL(store_); auto tables = NebulaKeyUtils::snapshotPrefix(partId); std::vector data; @@ -74,7 +73,9 @@ bool NebulaSnapshotManager::accessTable(GraphSpaceID spaceId, size_t batchSize = 0; while (iter && iter->valid()) { if (batchSize >= FLAGS_snapshot_batch_size) { - rateLimiter->consume(batchSize); + rateLimiter->consume(static_cast(batchSize), // toConsume + static_cast(FLAGS_snapshot_part_rate_limit), // rate + static_cast(FLAGS_snapshot_part_rate_limit)); // burstSize if (cb(data, totalCount, totalSize, raftex::SnapshotStatus::IN_PROGRESS)) { data.clear(); batchSize = 0; diff --git a/src/kvstore/NebulaStore.cpp b/src/kvstore/NebulaStore.cpp index aa7bfc767fc..0727677f876 100644 --- a/src/kvstore/NebulaStore.cpp +++ b/src/kvstore/NebulaStore.cpp @@ -1168,5 +1168,26 @@ nebula::cpp2::ErrorCode NebulaStore::multiPutWithoutReplicator(GraphSpaceID spac return nebula::cpp2::ErrorCode::SUCCEEDED; } +ErrorOr NebulaStore::getProperty( + GraphSpaceID spaceId, const std::string& property) { + auto spaceRet = space(spaceId); + if (!ok(spaceRet)) { + LOG(ERROR) << "Get 
Space " << spaceId << " Failed"; + return error(spaceRet); + } + auto space = nebula::value(spaceRet); + + folly::dynamic obj = folly::dynamic::object; + for (size_t i = 0; i < space->engines_.size(); i++) { + auto val = space->engines_[i]->getProperty(property); + if (!ok(val)) { + return error(val); + } + auto eng = folly::stringPrintf("Engine %zu", i); + obj[eng] = std::move(value(val)); + } + return folly::toJson(obj); +} + } // namespace kvstore } // namespace nebula diff --git a/src/kvstore/NebulaStore.h b/src/kvstore/NebulaStore.h index 306f716827f..e8529f18010 100644 --- a/src/kvstore/NebulaStore.h +++ b/src/kvstore/NebulaStore.h @@ -11,6 +11,7 @@ #include #include "common/base/Base.h" +#include "common/ssl/SSLConfig.h" #include "common/utils/Utils.h" #include "interface/gen-cpp2/RaftexServiceAsyncClient.h" #include "kvstore/DiskManager.h" @@ -65,7 +66,8 @@ class NebulaStore : public KVStore, public Handler { options_(std::move(options)) { CHECK_NOTNULL(options_.partMan_); clientMan_ = - std::make_shared>(); + std::make_shared>( + FLAGS_enable_ssl); } ~NebulaStore(); @@ -273,6 +275,9 @@ class NebulaStore : public KVStore, public Handler { nebula::cpp2::ErrorCode multiPutWithoutReplicator(GraphSpaceID spaceId, std::vector keyValues) override; + ErrorOr getProperty(GraphSpaceID spaceId, + const std::string& property) override; + private: void loadPartFromDataPath(); diff --git a/src/kvstore/RateLimiter.h b/src/kvstore/RateLimiter.h index 7b91ad4b189..97b4ef196fa 100644 --- a/src/kvstore/RateLimiter.h +++ b/src/kvstore/RateLimiter.h @@ -21,32 +21,29 @@ namespace kvstore { // For now, there are two major cases: snapshot (both for balance or catch up) and rebuild index. 
class RateLimiter { public: - RateLimiter(int32_t rate, int32_t burstSize) - : rate_(static_cast(rate)), burstSize_(static_cast(burstSize)) { + RateLimiter() { // token will be available after 1 second, to prevent speed spike at the beginning auto now = time::WallClock::fastNowInSec(); int64_t waitInSec = FLAGS_skip_wait_in_rate_limiter ? 0 : 1; - bucket_.reset(new folly::TokenBucket(rate_, burstSize_, static_cast(now + waitInSec))); + bucket_.reset(new folly::DynamicTokenBucket(static_cast(now + waitInSec))); } // Caller must make sure the **the parition has been add, and won't be removed during consume.** // Snaphot and rebuild index follow this principle by design. - void consume(size_t toConsume) { - if (toConsume > burstSize_) { + void consume(double toConsume, double rate, double burstSize) { + if (toConsume > burstSize) { // consumeWithBorrowAndWait do nothing when toConsume > burstSize_, we sleep 1s instead std::this_thread::sleep_for(std::chrono::seconds(1)); } else { // If there are enouth tokens, consume and return immediately. // If not, cosume anyway, but sleep enough time before return. 
auto now = time::WallClock::fastNowInSec(); - bucket_->consumeWithBorrowAndWait(static_cast(toConsume), static_cast(now)); + bucket_->consumeWithBorrowAndWait(toConsume, rate, burstSize, static_cast(now)); } } private: - std::unique_ptr bucket_; - double rate_{1 << 20}; - double burstSize_{1 << 20}; + std::unique_ptr bucket_; }; } // namespace kvstore diff --git a/src/kvstore/RocksEngine.cpp b/src/kvstore/RocksEngine.cpp index 6b536b9cecc..73bdb14e104 100644 --- a/src/kvstore/RocksEngine.cpp +++ b/src/kvstore/RocksEngine.cpp @@ -13,7 +13,6 @@ #include "common/fs/FileUtils.h" #include "common/utils/NebulaKeyUtils.h" #include "kvstore/KVStore.h" -#include "kvstore/RocksEngineConfig.h" DEFINE_bool(move_files, false, "Move the SST files instead of copy when ingest into dataset"); @@ -126,6 +125,7 @@ RocksEngine::RocksEngine(GraphSpaceID spaceId, CHECK(status.ok()) << status.ToString(); db_.reset(db); partsNum_ = allParts().size(); + extractorLen_ = sizeof(PartitionID) + vIdLen; LOG(INFO) << "open rocksdb on " << path; backup(); @@ -202,7 +202,7 @@ nebula::cpp2::ErrorCode RocksEngine::range(const std::string& start, const std::string& end, std::unique_ptr* storageIter) { rocksdb::ReadOptions options; - options.total_order_seek = true; + options.total_order_seek = FLAGS_enable_rocksdb_prefix_filtering; rocksdb::Iterator* iter = db_->NewIterator(options); if (iter) { iter->Seek(rocksdb::Slice(start)); @@ -213,7 +213,32 @@ nebula::cpp2::ErrorCode RocksEngine::range(const std::string& start, nebula::cpp2::ErrorCode RocksEngine::prefix(const std::string& prefix, std::unique_ptr* storageIter) { + // In fact, we don't need to check prefix.size() >= extractorLen_, which is caller's duty to make + // sure the prefix bloom filter exists. But this is quite error-proning, so we do a check here. 
+ if (FLAGS_enable_rocksdb_prefix_filtering && prefix.size() >= extractorLen_) { + return prefixWithExtractor(prefix, storageIter); + } else { + return prefixWithoutExtractor(prefix, storageIter); + } +} + +nebula::cpp2::ErrorCode RocksEngine::prefixWithExtractor(const std::string& prefix, + std::unique_ptr* storageIter) { rocksdb::ReadOptions options; + options.prefix_same_as_start = true; + rocksdb::Iterator* iter = db_->NewIterator(options); + if (iter) { + iter->Seek(rocksdb::Slice(prefix)); + } + storageIter->reset(new RocksPrefixIter(iter, prefix)); + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + +nebula::cpp2::ErrorCode RocksEngine::prefixWithoutExtractor( + const std::string& prefix, std::unique_ptr* storageIter) { + rocksdb::ReadOptions options; + // prefix_same_as_start is false by default + options.total_order_seek = FLAGS_enable_rocksdb_prefix_filtering; rocksdb::Iterator* iter = db_->NewIterator(options); if (iter) { iter->Seek(rocksdb::Slice(prefix)); @@ -226,6 +251,8 @@ nebula::cpp2::ErrorCode RocksEngine::rangeWithPrefix(const std::string& start, const std::string& prefix, std::unique_ptr* storageIter) { rocksdb::ReadOptions options; + // prefix_same_as_start is false by default + options.total_order_seek = FLAGS_enable_rocksdb_prefix_filtering; rocksdb::Iterator* iter = db_->NewIterator(options); if (iter) { iter->Seek(rocksdb::Slice(start)); @@ -234,6 +261,15 @@ nebula::cpp2::ErrorCode RocksEngine::rangeWithPrefix(const std::string& start, return nebula::cpp2::ErrorCode::SUCCEEDED; } +nebula::cpp2::ErrorCode RocksEngine::scan(std::unique_ptr* storageIter) { + rocksdb::ReadOptions options; + options.total_order_seek = true; + rocksdb::Iterator* iter = db_->NewIterator(options); + iter->SeekToFirst(); + storageIter->reset(new RocksCommonIter(iter)); + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + nebula::cpp2::ErrorCode RocksEngine::put(std::string key, std::string value) { rocksdb::WriteOptions options; options.disableWAL = 
FLAGS_rocksdb_disable_wal; @@ -397,6 +433,16 @@ nebula::cpp2::ErrorCode RocksEngine::setDBOption(const std::string& configKey, } } +ErrorOr RocksEngine::getProperty( + const std::string& property) { + std::string value; + if (!db_->GetProperty(property, &value)) { + return nebula::cpp2::ErrorCode::E_INVALID_PARM; + } else { + return value; + } +} + nebula::cpp2::ErrorCode RocksEngine::compact() { rocksdb::CompactRangeOptions options; options.change_level = FLAGS_rocksdb_compact_change_level; diff --git a/src/kvstore/RocksEngine.h b/src/kvstore/RocksEngine.h index c781b24cab5..d94343d84e7 100644 --- a/src/kvstore/RocksEngine.h +++ b/src/kvstore/RocksEngine.h @@ -15,6 +15,7 @@ #include "common/base/Base.h" #include "kvstore/KVEngine.h" #include "kvstore/KVIterator.h" +#include "kvstore/RocksEngineConfig.h" namespace nebula { namespace kvstore { @@ -75,6 +76,30 @@ class RocksPrefixIter : public KVIterator { rocksdb::Slice prefix_; }; +class RocksCommonIter : public KVIterator { + public: + explicit RocksCommonIter(rocksdb::Iterator* iter) : iter_(iter) {} + + ~RocksCommonIter() = default; + + bool valid() const override { return !!iter_ && iter_->Valid(); } + + void next() override { iter_->Next(); } + + void prev() override { iter_->Prev(); } + + folly::StringPiece key() const override { + return folly::StringPiece(iter_->key().data(), iter_->key().size()); + } + + folly::StringPiece val() const override { + return folly::StringPiece(iter_->value().data(), iter_->value().size()); + } + + protected: + std::unique_ptr iter_; +}; + /************************************************************************** * * An implementation of KVEngine based on Rocksdb @@ -128,6 +153,13 @@ class RocksEngine : public KVEngine { const std::string& prefix, std::unique_ptr* iter) override; + nebula::cpp2::ErrorCode prefixWithExtractor(const std::string& prefix, + std::unique_ptr* storageIter); + + nebula::cpp2::ErrorCode prefixWithoutExtractor(const std::string& prefix, + 
std::unique_ptr* storageIter); + + nebula::cpp2::ErrorCode scan(std::unique_ptr* storageIter) override; /********************* * Data modification ********************/ @@ -161,6 +193,8 @@ class RocksEngine : public KVEngine { nebula::cpp2::ErrorCode setDBOption(const std::string& configKey, const std::string& configValue) override; + ErrorOr getProperty(const std::string& property) override; + nebula::cpp2::ErrorCode compact() override; nebula::cpp2::ErrorCode flush() override; @@ -190,6 +224,7 @@ class RocksEngine : public KVEngine { std::string backupPath_; std::unique_ptr backupDb_{nullptr}; int32_t partsNum_ = -1; + size_t extractorLen_; }; } // namespace kvstore diff --git a/src/kvstore/RocksEngineConfig.cpp b/src/kvstore/RocksEngineConfig.cpp index 81f32e37a30..47eee99aaa3 100644 --- a/src/kvstore/RocksEngineConfig.cpp +++ b/src/kvstore/RocksEngineConfig.cpp @@ -51,7 +51,7 @@ DEFINE_int64(rocksdb_block_cache, 1024, "The default block cache size used in BlockBasedTable. The unit is MB"); -DEFINE_int32(row_cache_num, 16 * 1000 * 1000, "Total keys inside the cache"); +DEFINE_int32(rocksdb_row_cache_num, 16 * 1000 * 1000, "Total keys inside the cache"); DEFINE_int32(cache_bucket_exp, 8, "Total buckets number is 1 << cache_bucket_exp"); @@ -78,15 +78,13 @@ DEFINE_int32(rocksdb_rate_limit, 0, "write limit in bytes per sec. The unit is MB. 0 means unlimited."); -DEFINE_bool(enable_rocksdb_prefix_filtering, +DEFINE_bool(enable_rocksdb_whole_key_filtering, false, + "Whether or not to enable rocksdb's whole key bloom filter"); + +DEFINE_bool(enable_rocksdb_prefix_filtering, + true, "Whether or not to enable rocksdb's prefix bloom filter."); -DEFINE_bool(rocksdb_prefix_bloom_filter_length_flag, - false, - "If true, prefix bloom filter will be sizeof(PartitionID) + vidLen + " - "sizeof(EdgeType). " - "If false, prefix bloom filter will be sizeof(PartitionID) + vidLen. 
"); -DEFINE_int32(rocksdb_plain_table_prefix_length, 4, "PlainTable prefix size"); DEFINE_bool(rocksdb_compact_change_level, true, @@ -118,34 +116,6 @@ DEFINE_int32(rocksdb_backup_interval_secs, namespace nebula { namespace kvstore { -class GraphPrefixTransform : public rocksdb::SliceTransform { - private: - size_t prefixLen_; - std::string name_; - - public: - explicit GraphPrefixTransform(size_t prefixLen) - : prefixLen_(prefixLen), name_("nebula.GraphPrefix." + std::to_string(prefixLen_)) {} - - const char* Name() const override { return name_.c_str(); } - - rocksdb::Slice Transform(const rocksdb::Slice& src) const override { - return rocksdb::Slice(src.data(), prefixLen_); - } - - bool InDomain(const rocksdb::Slice& key) const override { - if (key.size() < prefixLen_) { - return false; - } - // And we should not use NebulaKeyUtils::isVertex or isEdge here, because it - // will regard the prefix itself not in domain since its length does not - // satisfy - constexpr int32_t len = static_cast(sizeof(NebulaKeyType)); - auto type = static_cast(readInt(key.data(), len) & kTypeMask); - return type == NebulaKeyType::kEdge || type == NebulaKeyType::kVertex; - } -}; - static rocksdb::Status initRocksdbCompression(rocksdb::Options& baseOpts) { static std::unordered_map m = { {"no", rocksdb::kNoCompression}, @@ -256,10 +226,8 @@ rocksdb::Status initRocksdbOptions(rocksdb::Options& baseOpts, baseOpts.rate_limiter = rate_limiter; } + size_t prefixLength = sizeof(PartitionID) + vidLen; if (FLAGS_rocksdb_table_format == "BlockBasedTable") { - size_t prefixLength = FLAGS_rocksdb_prefix_bloom_filter_length_flag - ? 
sizeof(PartitionID) + vidLen + sizeof(EdgeType) - : sizeof(PartitionID) + vidLen; // BlockBasedTableOptions std::unordered_map bbtOptsMap; if (!loadOptionsMap(bbtOptsMap, FLAGS_rocksdb_block_based_table_options)) { @@ -279,9 +247,9 @@ rocksdb::Status initRocksdbOptions(rocksdb::Options& baseOpts, bbtOpts.block_cache = blockCache; } - if (FLAGS_row_cache_num) { + if (FLAGS_rocksdb_row_cache_num) { static std::shared_ptr rowCache = - rocksdb::NewLRUCache(FLAGS_row_cache_num, FLAGS_cache_bucket_exp); + rocksdb::NewLRUCache(FLAGS_rocksdb_row_cache_num, FLAGS_cache_bucket_exp); baseOpts.row_cache = rowCache; } @@ -296,8 +264,9 @@ rocksdb::Status initRocksdbOptions(rocksdb::Options& baseOpts, baseOpts.compaction_style == rocksdb::CompactionStyle::kCompactionStyleLevel; } if (FLAGS_enable_rocksdb_prefix_filtering) { - baseOpts.prefix_extractor.reset(new GraphPrefixTransform(prefixLength)); + baseOpts.prefix_extractor.reset(rocksdb::NewCappedPrefixTransform(prefixLength)); } + bbtOpts.whole_key_filtering = FLAGS_enable_rocksdb_whole_key_filtering; baseOpts.table_factory.reset(NewBlockBasedTableFactory(bbtOpts)); baseOpts.create_if_missing = true; } else if (FLAGS_rocksdb_table_format == "PlainTable") { @@ -308,8 +277,10 @@ rocksdb::Status initRocksdbOptions(rocksdb::Options& baseOpts, // by default. 
WAL_ttl_seconds and rocksdb_backup_interval_secs need to be // modify together if necessary FLAGS_rocksdb_disable_wal = false; - baseOpts.prefix_extractor.reset( - rocksdb::NewCappedPrefixTransform(FLAGS_rocksdb_plain_table_prefix_length)); + if (!FLAGS_enable_rocksdb_prefix_filtering) { + return rocksdb::Status::InvalidArgument("PlainTable should use prefix bloom filter"); + } + baseOpts.prefix_extractor.reset(rocksdb::NewCappedPrefixTransform(prefixLength)); baseOpts.table_factory.reset(rocksdb::NewPlainTableFactory()); baseOpts.create_if_missing = true; } else { diff --git a/src/kvstore/RocksEngineConfig.h b/src/kvstore/RocksEngineConfig.h index 7fd174cebec..018fae42dbc 100644 --- a/src/kvstore/RocksEngineConfig.h +++ b/src/kvstore/RocksEngineConfig.h @@ -37,7 +37,7 @@ DECLARE_int64(rocksdb_block_cache); DECLARE_int32(rocksdb_batch_size); -DECLARE_int32(row_cache_num); +DECLARE_int32(rocksdb_row_cache_num); DECLARE_int32(cache_bucket_exp); @@ -53,8 +53,7 @@ DECLARE_bool(enable_rocksdb_statistics); DECLARE_string(rocksdb_stats_level); DECLARE_bool(enable_rocksdb_prefix_filtering); -DECLARE_bool(rocksdb_prefix_bloom_filter_length_flag); -DECLARE_int32(rocksdb_plain_table_prefix_length); +DECLARE_bool(enable_rocksdb_whole_key_filtering); // rocksdb compact RangeOptions DECLARE_bool(rocksdb_compact_change_level); diff --git a/src/kvstore/raftex/RaftexService.cpp b/src/kvstore/raftex/RaftexService.cpp index e48f545d34e..e8aba1fb432 100644 --- a/src/kvstore/raftex/RaftexService.cpp +++ b/src/kvstore/raftex/RaftexService.cpp @@ -9,6 +9,7 @@ #include #include "common/base/Base.h" +#include "common/ssl/SSLConfig.h" #include "kvstore/raftex/RaftPart.h" namespace nebula { @@ -60,6 +61,9 @@ void RaftexService::initThriftServer(std::shared_ptr workers, uint16_t port) { LOG(INFO) << "Init thrift server for raft service, port: " << port; + if (FLAGS_enable_ssl) { + server_->setSSLConfig(nebula::sslContextConfig()); + } server_->setPort(port); 
server_->setIdleTimeout(std::chrono::seconds(0)); if (pool != nullptr) { diff --git a/src/kvstore/raftex/test/CMakeLists.txt b/src/kvstore/raftex/test/CMakeLists.txt index 0a62fe2cabc..3a0abe49b43 100644 --- a/src/kvstore/raftex/test/CMakeLists.txt +++ b/src/kvstore/raftex/test/CMakeLists.txt @@ -10,6 +10,7 @@ set(RAFTEX_TEST_LIBS $ $ $ + $ ) diff --git a/src/kvstore/raftex/test/TestShard.cpp b/src/kvstore/raftex/test/TestShard.cpp index 7187aa862f9..d7c4683e1b0 100644 --- a/src/kvstore/raftex/test/TestShard.cpp +++ b/src/kvstore/raftex/test/TestShard.cpp @@ -7,6 +7,7 @@ #include "kvstore/raftex/test/TestShard.h" #include "common/base/Base.h" +#include "common/ssl/SSLConfig.h" #include "kvstore/raftex/Host.h" #include "kvstore/raftex/RaftexService.h" #include "kvstore/wal/FileBasedWal.h" @@ -118,7 +119,7 @@ HostAddr decodeRemovePeer(const folly::StringPiece& log) { std::shared_ptr> getClientMan() { static std::shared_ptr> clientMan( - new thrift::ThriftClientManager()); + new thrift::ThriftClientManager(FLAGS_enable_ssl)); return clientMan; } diff --git a/src/kvstore/test/CMakeLists.txt b/src/kvstore/test/CMakeLists.txt index 17d043ba6c2..a4d9c040d58 100644 --- a/src/kvstore/test/CMakeLists.txt +++ b/src/kvstore/test/CMakeLists.txt @@ -31,6 +31,7 @@ set(KVSTORE_TEST_LIBS $ $ $ + $ ) nebula_add_test( diff --git a/src/kvstore/test/NebulaStoreTest.cpp b/src/kvstore/test/NebulaStoreTest.cpp index 022df3f3a65..98da491681a 100644 --- a/src/kvstore/test/NebulaStoreTest.cpp +++ b/src/kvstore/test/NebulaStoreTest.cpp @@ -917,6 +917,7 @@ TEST(NebulaStoreTest, BackupRestoreTest) { FLAGS_rocksdb_table_format = "PlainTable"; FLAGS_rocksdb_wal_dir = rocksdbWalPath.path(); FLAGS_rocksdb_backup_dir = backupPath.path(); + FLAGS_enable_rocksdb_prefix_filtering = true; auto waitLeader = [](const std::unique_ptr& store) { while (true) { diff --git a/src/kvstore/test/RateLimiterTest.cpp b/src/kvstore/test/RateLimiterTest.cpp index f01fc70d071..f263c72d803 100644 --- 
a/src/kvstore/test/RateLimiterTest.cpp +++ b/src/kvstore/test/RateLimiterTest.cpp @@ -17,32 +17,38 @@ namespace nebula { namespace kvstore { TEST(RateLimter, ConsumeLessEqualThanBurst) { - RateLimiter limiter(FLAGS_snapshot_part_rate_limit, FLAGS_snapshot_part_rate_limit); + RateLimiter limiter; auto now = time::WallClock::fastNowInSec(); int64_t count = 0; while (count++ < 50) { - limiter.consume(FLAGS_snapshot_part_rate_limit / 10); + limiter.consume(FLAGS_snapshot_part_rate_limit / 10, // toConsume + FLAGS_snapshot_part_rate_limit, // rate + FLAGS_snapshot_part_rate_limit); // burstSize } EXPECT_GE(time::WallClock::fastNowInSec() - now, 5); } TEST(RateLimter, ConsumeGreaterThanBurst) { - RateLimiter limiter(FLAGS_snapshot_part_rate_limit, FLAGS_snapshot_part_rate_limit / 10); + RateLimiter limiter; auto now = time::WallClock::fastNowInSec(); int64_t count = 0; while (count++ < 5) { // greater than burst size, will sleep 1 second instead - limiter.consume(FLAGS_snapshot_part_rate_limit); + limiter.consume(FLAGS_snapshot_part_rate_limit, // toConsume + FLAGS_snapshot_part_rate_limit, // rate + FLAGS_snapshot_part_rate_limit / 10); // burstSize } EXPECT_GE(time::WallClock::fastNowInSec() - now, 5); } TEST(RateLimter, RateLessThanBurst) { - RateLimiter limiter(FLAGS_snapshot_part_rate_limit, 2 * FLAGS_snapshot_part_rate_limit); + RateLimiter limiter; auto now = time::WallClock::fastNowInSec(); int64_t count = 0; while (count++ < 5) { - limiter.consume(FLAGS_snapshot_part_rate_limit); + limiter.consume(FLAGS_snapshot_part_rate_limit, // toConsume + FLAGS_snapshot_part_rate_limit, // rate + 2 * FLAGS_snapshot_part_rate_limit); // burstSize } EXPECT_GE(time::WallClock::fastNowInSec() - now, 5); } diff --git a/src/kvstore/test/RocksEngineTest.cpp b/src/kvstore/test/RocksEngineTest.cpp index a800463e4c7..66eddedff2c 100644 --- a/src/kvstore/test/RocksEngineTest.cpp +++ b/src/kvstore/test/RocksEngineTest.cpp @@ -20,7 +20,19 @@ namespace kvstore { const int32_t 
kDefaultVIdLen = 8; -TEST(RocksEngineTest, SimpleTest) { +class RocksEngineTest : public ::testing::TestWithParam> { + public: + void SetUp() override { + auto param = GetParam(); + FLAGS_enable_rocksdb_prefix_filtering = std::get<0>(param); + FLAGS_enable_rocksdb_whole_key_filtering = std::get<1>(param); + FLAGS_rocksdb_table_format = std::get<2>(param); + } + + void TearDown() override {} +}; + +TEST_P(RocksEngineTest, SimpleTest) { fs::TempDir rootPath("/tmp/rocksdb_engine_SimpleTest.XXXXXX"); auto engine = std::make_unique(0, kDefaultVIdLen, rootPath.path()); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, engine->put("key", "val")); @@ -29,7 +41,7 @@ TEST(RocksEngineTest, SimpleTest) { EXPECT_EQ("val", val); } -TEST(RocksEngineTest, RangeTest) { +TEST_P(RocksEngineTest, RangeTest) { fs::TempDir rootPath("/tmp/rocksdb_engine_RangeTest.XXXXXX"); auto engine = std::make_unique(0, kDefaultVIdLen, rootPath.path()); std::vector data; @@ -66,7 +78,7 @@ TEST(RocksEngineTest, RangeTest) { checkRange(1, 15, 10, 5); } -TEST(RocksEngineTest, PrefixTest) { +TEST_P(RocksEngineTest, PrefixTest) { fs::TempDir rootPath("/tmp/rocksdb_engine_PrefixTest.XXXXXX"); auto engine = std::make_unique(0, kDefaultVIdLen, rootPath.path()); LOG(INFO) << "Write data in batch and scan them..."; @@ -105,7 +117,7 @@ TEST(RocksEngineTest, PrefixTest) { checkPrefix("c", 20, 20); } -TEST(RocksEngineTest, RemoveTest) { +TEST_P(RocksEngineTest, RemoveTest) { fs::TempDir rootPath("/tmp/rocksdb_engine_RemoveTest.XXXXXX"); auto engine = std::make_unique(0, kDefaultVIdLen, rootPath.path()); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, engine->put("key", "val")); @@ -116,7 +128,10 @@ TEST(RocksEngineTest, RemoveTest) { EXPECT_EQ(nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND, engine->get("key", &val)); } -TEST(RocksEngineTest, RemoveRangeTest) { +TEST_P(RocksEngineTest, RemoveRangeTest) { + if (FLAGS_rocksdb_table_format == "PlainTable") { + return; + } fs::TempDir 
rootPath("/tmp/rocksdb_engine_RemoveRangeTest.XXXXXX"); auto engine = std::make_unique(0, kDefaultVIdLen, rootPath.path()); for (int32_t i = 0; i < 100; i++) { @@ -154,7 +169,7 @@ TEST(RocksEngineTest, RemoveRangeTest) { } } -TEST(RocksEngineTest, OptionTest) { +TEST_P(RocksEngineTest, OptionTest) { fs::TempDir rootPath("/tmp/rocksdb_engine_OptionTest.XXXXXX"); auto engine = std::make_unique(0, kDefaultVIdLen, rootPath.path()); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, @@ -173,7 +188,7 @@ TEST(RocksEngineTest, OptionTest) { engine->setDBOption("max_background_compactions", "bad_value")); } -TEST(RocksEngineTest, CompactTest) { +TEST_P(RocksEngineTest, CompactTest) { fs::TempDir rootPath("/tmp/rocksdb_engine_CompactTest.XXXXXX"); auto engine = std::make_unique(0, kDefaultVIdLen, rootPath.path()); std::vector data; @@ -184,7 +199,10 @@ TEST(RocksEngineTest, CompactTest) { EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, engine->compact()); } -TEST(RocksEngineTest, IngestTest) { +TEST_P(RocksEngineTest, IngestTest) { + if (FLAGS_rocksdb_table_format == "PlainTable") { + return; + } rocksdb::Options options; rocksdb::SstFileWriter writer(rocksdb::EnvOptions(), options); fs::TempDir rootPath("/tmp/rocksdb_engine_IngestTest.XXXXXX"); @@ -210,7 +228,10 @@ TEST(RocksEngineTest, IngestTest) { EXPECT_EQ(nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND, engine->get("key_not_exist", &result)); } -TEST(RocksEngineTest, BackupRestoreTable) { +TEST_P(RocksEngineTest, BackupRestoreTable) { + if (FLAGS_rocksdb_table_format == "PlainTable") { + return; + } rocksdb::Options options; rocksdb::SstFileWriter writer(rocksdb::EnvOptions(), options); fs::TempDir rootPath("/tmp/rocksdb_engine_backuptable.XXXXXX"); @@ -271,90 +292,16 @@ TEST(RocksEngineTest, BackupRestoreTable) { EXPECT_EQ(num, 5); } -TEST(RocksEngineTest, BackupRestoreWithoutData) { - fs::TempDir dataPath("/tmp/rocks_engine_test_data_path.XXXXXX"); - fs::TempDir rocksdbWalPath("/tmp/rocks_engine_test_rocksdb_wal_path.XXXXXX"); - 
fs::TempDir backupPath("/tmp/rocks_engine_test_backup_path.XXXXXX"); - FLAGS_rocksdb_table_format = "PlainTable"; - FLAGS_rocksdb_wal_dir = rocksdbWalPath.path(); - FLAGS_rocksdb_backup_dir = backupPath.path(); - - auto engine = std::make_unique(0, kDefaultVIdLen, dataPath.path()); - - LOG(INFO) << "Stop the engine and remove data"; - // release the engine and mock machine reboot by removing the data - engine.reset(); - CHECK(fs::FileUtils::remove(dataPath.path(), true)); - - LOG(INFO) << "Start recover"; - // reopen the engine, and it will try to restore from the previous backup - engine = std::make_unique(0, kDefaultVIdLen, dataPath.path()); - - FLAGS_rocksdb_table_format = "BlockBasedTable"; - FLAGS_rocksdb_wal_dir = ""; - FLAGS_rocksdb_backup_dir = ""; -} - -TEST(RocksEngineTest, BackupRestoreWithData) { - fs::TempDir dataPath("/tmp/rocks_engine_test_data_path.XXXXXX"); - fs::TempDir rocksdbWalPath("/tmp/rocks_engine_test_rocksdb_wal_path.XXXXXX"); - fs::TempDir backupPath("/tmp/rocks_engine_test_backup_path.XXXXXX"); - FLAGS_rocksdb_table_format = "PlainTable"; - FLAGS_rocksdb_wal_dir = rocksdbWalPath.path(); - FLAGS_rocksdb_backup_dir = backupPath.path(); - - auto engine = std::make_unique(0, kDefaultVIdLen, dataPath.path()); - PartitionID partId = 1; - - auto checkData = [&] { - std::string prefix = NebulaKeyUtils::vertexPrefix(kDefaultVIdLen, partId, "vertex"); - std::unique_ptr iter; - auto code = engine->prefix(prefix, &iter); - EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); - int32_t num = 0; - while (iter->valid()) { - num++; - iter->next(); - } - EXPECT_EQ(num, 10); - - std::string value; - code = engine->get(NebulaKeyUtils::systemCommitKey(partId), &value); - EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); - EXPECT_EQ("123", value); - }; - - LOG(INFO) << "Write some data"; - std::vector data; - for (auto tagId = 0; tagId < 10; tagId++) { - data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, partId, "vertex", tagId), - 
folly::stringPrintf("val_%d", tagId)); +TEST_P(RocksEngineTest, VertexWholeKeyBloomFilterTest) { + if (FLAGS_rocksdb_table_format == "PlainTable") { + return; } - data.emplace_back(NebulaKeyUtils::systemCommitKey(partId), "123"); - engine->multiPut(std::move(data)); - - checkData(); - LOG(INFO) << "Stop the engine and remove data"; - // release the engine and mock machine reboot by removing the data - engine.reset(); - CHECK(fs::FileUtils::remove(dataPath.path(), true)); - - LOG(INFO) << "Start recover"; - // reopen the engine, and it will try to restore from the previous backup - engine = std::make_unique(0, kDefaultVIdLen, dataPath.path()); - checkData(); - - FLAGS_rocksdb_table_format = "BlockBasedTable"; - FLAGS_rocksdb_wal_dir = ""; - FLAGS_rocksdb_backup_dir = ""; -} - -TEST(RocksEngineTest, VertexBloomFilterTest) { FLAGS_enable_rocksdb_statistics = true; fs::TempDir rootPath("/tmp/rocksdb_engine_VertexBloomFilterTest.XXXXXX"); auto engine = std::make_unique(0, kDefaultVIdLen, rootPath.path()); PartitionID partId = 1; VertexID vId = "vertex"; + VertexID notExisted = "notexist"; auto writeVertex = [&](TagID tagId) { std::vector data; @@ -374,34 +321,65 @@ TEST(RocksEngineTest, VertexBloomFilterTest) { } }; + auto scanVertex = [&](VertexID id) { + auto prefix = NebulaKeyUtils::vertexPrefix(kDefaultVIdLen, partId, id); + std::unique_ptr iter; + auto ret = engine->prefix(prefix, &iter); + EXPECT_EQ(ret, nebula::cpp2::ErrorCode::SUCCEEDED); + }; + auto statistics = kvstore::getDBStatistics(); + statistics->getAndResetTickerCount(rocksdb::Tickers::BLOOM_FILTER_USEFUL); + statistics->getAndResetTickerCount(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL); // write initial vertex writeVertex(0); // read data while in memtable - readVertex(0); - EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_USEFUL), 0); - readVertex(1); - EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_USEFUL), 0); + if (FLAGS_enable_rocksdb_whole_key_filtering) 
{ + readVertex(0); + EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_USEFUL), 0); + readVertex(1); + EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_USEFUL), 0); + } + if (FLAGS_enable_rocksdb_prefix_filtering) { + scanVertex(vId); + EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL), 0); + scanVertex(notExisted); + EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL), 0); + } // flush to sst, read again EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, engine->flush()); - readVertex(0); - EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_USEFUL), 0); - // read not exists data, whole key bloom filter will be useful - readVertex(1); - EXPECT_GT(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_USEFUL), 0); + if (FLAGS_enable_rocksdb_whole_key_filtering) { + readVertex(0); + EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_USEFUL), 0); + // read not exists data, whole key bloom filter will be useful + readVertex(1); + EXPECT_GT(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_USEFUL), 0); + } + if (FLAGS_enable_rocksdb_prefix_filtering) { + scanVertex(vId); + EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL), 0); + // read not exists data, prefix key bloom filter will be useful + scanVertex(notExisted); + EXPECT_GT(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL), 0); + } FLAGS_enable_rocksdb_statistics = false; } -TEST(RocksEngineTest, EdgeBloomFilterTest) { +TEST_P(RocksEngineTest, EdgeWholeKeyBloomFilterTest) { + if (FLAGS_rocksdb_table_format == "PlainTable") { + return; + } FLAGS_enable_rocksdb_statistics = true; fs::TempDir rootPath("/tmp/rocksdb_engine_EdgeBloomFilterTest.XXXXXX"); auto engine = std::make_unique(0, kDefaultVIdLen, rootPath.path()); PartitionID partId = 1; VertexID vId = "vertex"; + VertexID notExisted = "notexist"; + auto writeEdge = 
[&](EdgeType edgeType) { std::vector data; data.emplace_back(NebulaKeyUtils::edgeKey(kDefaultVIdLen, partId, vId, edgeType, 0, vId), @@ -420,72 +398,188 @@ TEST(RocksEngineTest, EdgeBloomFilterTest) { } }; + auto scanEdge = [&](VertexID id) { + auto prefix = NebulaKeyUtils::edgePrefix(kDefaultVIdLen, partId, id); + std::unique_ptr iter; + auto ret = engine->prefix(prefix, &iter); + EXPECT_EQ(ret, nebula::cpp2::ErrorCode::SUCCEEDED); + }; + auto statistics = kvstore::getDBStatistics(); statistics->getAndResetTickerCount(rocksdb::Tickers::BLOOM_FILTER_USEFUL); + statistics->getAndResetTickerCount(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL); // write initial vertex writeEdge(0); // read data while in memtable - readEdge(0); - EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_USEFUL), 0); - readEdge(1); - EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_USEFUL), 0); + if (FLAGS_enable_rocksdb_whole_key_filtering) { + readEdge(0); + EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_USEFUL), 0); + readEdge(1); + EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_USEFUL), 0); + } + if (FLAGS_enable_rocksdb_prefix_filtering) { + scanEdge(vId); + EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL), 0); + scanEdge(notExisted); + EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL), 0); + } // flush to sst, read again EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, engine->flush()); - readEdge(0); - EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_USEFUL), 0); - // read not exists data, whole key bloom filter will be useful - readEdge(1); - EXPECT_GT(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_USEFUL), 0); + if (FLAGS_enable_rocksdb_whole_key_filtering) { + readEdge(0); + EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_USEFUL), 0); + // read not exists data, whole key bloom filter will be 
useful + readEdge(1); + EXPECT_GT(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_USEFUL), 0); + } + if (FLAGS_enable_rocksdb_prefix_filtering) { + scanEdge(vId); + EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL), 0); + // read not exists data, prefix key bloom filter will be useful + scanEdge(notExisted); + EXPECT_GT(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL), 0); + } FLAGS_enable_rocksdb_statistics = false; } -class RocksEnginePrefixTest - : public ::testing::TestWithParam> { - public: - void SetUp() override { - auto param = GetParam(); - FLAGS_enable_rocksdb_prefix_filtering = std::get<0>(param); - FLAGS_rocksdb_table_format = std::get<1>(param); - if (FLAGS_rocksdb_table_format == "PlainTable") { - FLAGS_rocksdb_plain_table_prefix_length = std::get<2>(param); - } - } - - void TearDown() override {} -}; - -TEST_P(RocksEnginePrefixTest, PrefixTest) { +TEST_P(RocksEngineTest, PrefixBloomTest) { fs::TempDir rootPath("/tmp/rocksdb_engine_PrefixExtractorTest.XXXXXX"); auto engine = std::make_unique(0, kDefaultVIdLen, rootPath.path()); - PartitionID partId = 1; - std::vector data; for (auto tagId = 0; tagId < 10; tagId++) { - data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, partId, "vertex", tagId), + data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 1, "1", tagId), + folly::stringPrintf("val_%d", tagId)); + data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 1, "2", tagId), + folly::stringPrintf("val_%d", tagId)); + data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 2, "3", tagId), + folly::stringPrintf("val_%d", tagId)); + data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 2, "4", tagId), folly::stringPrintf("val_%d", tagId)); } - data.emplace_back(NebulaKeyUtils::systemCommitKey(partId), "123"); + data.emplace_back(NebulaKeyUtils::systemCommitKey(1), "123"); + data.emplace_back(NebulaKeyUtils::systemCommitKey(2), "123"); 
EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, engine->multiPut(std::move(data))); { - std::string prefix = NebulaKeyUtils::vertexPrefix(kDefaultVIdLen, partId, "vertex"); - std::unique_ptr iter; - auto code = engine->prefix(prefix, &iter); - EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); - int32_t num = 0; - while (iter->valid()) { - num++; - iter->next(); - } - EXPECT_EQ(num, 10); + // vertexPrefix(partId) will not be included + auto checkVertexPrefix = [&](PartitionID partId, const VertexID& vId) { + std::string prefix = NebulaKeyUtils::vertexPrefix(kDefaultVIdLen, partId, vId); + std::unique_ptr iter; + auto code = engine->prefix(prefix, &iter); + EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); + int32_t num = 0; + while (iter->valid()) { + num++; + iter->next(); + } + EXPECT_EQ(num, 10); + }; + checkVertexPrefix(1, "1"); + checkVertexPrefix(1, "2"); + checkVertexPrefix(2, "3"); + checkVertexPrefix(2, "4"); + } + { + // vertexPrefix(partId) will be included + auto checkPartPrefix = [&](PartitionID partId) { + std::string prefix = NebulaKeyUtils::vertexPrefix(partId); + std::unique_ptr iter; + auto code = engine->prefix(prefix, &iter); + EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); + int32_t num = 0; + while (iter->valid()) { + num++; + iter->next(); + } + EXPECT_EQ(num, 20); + }; + checkPartPrefix(1); + checkPartPrefix(2); + } + { + // vertexPrefix(partId) will be included + auto checkRangeWithPartPrefix = [&](PartitionID partId) { + std::string prefix = NebulaKeyUtils::vertexPrefix(partId); + std::unique_ptr iter; + auto code = engine->rangeWithPrefix(prefix, prefix, &iter); + EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); + int32_t num = 0; + while (iter->valid()) { + num++; + iter->next(); + } + EXPECT_EQ(num, 20); + }; + checkRangeWithPartPrefix(1); + checkRangeWithPartPrefix(2); } { - std::string prefix = NebulaKeyUtils::vertexPrefix(partId); + auto checkSystemCommit = [&](PartitionID partId) { + std::string value; + auto code = 
engine->get(NebulaKeyUtils::systemCommitKey(partId), &value); + EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); + EXPECT_EQ("123", value); + }; + checkSystemCommit(1); + checkSystemCommit(2); + } +} + +INSTANTIATE_TEST_CASE_P(EnablePrefixExtractor_EnableWholeKeyFilter_TableFormat, + RocksEngineTest, + ::testing::Values(std::make_tuple(false, false, "BlockBasedTable"), + std::make_tuple(false, true, "BlockBasedTable"), + std::make_tuple(true, false, "BlockBasedTable"), + std::make_tuple(true, true, "BlockBasedTable"), + // PlainTable will always enable prefix extractor + std::make_tuple(true, false, "PlainTable"), + std::make_tuple(true, true, "PlainTable"))); + +TEST(PlainTableTest, BackupRestoreWithoutData) { + fs::TempDir dataPath("/tmp/rocks_engine_test_data_path.XXXXXX"); + fs::TempDir rocksdbWalPath("/tmp/rocks_engine_test_rocksdb_wal_path.XXXXXX"); + fs::TempDir backupPath("/tmp/rocks_engine_test_backup_path.XXXXXX"); + FLAGS_rocksdb_table_format = "PlainTable"; + FLAGS_rocksdb_wal_dir = rocksdbWalPath.path(); + FLAGS_rocksdb_backup_dir = backupPath.path(); + FLAGS_enable_rocksdb_prefix_filtering = true; + + auto engine = std::make_unique(0, kDefaultVIdLen, dataPath.path()); + + LOG(INFO) << "Stop the engine and remove data"; + // release the engine and mock machine reboot by removing the data + engine.reset(); + CHECK(fs::FileUtils::remove(dataPath.path(), true)); + + LOG(INFO) << "Start recover"; + // reopen the engine, and it will try to restore from the previous backup + engine = std::make_unique(0, kDefaultVIdLen, dataPath.path()); + + FLAGS_rocksdb_table_format = "BlockBasedTable"; + FLAGS_rocksdb_wal_dir = ""; + FLAGS_rocksdb_backup_dir = ""; + FLAGS_enable_rocksdb_prefix_filtering = false; +} + +TEST(PlainTableTest, BackupRestoreWithData) { + fs::TempDir dataPath("/tmp/rocks_engine_test_data_path.XXXXXX"); + fs::TempDir rocksdbWalPath("/tmp/rocks_engine_test_rocksdb_wal_path.XXXXXX"); + fs::TempDir 
backupPath("/tmp/rocks_engine_test_backup_path.XXXXXX"); + FLAGS_rocksdb_table_format = "PlainTable"; + FLAGS_rocksdb_wal_dir = rocksdbWalPath.path(); + FLAGS_rocksdb_backup_dir = backupPath.path(); + FLAGS_enable_rocksdb_prefix_filtering = true; + + auto engine = std::make_unique(0, kDefaultVIdLen, dataPath.path()); + PartitionID partId = 1; + + auto checkData = [&] { + std::string prefix = NebulaKeyUtils::vertexPrefix(kDefaultVIdLen, partId, "vertex"); std::unique_ptr iter; auto code = engine->prefix(prefix, &iter); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); @@ -495,23 +589,270 @@ TEST_P(RocksEnginePrefixTest, PrefixTest) { iter->next(); } EXPECT_EQ(num, 10); - } - { + std::string value; - auto code = engine->get(NebulaKeyUtils::systemCommitKey(partId), &value); + code = engine->get(NebulaKeyUtils::systemCommitKey(partId), &value); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); EXPECT_EQ("123", value); + }; + + LOG(INFO) << "Write some data"; + std::vector data; + for (auto tagId = 0; tagId < 10; tagId++) { + data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, partId, "vertex", tagId), + folly::stringPrintf("val_%d", tagId)); } + data.emplace_back(NebulaKeyUtils::systemCommitKey(partId), "123"); + engine->multiPut(std::move(data)); + + checkData(); + LOG(INFO) << "Stop the engine and remove data"; + // release the engine and mock machine reboot by removing the data + engine.reset(); + CHECK(fs::FileUtils::remove(dataPath.path(), true)); + + LOG(INFO) << "Start recover"; + // reopen the engine, and it will try to restore from the previous backup + engine = std::make_unique(0, kDefaultVIdLen, dataPath.path()); + checkData(); + + FLAGS_rocksdb_table_format = "BlockBasedTable"; + FLAGS_rocksdb_wal_dir = ""; + FLAGS_rocksdb_backup_dir = ""; + FLAGS_enable_rocksdb_prefix_filtering = false; } -INSTANTIATE_TEST_CASE_P( - PrefixExtractor_TableFormat_PlainTablePrefixSize, - RocksEnginePrefixTest, - ::testing::Values(std::make_tuple(false, 
"BlockBasedTable", 0), - std::make_tuple(true, "BlockBasedTable", 0), - // PlainTable will always enable prefix extractor - std::make_tuple(true, "PlainTable", sizeof(PartitionID)), - std::make_tuple(true, "PlainTable", sizeof(PartitionID) + kDefaultVIdLen))); +TEST(RebuildPrefixBloomFilter, RebuildPrefixBloomFilter) { + GraphSpaceID spaceId = 1; + // previously default config (prefix off whole on) + FLAGS_rocksdb_table_format = "BlockBasedTable"; + FLAGS_enable_rocksdb_prefix_filtering = false; + FLAGS_enable_rocksdb_whole_key_filtering = true; + + fs::TempDir dataPath("/tmp/rocksdb_engine_rebuild_prefix_bloom_filter.XXXXXX"); + LOG(INFO) << "start the engine with prefix bloom filter disabled"; + auto engine = std::make_unique(spaceId, kDefaultVIdLen, dataPath.path()); + + auto checkData = [&] { + auto checkVertexPrefix = [&](PartitionID partId, VertexID vId) { + { + std::string prefix = NebulaKeyUtils::vertexPrefix(kDefaultVIdLen, partId, vId); + std::unique_ptr iter; + auto code = engine->prefix(prefix, &iter); + EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); + int32_t num = 0; + while (iter->valid()) { + num++; + iter->next(); + } + EXPECT_EQ(num, 10); + } + for (TagID tagId = 0; tagId < 10; tagId++) { + std::string prefix = NebulaKeyUtils::vertexPrefix(kDefaultVIdLen, partId, vId, tagId); + std::unique_ptr iter; + auto code = engine->prefix(prefix, &iter); + EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); + int32_t num = 0; + while (iter->valid()) { + num++; + iter->next(); + } + EXPECT_EQ(num, 1); + } + }; + + auto checkEdgePrefix = [&](PartitionID partId, VertexID vId) { + { + std::string prefix = NebulaKeyUtils::edgePrefix(kDefaultVIdLen, partId, vId); + std::unique_ptr iter; + auto code = engine->prefix(prefix, &iter); + EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); + int32_t num = 0; + while (iter->valid()) { + num++; + iter->next(); + } + EXPECT_EQ(num, 10); + } + for (EdgeType edgeType = 0; edgeType < 10; edgeType++) { + std::string 
prefix = NebulaKeyUtils::edgePrefix(kDefaultVIdLen, partId, vId, edgeType); + std::unique_ptr iter; + auto code = engine->prefix(prefix, &iter); + EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); + int32_t num = 0; + while (iter->valid()) { + num++; + iter->next(); + } + EXPECT_EQ(num, 1); + } + }; + + auto checkVertexPartPrefix = [&](PartitionID partId) { + std::string prefix = NebulaKeyUtils::vertexPrefix(partId); + std::unique_ptr iter; + auto code = engine->prefix(prefix, &iter); + EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); + int32_t num = 0; + while (iter->valid()) { + num++; + iter->next(); + } + EXPECT_EQ(num, 20); + }; + + auto checkEdgePartPrefix = [&](PartitionID partId) { + std::string prefix = NebulaKeyUtils::edgePrefix(partId); + std::unique_ptr iter; + auto code = engine->prefix(prefix, &iter); + EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); + int32_t num = 0; + while (iter->valid()) { + num++; + iter->next(); + } + EXPECT_EQ(num, 20); + }; + + auto checkRangeWithPartPrefix = [&](PartitionID partId) { + std::string prefix = NebulaKeyUtils::vertexPrefix(partId); + std::unique_ptr iter; + auto code = engine->rangeWithPrefix(prefix, prefix, &iter); + EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); + int32_t num = 0; + while (iter->valid()) { + num++; + iter->next(); + } + EXPECT_EQ(num, 20); + }; + + auto checkSystemCommit = [&](PartitionID partId) { + std::string value; + auto code = engine->get(NebulaKeyUtils::systemCommitKey(partId), &value); + EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); + EXPECT_EQ("123", value); + }; + + checkVertexPrefix(1, "1"); + checkVertexPrefix(1, "2"); + checkVertexPrefix(2, "3"); + checkVertexPrefix(2, "4"); + checkEdgePrefix(1, "1"); + checkEdgePrefix(1, "2"); + checkEdgePrefix(2, "3"); + checkEdgePrefix(2, "4"); + checkVertexPartPrefix(1); + checkVertexPartPrefix(2); + checkEdgePartPrefix(1); + checkEdgePartPrefix(2); + checkRangeWithPartPrefix(1); + checkRangeWithPartPrefix(2); + 
checkSystemCommit(1); + checkSystemCommit(2); + }; + + auto writeData = [&engine] { + LOG(INFO) << "Write some data"; + std::vector data; + for (TagID tagId = 0; tagId < 10; tagId++) { + data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 1, "1", tagId), + folly::stringPrintf("val_%d", tagId)); + data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 1, "2", tagId), + folly::stringPrintf("val_%d", tagId)); + data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 2, "3", tagId), + folly::stringPrintf("val_%d", tagId)); + data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 2, "4", tagId), + folly::stringPrintf("val_%d", tagId)); + } + EdgeRanking rank = 0; + for (EdgeType edgeType = 0; edgeType < 10; edgeType++) { + data.emplace_back(NebulaKeyUtils::edgeKey(kDefaultVIdLen, 1, "1", edgeType, rank, "1"), + folly::stringPrintf("val_%d", edgeType)); + data.emplace_back(NebulaKeyUtils::edgeKey(kDefaultVIdLen, 1, "2", edgeType, rank, "2"), + folly::stringPrintf("val_%d", edgeType)); + data.emplace_back(NebulaKeyUtils::edgeKey(kDefaultVIdLen, 2, "3", edgeType, rank, "3"), + folly::stringPrintf("val_%d", edgeType)); + data.emplace_back(NebulaKeyUtils::edgeKey(kDefaultVIdLen, 2, "4", edgeType, rank, "4"), + folly::stringPrintf("val_%d", edgeType)); + } + data.emplace_back(NebulaKeyUtils::systemCommitKey(1), "123"); + data.emplace_back(NebulaKeyUtils::systemCommitKey(2), "123"); + auto code = engine->multiPut(std::move(data)); + EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); + }; + + auto writeNewData = [&engine] { + std::vector data; + data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 3, "5", 0), + "vertex_data_after_enable_prefix_bloom_filter"); + data.emplace_back(NebulaKeyUtils::edgeKey(kDefaultVIdLen, 3, "5", 0, 0, "5"), + "edge_data_after_enable_prefix_bloom_filter"); + auto code = engine->multiPut(std::move(data)); + EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); + }; + + auto checkNewData = [&engine] { + std::string 
value; + auto code = engine->get(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 3, "5", 0), &value); + EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); + EXPECT_EQ("vertex_data_after_enable_prefix_bloom_filter", value); + code = engine->get(NebulaKeyUtils::edgeKey(kDefaultVIdLen, 3, "5", 0, 0, "5"), &value); + EXPECT_EQ("edge_data_after_enable_prefix_bloom_filter", value); + + auto checkPrefix = [&](const std::string& prefix) { + std::unique_ptr iter; + code = engine->prefix(prefix, &iter); + EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); + int32_t num = 0; + while (iter->valid()) { + num++; + iter->next(); + } + EXPECT_EQ(num, 1); + }; + + checkPrefix(NebulaKeyUtils::vertexPrefix(3)); + checkPrefix(NebulaKeyUtils::vertexPrefix(kDefaultVIdLen, 3, "5")); + checkPrefix(NebulaKeyUtils::vertexPrefix(kDefaultVIdLen, 3, "5", 0)); + checkPrefix(NebulaKeyUtils::edgePrefix(3)); + checkPrefix(NebulaKeyUtils::edgePrefix(kDefaultVIdLen, 3, "5")); + checkPrefix(NebulaKeyUtils::edgePrefix(kDefaultVIdLen, 3, "5", 0)); + checkPrefix(NebulaKeyUtils::edgePrefix(kDefaultVIdLen, 3, "5", 0, 0, "5")); + }; + + writeData(); + checkData(); + + LOG(INFO) << "release the engine and restart with prefix bloom filter enabled"; + engine.reset(); + // new default config (prefix on whole off) + FLAGS_enable_rocksdb_prefix_filtering = true; + FLAGS_enable_rocksdb_whole_key_filtering = false; + engine = std::make_unique(spaceId, kDefaultVIdLen, dataPath.path()); + checkData(); + + writeNewData(); + checkNewData(); + + LOG(INFO) << "compact to rebuild prefix bloom filter"; + engine->compact(); + checkData(); + checkNewData(); + + LOG(INFO) << "release the engine and restart with prefix bloom filter disabled again"; + engine.reset(); + FLAGS_enable_rocksdb_prefix_filtering = false; + FLAGS_enable_rocksdb_whole_key_filtering = true; + engine = std::make_unique(spaceId, kDefaultVIdLen, dataPath.path()); + checkData(); + checkNewData(); + + LOG(INFO) << "compact to rebuild whole key bloom filter"; 
+ engine->compact(); + checkData(); + checkNewData(); +} } // namespace kvstore } // namespace nebula diff --git a/src/meta/CMakeLists.txt b/src/meta/CMakeLists.txt index 63e2d75dde1..5c69e0336eb 100644 --- a/src/meta/CMakeLists.txt +++ b/src/meta/CMakeLists.txt @@ -11,6 +11,7 @@ nebula_add_library( processors/parts/ListHostsProcessor.cpp processors/parts/ListPartsProcessor.cpp processors/parts/CreateSpaceProcessor.cpp + processors/parts/CreateSpaceAsProcessor.cpp processors/parts/GetSpaceProcessor.cpp processors/parts/ListSpacesProcessor.cpp processors/parts/DropSpaceProcessor.cpp @@ -158,6 +159,7 @@ set(meta_test_deps $ $ $ + $ ) nebula_add_subdirectory(http) diff --git a/src/meta/MetaServiceHandler.cpp b/src/meta/MetaServiceHandler.cpp index 2ceec503ded..76d3ca0009f 100644 --- a/src/meta/MetaServiceHandler.cpp +++ b/src/meta/MetaServiceHandler.cpp @@ -43,6 +43,7 @@ #include "meta/processors/kv/RemoveRangeProcessor.h" #include "meta/processors/kv/ScanProcessor.h" #include "meta/processors/listener/ListenerProcessor.h" +#include "meta/processors/parts/CreateSpaceAsProcessor.h" #include "meta/processors/parts/CreateSpaceProcessor.h" #include "meta/processors/parts/DropSpaceProcessor.h" #include "meta/processors/parts/GetPartsAllocProcessor.h" @@ -87,6 +88,12 @@ folly::Future MetaServiceHandler::future_createSpace( RETURN_FUTURE(processor); } +folly::Future MetaServiceHandler::future_createSpaceAs( + const cpp2::CreateSpaceAsReq& req) { + auto* processor = CreateSpaceAsProcessor::instance(kvstore_); + RETURN_FUTURE(processor); +} + folly::Future MetaServiceHandler::future_dropSpace(const cpp2::DropSpaceReq& req) { auto* processor = DropSpaceProcessor::instance(kvstore_); RETURN_FUTURE(processor); diff --git a/src/meta/MetaServiceHandler.h b/src/meta/MetaServiceHandler.h index 2a6919e99b1..bafec070efe 100644 --- a/src/meta/MetaServiceHandler.h +++ b/src/meta/MetaServiceHandler.h @@ -31,6 +31,8 @@ class MetaServiceHandler final : public cpp2::MetaServiceSvIf { * */ 
folly::Future future_createSpace(const cpp2::CreateSpaceReq& req) override; + folly::Future future_createSpaceAs(const cpp2::CreateSpaceAsReq& req) override; + folly::Future future_dropSpace(const cpp2::DropSpaceReq& req) override; folly::Future future_listSpaces(const cpp2::ListSpacesReq& req) override; diff --git a/src/meta/processors/admin/AdminClient.h b/src/meta/processors/admin/AdminClient.h index c1a3a1bef8a..61ae5157f22 100644 --- a/src/meta/processors/admin/AdminClient.h +++ b/src/meta/processors/admin/AdminClient.h @@ -11,6 +11,7 @@ #include "common/base/Base.h" #include "common/base/Status.h" +#include "common/ssl/SSLConfig.h" #include "common/thrift/ThriftClientManager.h" #include "interface/gen-cpp2/StorageAdminServiceAsyncClient.h" #include "kvstore/KVStore.h" @@ -33,7 +34,8 @@ class AdminClient { explicit AdminClient(kvstore::KVStore* kv) : kv_(kv) { ioThreadPool_ = std::make_unique(10); clientsMan_ = std::make_unique< - thrift::ThriftClientManager>(); + thrift::ThriftClientManager>( + FLAGS_enable_ssl); } virtual ~AdminClient() = default; diff --git a/src/meta/processors/job/AdminJobProcessor.cpp b/src/meta/processors/job/AdminJobProcessor.cpp index 9e9595fbd6f..efb2e8eb1eb 100644 --- a/src/meta/processors/job/AdminJobProcessor.cpp +++ b/src/meta/processors/job/AdminJobProcessor.cpp @@ -64,7 +64,7 @@ void AdminJobProcessor::process(const cpp2::AdminJobReq& req) { break; } case nebula::meta::cpp2::AdminJobOp::SHOW_All: { - auto ret = jobMgr->showJobs(); + auto ret = jobMgr->showJobs(req.get_paras().back()); if (nebula::ok(ret)) { result.set_job_desc(nebula::value(ret)); } else { @@ -73,8 +73,9 @@ void AdminJobProcessor::process(const cpp2::AdminJobReq& req) { break; } case nebula::meta::cpp2::AdminJobOp::SHOW: { - if (req.get_paras().empty()) { - LOG(ERROR) << "Parameter should be not empty"; + static const size_t kShowArgsNum = 2; + if (req.get_paras().size() != kShowArgsNum) { + LOG(ERROR) << "Parameter number not matched"; errorCode = 
nebula::cpp2::ErrorCode::E_INVALID_PARM; break; } @@ -85,8 +86,7 @@ void AdminJobProcessor::process(const cpp2::AdminJobReq& req) { errorCode = nebula::cpp2::ErrorCode::E_INVALID_PARM; break; } - - auto ret = jobMgr->showJob(iJob); + auto ret = jobMgr->showJob(iJob, req.get_paras().back()); if (nebula::ok(ret)) { result.set_job_desc(std::vector{nebula::value(ret).first}); result.set_task_desc(nebula::value(ret).second); @@ -96,8 +96,9 @@ void AdminJobProcessor::process(const cpp2::AdminJobReq& req) { break; } case nebula::meta::cpp2::AdminJobOp::STOP: { - if (req.get_paras().empty()) { - LOG(ERROR) << "Parameter should be not empty"; + static const size_t kStopJobArgsNum = 2; + if (req.get_paras().size() != kStopJobArgsNum) { + LOG(ERROR) << "Parameter number not matched"; errorCode = nebula::cpp2::ErrorCode::E_INVALID_PARM; break; } @@ -107,11 +108,11 @@ void AdminJobProcessor::process(const cpp2::AdminJobReq& req) { errorCode = nebula::cpp2::ErrorCode::E_INVALID_PARM; break; } - errorCode = jobMgr->stopJob(iJob); + errorCode = jobMgr->stopJob(iJob, req.get_paras().back()); break; } case nebula::meta::cpp2::AdminJobOp::RECOVER: { - auto ret = jobMgr->recoverJob(); + auto ret = jobMgr->recoverJob(req.get_paras().back()); if (nebula::ok(ret)) { result.set_recovered_job_num(nebula::value(ret)); } else { diff --git a/src/meta/processors/job/JobDescription.h b/src/meta/processors/job/JobDescription.h index b6f538b7ea1..746e8d69e1a 100644 --- a/src/meta/processors/job/JobDescription.h +++ b/src/meta/processors/job/JobDescription.h @@ -27,6 +27,8 @@ class JobDescription { FRIEND_TEST(JobManagerTest, loadJobDescription); FRIEND_TEST(JobManagerTest, showJobs); FRIEND_TEST(JobManagerTest, showJob); + FRIEND_TEST(JobManagerTest, showJobsFromMultiSpace); + FRIEND_TEST(JobManagerTest, showJobInOtherSpace); FRIEND_TEST(JobManagerTest, backupJob); FRIEND_TEST(JobManagerTest, recoverJob); FRIEND_TEST(GetStatsTest, StatsJob); diff --git a/src/meta/processors/job/JobManager.cpp 
b/src/meta/processors/job/JobManager.cpp index 94d21f5cc94..b083d16a155 100644 --- a/src/meta/processors/job/JobManager.cpp +++ b/src/meta/processors/job/JobManager.cpp @@ -14,6 +14,7 @@ #include "common/http/HttpClient.h" #include "common/time/WallClock.h" +#include "interface/gen-cpp2/common_types.h" #include "kvstore/Common.h" #include "kvstore/KVIterator.h" #include "meta/MetaServiceUtils.h" @@ -375,7 +376,8 @@ void JobManager::enqueue(const JobID& jobId, const cpp2::AdminCmd& cmd) { } } -ErrorOr> JobManager::showJobs() { +ErrorOr> JobManager::showJobs( + const std::string& spaceName) { std::unique_ptr iter; auto retCode = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, JobUtil::jobPrefix(), &iter); if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { @@ -404,6 +406,9 @@ ErrorOr> JobManager::showJob expiredJobKeys.emplace_back(jobKey); continue; } + if (jobDesc.get_paras().back() != spaceName) { + continue; + } ret.emplace_back(jobDesc); } else { // iter-key() is a TaskKey TaskDescription task(jobKey, iter->val()); @@ -477,7 +482,7 @@ bool JobManager::checkJobExist(const cpp2::AdminCmd& cmd, } ErrorOr>> -JobManager::showJob(JobID iJob) { +JobManager::showJob(JobID iJob, const std::string& spaceName) { auto jobKey = JobDescription::makeJobKey(iJob); std::unique_ptr iter; auto rc = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, jobKey, &iter); @@ -498,6 +503,10 @@ JobManager::showJob(JobID iJob) { return nebula::error(optJobRet); } auto optJob = nebula::value(optJobRet); + if (optJob.getParas().back() != spaceName) { + LOG(WARNING) << "Show job " << iJob << " not in current space " << spaceName; + return nebula::cpp2::ErrorCode::E_JOB_NOT_IN_SPACE; + } ret.first = optJob.toJobDesc(); } else { TaskDescription td(jKey, iter->val()); @@ -507,15 +516,27 @@ JobManager::showJob(JobID iJob) { return ret; } -nebula::cpp2::ErrorCode JobManager::stopJob(JobID iJob) { +nebula::cpp2::ErrorCode JobManager::stopJob(JobID iJob, const std::string& spaceName) { LOG(INFO) << 
"try to stop job " << iJob; + auto optJobDescRet = JobDescription::loadJobDescription(iJob, kvStore_); + if (!nebula::ok(optJobDescRet)) { + auto retCode = nebula::error(optJobDescRet); + LOG(WARNING) << "LoadJobDesc failed, jobId " << iJob + << " error: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + auto optJobDesc = nebula::value(optJobDescRet); + if (optJobDesc.getParas().back() != spaceName) { + LOG(WARNING) << "Stop job " << iJob << " not in space " << spaceName; + return nebula::cpp2::ErrorCode::E_JOB_NOT_IN_SPACE; + } return jobFinished(iJob, cpp2::JobStatus::STOPPED); } /* * Return: recovered job num. * */ -ErrorOr JobManager::recoverJob() { +ErrorOr JobManager::recoverJob(const std::string& spaceName) { int32_t recoveredJobNum = 0; std::unique_ptr iter; auto retCode = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, JobUtil::jobPrefix(), &iter); @@ -531,6 +552,9 @@ ErrorOr JobManager::recoverJob() { auto optJobRet = JobDescription::makeJobDescription(iter->key(), iter->val()); if (nebula::ok(optJobRet)) { auto optJob = nebula::value(optJobRet); + if (optJob.getParas().back() != spaceName) { + continue; + } if (optJob.getStatus() == cpp2::JobStatus::QUEUE) { // Check if the job exists JobID jId = 0; diff --git a/src/meta/processors/job/JobManager.h b/src/meta/processors/job/JobManager.h index 378c8d01a5e..90bdfd695e2 100644 --- a/src/meta/processors/job/JobManager.h +++ b/src/meta/processors/job/JobManager.h @@ -36,7 +36,9 @@ class JobManager : public nebula::cpp::NonCopyable, public nebula::cpp::NonMovab FRIEND_TEST(JobManagerTest, JobDeduplication); FRIEND_TEST(JobManagerTest, loadJobDescription); FRIEND_TEST(JobManagerTest, showJobs); + FRIEND_TEST(JobManagerTest, showJobsFromMultiSpace); FRIEND_TEST(JobManagerTest, showJob); + FRIEND_TEST(JobManagerTest, showJobInOtherSpace); FRIEND_TEST(JobManagerTest, recoverJob); FRIEND_TEST(JobManagerTest, AddRebuildTagIndexJob); FRIEND_TEST(JobManagerTest, AddRebuildEdgeIndexJob); @@ 
-69,14 +71,16 @@ class JobManager : public nebula::cpp::NonCopyable, public nebula::cpp::NonMovab */ bool checkJobExist(const cpp2::AdminCmd& cmd, const std::vector& paras, JobID& iJob); - ErrorOr> showJobs(); + ErrorOr> showJobs( + const std::string& spaceName); ErrorOr>> showJob( - JobID iJob); + JobID iJob, const std::string& spaceName); - nebula::cpp2::ErrorCode stopJob(JobID iJob); + nebula::cpp2::ErrorCode stopJob(JobID iJob, const std::string& spaceName); - ErrorOr recoverJob(); + // return error/recovered job num + ErrorOr recoverJob(const std::string& spaceName); /** * @brief persist job executed result, and do the cleanup diff --git a/src/meta/processors/parts/CreateSpaceAsProcessor.cpp b/src/meta/processors/parts/CreateSpaceAsProcessor.cpp new file mode 100644 index 00000000000..7f9497ca67d --- /dev/null +++ b/src/meta/processors/parts/CreateSpaceAsProcessor.cpp @@ -0,0 +1,229 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License, + * attached with Common Clause Condition 1.0, found in the LICENSES directory. + */ + +#include "meta/processors/parts/CreateSpaceAsProcessor.h" + +#include "meta/ActiveHostsMan.h" + +namespace nebula { +namespace meta { + +void CreateSpaceAsProcessor::process(const cpp2::CreateSpaceAsReq &req) { + SCOPE_EXIT { + if (rc_ != nebula::cpp2::ErrorCode::SUCCEEDED) { + handleErrorCode(rc_); + } + onFinished(); + }; + + folly::SharedMutex::WriteHolder wHolder(LockUtils::spaceLock()); + auto oldSpaceName = req.get_old_space_name(); + auto newSpaceName = req.get_new_space_name(); + auto oldSpaceId = getSpaceId(oldSpaceName); + auto newSpaceId = getSpaceId(newSpaceName); + + if (!nebula::ok(oldSpaceId)) { + rc_ = nebula::error(oldSpaceId); + LOG(ERROR) << "Create Space [" << newSpaceName << "] as [" << oldSpaceName + << "] failed. Old space does not exists. 
rc = " + << apache::thrift::util::enumNameSafe(rc_); + return; + } + + if (nebula::ok(newSpaceId)) { + rc_ = nebula::cpp2::ErrorCode::E_EXISTED; + LOG(ERROR) << "Create Space [" << newSpaceName << "] as [" << oldSpaceName + << "] failed. New space already exists."; + return; + } + + newSpaceId = autoIncrementId(); + if (!nebula::ok(newSpaceId)) { + rc_ = nebula::error(newSpaceId); + LOG(ERROR) << "Create Space Failed : Generate new space id failed"; + return; + } + + std::vector data; + + auto newSpaceData = + makeNewSpaceData(nebula::value(oldSpaceId), nebula::value(newSpaceId), newSpaceName); + if (nebula::ok(newSpaceData)) { + data.insert(data.end(), nebula::value(newSpaceData).begin(), nebula::value(newSpaceData).end()); + } else { + rc_ = nebula::error(newSpaceData); + LOG(ERROR) << "make new space data failed, " << apache::thrift::util::enumNameSafe(rc_); + return; + } + + auto newTags = makeNewTags(nebula::value(oldSpaceId), nebula::value(newSpaceId)); + if (nebula::ok(newTags)) { + data.insert(data.end(), nebula::value(newTags).begin(), nebula::value(newTags).end()); + } else { + rc_ = nebula::error(newTags); + LOG(ERROR) << "make new tags failed, " << apache::thrift::util::enumNameSafe(rc_); + return; + } + + auto newEdges = makeNewEdges(nebula::value(oldSpaceId), nebula::value(newSpaceId)); + if (nebula::ok(newEdges)) { + data.insert(data.end(), nebula::value(newEdges).begin(), nebula::value(newEdges).end()); + } else { + rc_ = nebula::error(newEdges); + LOG(ERROR) << "make new edges failed, " << apache::thrift::util::enumNameSafe(rc_); + return; + } + + auto newIndexes = makeNewIndexes(nebula::value(oldSpaceId), nebula::value(newSpaceId)); + if (nebula::ok(newIndexes)) { + data.insert(data.end(), nebula::value(newIndexes).begin(), nebula::value(newIndexes).end()); + } else { + rc_ = nebula::error(newIndexes); + LOG(ERROR) << "make new indexes failed, " << apache::thrift::util::enumNameSafe(rc_); + return; + } + + 
resp_.set_id(to(nebula::value(newSpaceId), EntryType::SPACE)); + rc_ = doSyncPut(std::move(data)); + if (rc_ != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "put data error, " << apache::thrift::util::enumNameSafe(rc_); + return; + } + + rc_ = LastUpdateTimeMan::update(kvstore_, time::WallClock::fastNowInMilliSec()); + if (rc_ != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "update last update time error, " << apache::thrift::util::enumNameSafe(rc_); + return; + } + LOG(INFO) << "created space " << newSpaceName; +} + +ErrorOr> CreateSpaceAsProcessor::makeNewSpaceData( + GraphSpaceID oldSpaceId, GraphSpaceID newSpaceId, const std::string &spaceName) { + auto oldSpaceKey = MetaServiceUtils::spaceKey(oldSpaceId); + auto oldSpaceVal = doGet(oldSpaceKey); + if (!nebula::ok(oldSpaceVal)) { + LOG(ERROR) << "Create Space Failed : Generate new space id failed"; + rc_ = nebula::error(oldSpaceVal); + return rc_; + } + + std::vector data; + data.emplace_back(MetaServiceUtils::indexSpaceKey(spaceName), + std::string(reinterpret_cast(&newSpaceId), sizeof(newSpaceId))); + cpp2::SpaceDesc spaceDesc = MetaServiceUtils::parseSpace(nebula::value(oldSpaceVal)); + spaceDesc.set_space_name(spaceName); + data.emplace_back(MetaServiceUtils::spaceKey(newSpaceId), MetaServiceUtils::spaceVal(spaceDesc)); + + auto prefix = MetaServiceUtils::partPrefix(oldSpaceId); + auto partPrefix = doPrefix(prefix); + if (!nebula::ok(partPrefix)) { + return nebula::error(partPrefix); + } + auto iter = nebula::value(partPrefix).get(); + for (; iter->valid(); iter->next()) { + auto partId = MetaServiceUtils::parsePartKeyPartId(iter->key()); + data.emplace_back(MetaServiceUtils::partKey(newSpaceId, partId), iter->val()); + } + return data; +} + +ErrorOr> CreateSpaceAsProcessor::makeNewTags( + GraphSpaceID oldSpaceId, GraphSpaceID newSpaceId) { + folly::SharedMutex::ReadHolder rHolder(LockUtils::tagLock()); + auto prefix = MetaServiceUtils::schemaTagsPrefix(oldSpaceId); + auto tagPrefix = 
doPrefix(prefix); + if (!nebula::ok(tagPrefix)) { + if (nebula::error(tagPrefix) == nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND) { + // no tag is ok. + return nebula::cpp2::ErrorCode::SUCCEEDED; + } + return nebula::error(tagPrefix); + } + + std::vector data; + auto iter = nebula::value(tagPrefix).get(); + for (; iter->valid(); iter->next()) { + auto val = iter->val(); + + auto tagId = MetaServiceUtils::parseTagId(iter->key()); + auto tagNameLen = *reinterpret_cast(val.data()); + auto tagName = val.subpiece(sizeof(int32_t), tagNameLen).str(); + data.emplace_back(MetaServiceUtils::indexTagKey(newSpaceId, tagName), + std::string(reinterpret_cast(&tagId), sizeof(tagId))); + + auto tagVer = MetaServiceUtils::parseTagVersion(iter->key()); + auto key = MetaServiceUtils::schemaTagKey(newSpaceId, tagId, tagVer); + data.emplace_back(std::move(key), val.str()); + } + return data; +} + +ErrorOr> CreateSpaceAsProcessor::makeNewEdges( + GraphSpaceID oldSpaceId, GraphSpaceID newSpaceId) { + folly::SharedMutex::ReadHolder rHolder(LockUtils::edgeLock()); + auto prefix = MetaServiceUtils::schemaEdgesPrefix(oldSpaceId); + auto edgePrefix = doPrefix(prefix); + if (!nebula::ok(edgePrefix)) { + if (nebula::error(edgePrefix) == nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND) { + // no edge is ok. 
+ return nebula::cpp2::ErrorCode::SUCCEEDED; + } + return nebula::error(edgePrefix); + } + + std::vector data; + auto iter = nebula::value(edgePrefix).get(); + for (; iter->valid(); iter->next()) { + auto val = iter->val(); + + auto edgeType = MetaServiceUtils::parseEdgeType(iter->key()); + auto edgeNameLen = *reinterpret_cast(val.data()); + auto edgeName = val.subpiece(sizeof(int32_t), edgeNameLen).str(); + data.emplace_back(MetaServiceUtils::indexEdgeKey(newSpaceId, edgeName), + std::string(reinterpret_cast(&edgeType), sizeof(edgeType))); + + auto ver = MetaServiceUtils::parseEdgeVersion(iter->key()); + auto key = MetaServiceUtils::schemaEdgeKey(newSpaceId, edgeType, ver); + data.emplace_back(std::move(key), val.str()); + } + return data; +} + +ErrorOr> CreateSpaceAsProcessor::makeNewIndexes( + GraphSpaceID oldSpaceId, GraphSpaceID newSpaceId) { + folly::SharedMutex::ReadHolder rHolder(LockUtils::edgeLock()); + auto prefix = MetaServiceUtils::indexPrefix(oldSpaceId); + auto indexPrefix = doPrefix(prefix); + if (!nebula::ok(indexPrefix)) { + if (nebula::error(indexPrefix) == nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND) { + // no index is ok. 
+ return nebula::cpp2::ErrorCode::SUCCEEDED; + } + return nebula::error(indexPrefix); + } + + std::vector data; + auto iter = nebula::value(indexPrefix).get(); + for (; iter->valid(); iter->next()) { + auto val = iter->val(); + + auto indexId = MetaServiceUtils::parseIndexesKeyIndexID(iter->key()); + + cpp2::IndexItem idxItem = MetaServiceUtils::parseIndex(val.str()); + auto indexName = idxItem.get_index_name(); + + data.emplace_back(MetaServiceUtils::indexIndexKey(newSpaceId, indexName), + std::string(reinterpret_cast(&indexId), sizeof(indexId))); + + data.emplace_back(MetaServiceUtils::indexKey(newSpaceId, indexId), + MetaServiceUtils::indexVal(idxItem)); + } + return data; +} + +} // namespace meta +} // namespace nebula diff --git a/src/meta/processors/parts/CreateSpaceAsProcessor.h b/src/meta/processors/parts/CreateSpaceAsProcessor.h new file mode 100644 index 00000000000..4ef036e9966 --- /dev/null +++ b/src/meta/processors/parts/CreateSpaceAsProcessor.h @@ -0,0 +1,44 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License, + * attached with Common Clause Condition 1.0, found in the LICENSES directory. 
+ */ +#pragma once + +#include "meta/processors/BaseProcessor.h" + +namespace nebula { +namespace meta { + +using Hosts = std::vector; + +class CreateSpaceAsProcessor : public BaseProcessor { + public: + static CreateSpaceAsProcessor* instance(kvstore::KVStore* kvstore) { + return new CreateSpaceAsProcessor(kvstore); + } + + void process(const cpp2::CreateSpaceAsReq& req); + + protected: + ErrorOr> makeNewSpaceData( + GraphSpaceID oldSpaceId, GraphSpaceID newSpaceId, const std::string& spaceName); + + ErrorOr> makeNewTags(GraphSpaceID oldSpaceId, + GraphSpaceID newSpaceId); + + ErrorOr> makeNewEdges(GraphSpaceID oldSpaceId, + GraphSpaceID newSpaceId); + + ErrorOr> makeNewIndexes( + GraphSpaceID oldSpaceId, GraphSpaceID newSpaceId); + + nebula::cpp2::ErrorCode rc_{nebula::cpp2::ErrorCode::SUCCEEDED}; + + private: + explicit CreateSpaceAsProcessor(kvstore::KVStore* kvstore) + : BaseProcessor(kvstore) {} +}; + +} // namespace meta +} // namespace nebula diff --git a/src/meta/processors/zone/ListZonesProcessor.cpp b/src/meta/processors/zone/ListZonesProcessor.cpp index f41fb240937..2faf569aa62 100644 --- a/src/meta/processors/zone/ListZonesProcessor.cpp +++ b/src/meta/processors/zone/ListZonesProcessor.cpp @@ -33,6 +33,33 @@ void ListZonesProcessor::process(const cpp2::ListZonesReq&) { iter->next(); } + const auto& groupPrefix = MetaServiceUtils::groupPrefix(); + auto groupIterRet = doPrefix(groupPrefix); + if (!nebula::ok(groupIterRet)) { + auto retCode = nebula::error(groupIterRet); + LOG(ERROR) << "Get groups failed, error: " << apache::thrift::util::enumNameSafe(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } + + auto groupIter = nebula::value(groupIterRet).get(); + while (groupIter->valid()) { + auto zoneNames = MetaServiceUtils::parseZoneNames(groupIter->val()); + for (auto& name : zoneNames) { + auto it = std::find_if(zones.begin(), zones.end(), [&name](const auto& zone) { + return name == zone.get_zone_name(); + }); + if (it == zones.end()) 
{ + cpp2::Zone zone; + zone.set_zone_name(name); + zone.set_nodes({}); + zones.emplace_back(std::move(zone)); + } + } + groupIter->next(); + } + handleErrorCode(nebula::cpp2::ErrorCode::SUCCEEDED); + resp_.set_zones(std::move(zones)); + onFinished(); diff --git a/src/meta/processors/zone/UpdateZoneProcessor.cpp b/src/meta/processors/zone/UpdateZoneProcessor.cpp index f76f30f15c7..e6982a006ac 100644 --- a/src/meta/processors/zone/UpdateZoneProcessor.cpp +++ b/src/meta/processors/zone/UpdateZoneProcessor.cpp @@ -117,6 +117,46 @@ void DropHostFromZoneProcessor::process(const cpp2::DropHostFromZoneReq& req) { return; } + const auto& spacePrefix = MetaServiceUtils::spacePrefix(); + auto spaceIterRet = doPrefix(spacePrefix); + auto spaceIter = nebula::value(spaceIterRet).get(); + while (spaceIter->valid()) { + auto spaceId = MetaServiceUtils::spaceId(spaceIter->key()); + auto spaceKey = MetaServiceUtils::spaceKey(spaceId); + auto ret = doGet(spaceKey); + if (!nebula::ok(ret)) { + auto retCode = nebula::error(ret); + LOG(ERROR) << "Get Space " << spaceId + << " error: " << apache::thrift::util::enumNameSafe(retCode); + handleErrorCode(retCode); + onFinished(); + return; + } + + auto properties = MetaServiceUtils::parseSpace(nebula::value(ret)); + if (!properties.group_name_ref().has_value()) { + spaceIter->next(); + continue; + } + + const auto& partPrefix = MetaServiceUtils::partPrefix(spaceId); + auto partIterRet = doPrefix(partPrefix); + auto partIter = nebula::value(partIterRet).get(); + while (partIter->valid()) { + auto partHosts = MetaServiceUtils::parsePartVal(partIter->val()); + for (auto& h : partHosts) { + if (h == req.get_node()) { + LOG(ERROR) << h << " is related with partition"; + handleErrorCode(nebula::cpp2::ErrorCode::E_CONFLICT); + onFinished(); + return; + } + } + partIter->next(); + } + spaceIter->next(); + } + auto hosts = MetaServiceUtils::parseZoneHosts(std::move(nebula::value(zoneValueRet))); + auto host = req.get_node(); + auto iter = std::find(hosts.begin(), 
hosts.end(), host); diff --git a/src/meta/test/GroupZoneTest.cpp b/src/meta/test/GroupZoneTest.cpp index aff85e91db5..225424e217c 100644 --- a/src/meta/test/GroupZoneTest.cpp +++ b/src/meta/test/GroupZoneTest.cpp @@ -8,6 +8,8 @@ #include "common/base/Base.h" #include "common/fs/TempDir.h" +#include "meta/processors/parts/CreateSpaceProcessor.h" +#include "meta/processors/parts/DropSpaceProcessor.h" #include "meta/processors/zone/AddGroupProcessor.h" #include "meta/processors/zone/AddZoneProcessor.h" #include "meta/processors/zone/DropGroupProcessor.h" @@ -255,18 +257,6 @@ TEST(GroupAndZoneTest, GroupAndZoneTest) { auto resp = std::move(f).get(); ASSERT_EQ(nebula::cpp2::ErrorCode::E_INVALID_PARM, resp.get_code()); } -// Drop host from zone -{ - cpp2::DropHostFromZoneReq req; - req.set_zone_name("zone_0"); - HostAddr node{"12", 12}; - req.set_node(std::move(node)); - auto* processor = DropHostFromZoneProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} // Drop host from zone which zone is not exist { cpp2::DropHostFromZoneReq req; @@ -303,6 +293,53 @@ TEST(GroupAndZoneTest, GroupAndZoneTest) { auto resp = std::move(f).get(); ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); } +{ + cpp2::SpaceDesc properties; + properties.set_space_name("space"); + properties.set_partition_num(12); + properties.set_replica_factor(3); + properties.set_group_name("group_0"); + cpp2::CreateSpaceReq req; + req.set_properties(std::move(properties)); + auto* processor = CreateSpaceProcessor::instance(kv.get()); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); +} +// Drop host from zone +{ + cpp2::DropHostFromZoneReq req; + req.set_zone_name("zone_0"); + HostAddr node{"12", 12}; + req.set_node(std::move(node)); + auto* 
processor = DropHostFromZoneProcessor::instance(kv.get()); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + ASSERT_EQ(nebula::cpp2::ErrorCode::E_CONFLICT, resp.get_code()); +} +{ + cpp2::DropSpaceReq req; + req.set_space_name("space"); + req.set_if_exists(false); + auto* processor = DropSpaceProcessor::instance(kv.get()); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); +} +{ + cpp2::DropHostFromZoneReq req; + req.set_zone_name("zone_0"); + HostAddr node{"12", 12}; + req.set_node(std::move(node)); + auto* processor = DropHostFromZoneProcessor::instance(kv.get()); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); +} // Add Group which zone not exist { LOG(INFO) << "Add Group which zone not exist"; @@ -574,6 +611,69 @@ TEST(GroupAndZoneTest, GroupAndZoneTest) { } } // namespace nebula +TEST(GroupAndZoneTest, DropHostAndZoneTest) { + fs::TempDir rootPath("/tmp/DropHostAndZoneTest.XXXXXX"); + + // Prepare + std::unique_ptr kv(MockCluster::initMetaKV(rootPath.path())); + std::vector addresses; + for (int32_t i = 0; i < 1; i++) { + addresses.emplace_back(std::to_string(i), i); + } + TestUtils::registerHB(kv.get(), addresses); + { + cpp2::ListHostsReq req; + req.set_type(cpp2::ListHostType::STORAGE); + auto* processor = ListHostsProcessor::instance(kv.get()); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + ASSERT_EQ(1, (*resp.hosts_ref()).size()); + for (auto i = 0; i < 1; i++) { + ASSERT_EQ(std::to_string(i), (*resp.hosts_ref())[i].get_hostAddr().host); + ASSERT_EQ(i, (*resp.hosts_ref())[i].get_hostAddr().port); + ASSERT_EQ(cpp2::HostStatus::ONLINE, (*resp.hosts_ref())[i].get_status()); + } + } + + // Add Zone + { + std::vector nodes; + 
nodes.emplace_back("0", 0); + cpp2::AddZoneReq req; + req.set_zone_name("zone_0"); + req.set_nodes(std::move(nodes)); + auto* processor = AddZoneProcessor::instance(kv.get()); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); + } + // Drop host from zone + { + cpp2::DropHostFromZoneReq req; + req.set_zone_name("zone_0"); + HostAddr node{"0", 0}; + req.set_node(std::move(node)); + auto* processor = DropHostFromZoneProcessor::instance(kv.get()); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); + } + // List Zones + { + cpp2::ListZonesReq req; + auto* processor = ListZonesProcessor::instance(kv.get()); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); + ASSERT_EQ(1, (*resp.zones_ref()).size()); + ASSERT_EQ("zone_0", (*resp.zones_ref())[0].get_zone_name()); + } +} + } // namespace meta } // namespace nebula diff --git a/src/meta/test/JobManagerTest.cpp b/src/meta/test/JobManagerTest.cpp index 0364772c875..f2832d8ac3d 100644 --- a/src/meta/test/JobManagerTest.cpp +++ b/src/meta/test/JobManagerTest.cpp @@ -253,7 +253,7 @@ TEST_F(JobManagerTest, showJobs) { jd2.setStatus(cpp2::JobStatus::FAILED); jobMgr->addJob(jd2, adminClient_.get()); - auto statusOrShowResult = jobMgr->showJobs(); + auto statusOrShowResult = jobMgr->showJobs(paras1.back()); LOG(INFO) << "after show jobs"; ASSERT_TRUE(nebula::ok(statusOrShowResult)); @@ -273,6 +273,34 @@ TEST_F(JobManagerTest, showJobs) { ASSERT_EQ(jobs[0].get_stop_time(), jd2.stopTime_); } +TEST_F(JobManagerTest, showJobsFromMultiSpace) { + std::vector paras1{"test_space"}; + JobDescription jd1(1, cpp2::AdminCmd::COMPACT, paras1); + jd1.setStatus(cpp2::JobStatus::RUNNING); + 
jd1.setStatus(cpp2::JobStatus::FINISHED); + jobMgr->addJob(jd1, adminClient_.get()); + + std::vector paras2{"test_space2"}; + JobDescription jd2(2, cpp2::AdminCmd::FLUSH, paras2); + jd2.setStatus(cpp2::JobStatus::RUNNING); + jd2.setStatus(cpp2::JobStatus::FAILED); + jobMgr->addJob(jd2, adminClient_.get()); + + auto statusOrShowResult = jobMgr->showJobs(paras2.back()); + LOG(INFO) << "after show jobs"; + ASSERT_TRUE(nebula::ok(statusOrShowResult)); + + auto& jobs = nebula::value(statusOrShowResult); + ASSERT_EQ(jobs.size(), 1); + + ASSERT_EQ(jobs[0].get_id(), jd2.id_); + ASSERT_EQ(jobs[0].get_cmd(), cpp2::AdminCmd::FLUSH); + ASSERT_EQ(jobs[0].get_paras()[0], "test_space2"); + ASSERT_EQ(jobs[0].get_status(), cpp2::JobStatus::FAILED); + ASSERT_EQ(jobs[0].get_start_time(), jd2.startTime_); + ASSERT_EQ(jobs[0].get_stop_time(), jd2.stopTime_); +} + HostAddr toHost(std::string strIp) { return HostAddr(strIp, 0); } TEST_F(JobManagerTest, showJob) { @@ -300,7 +328,7 @@ TEST_F(JobManagerTest, showJob) { jobMgr->save(td2.taskKey(), td2.taskVal()); LOG(INFO) << "before jobMgr->showJob"; - auto showResult = jobMgr->showJob(iJob); + auto showResult = jobMgr->showJob(iJob, paras.back()); LOG(INFO) << "after jobMgr->showJob"; ASSERT_TRUE(nebula::ok(showResult)); auto& jobs = nebula::value(showResult).first; @@ -328,16 +356,48 @@ TEST_F(JobManagerTest, showJob) { ASSERT_EQ(tasks[1].get_stop_time(), td2.stopTime_); } +TEST_F(JobManagerTest, showJobInOtherSpace) { + std::vector paras{"test_space"}; + + JobDescription jd(1, cpp2::AdminCmd::COMPACT, paras); + jd.setStatus(cpp2::JobStatus::RUNNING); + jd.setStatus(cpp2::JobStatus::FINISHED); + jobMgr->addJob(jd, adminClient_.get()); + + int32_t iJob = jd.id_; + int32_t task1 = 0; + auto host1 = toHost("127.0.0.1"); + + TaskDescription td1(iJob, task1, host1); + td1.setStatus(cpp2::JobStatus::RUNNING); + td1.setStatus(cpp2::JobStatus::FINISHED); + jobMgr->save(td1.taskKey(), td1.taskVal()); + + int32_t task2 = 1; + auto host2 = 
toHost("127.0.0.1"); + TaskDescription td2(iJob, task2, host2); + td2.setStatus(cpp2::JobStatus::RUNNING); + td2.setStatus(cpp2::JobStatus::FAILED); + jobMgr->save(td2.taskKey(), td2.taskVal()); + + LOG(INFO) << "before jobMgr->showJob"; + std::string chosenSpace = "spaceWithNoJob"; + auto showResult = jobMgr->showJob(iJob, chosenSpace); + LOG(INFO) << "after jobMgr->showJob"; + ASSERT_TRUE(!nebula::ok(showResult)); +} + TEST_F(JobManagerTest, recoverJob) { // set status to prevent running the job since AdminClient is a injector jobMgr->status_ = JobManager::JbmgrStatus::NOT_START; + auto spaceName = "test_space"; int32_t nJob = 3; for (auto i = 0; i != nJob; ++i) { - JobDescription jd(i, cpp2::AdminCmd::FLUSH, {"test_space"}); + JobDescription jd(i, cpp2::AdminCmd::FLUSH, {spaceName}); jobMgr->save(jd.jobKey(), jd.jobVal()); } - auto nJobRecovered = jobMgr->recoverJob(); + auto nJobRecovered = jobMgr->recoverJob(spaceName); ASSERT_EQ(nebula::value(nJobRecovered), 1); } diff --git a/src/mock/AdHocSchemaManager.cpp b/src/mock/AdHocSchemaManager.cpp index 34aef5fe15e..70513cb4cdb 100644 --- a/src/mock/AdHocSchemaManager.cpp +++ b/src/mock/AdHocSchemaManager.cpp @@ -198,7 +198,9 @@ StatusOr AdHocSchemaManager::toGraphSpaceID(folly::StringPiece spa } } -StatusOr AdHocSchemaManager::toGraphSpaceName(GraphSpaceID) { return "default_space"; } +StatusOr AdHocSchemaManager::toGraphSpaceName(GraphSpaceID space) { + return std::to_string(space); +} StatusOr AdHocSchemaManager::toTagID(GraphSpaceID space, folly::StringPiece tagName) { UNUSED(space); diff --git a/src/parser/AdminSentences.cpp b/src/parser/AdminSentences.cpp index c0134904e8f..0b15b7961c2 100644 --- a/src/parser/AdminSentences.cpp +++ b/src/parser/AdminSentences.cpp @@ -100,6 +100,11 @@ std::string CreateSpaceSentence::toString() const { return buf; } +std::string CreateSpaceAsSentence::toString() const { + auto buf = folly::sformat("CREATE SPACE {} AS {}", *newSpaceName_, *oldSpaceName_); + return buf; +} + 
std::string DropSpaceSentence::toString() const { return folly::stringPrintf("DROP SPACE %s", spaceName_.get()->c_str()); } diff --git a/src/parser/AdminSentences.h b/src/parser/AdminSentences.h index 1c0836ef353..ecd4b12a0f9 100644 --- a/src/parser/AdminSentences.h +++ b/src/parser/AdminSentences.h @@ -315,6 +315,26 @@ class CreateSpaceSentence final : public CreateSentence { std::unique_ptr comment_; }; +class CreateSpaceAsSentence final : public CreateSentence { + public: + CreateSpaceAsSentence(std::string* oldSpace, std::string* newSpace, bool ifNotExist) + : CreateSentence(ifNotExist) { + oldSpaceName_.reset(oldSpace); + newSpaceName_.reset(newSpace); + kind_ = Kind::kCreateSpaceAs; + } + + std::string getOldSpaceName() const { return *oldSpaceName_; } + + std::string getNewSpaceName() const { return *newSpaceName_; } + + std::string toString() const override; + + private: + std::unique_ptr newSpaceName_; + std::unique_ptr oldSpaceName_; +}; + class DropSpaceSentence final : public DropSentence { public: DropSpaceSentence(std::string* spaceName, bool ifExist) : DropSentence(ifExist) { diff --git a/src/parser/Sentence.h b/src/parser/Sentence.h index bb4b3298bd6..845d2e6c853 100644 --- a/src/parser/Sentence.h +++ b/src/parser/Sentence.h @@ -85,6 +85,7 @@ class Sentence { kDeleteEdges, kLookup, kCreateSpace, + kCreateSpaceAs, kDropSpace, kDescribeSpace, kYield, diff --git a/src/parser/TraverseSentences.h b/src/parser/TraverseSentences.h index b5ac8df07d4..5dcfd2d5a38 100644 --- a/src/parser/TraverseSentences.h +++ b/src/parser/TraverseSentences.h @@ -243,9 +243,7 @@ class FetchVerticesSentence final : public Sentence { yieldClause_.reset(clause); } - bool isAllTagProps() { return tags_->empty(); } - - const NameLabelList* tags() const { return tags_.get(); } + const NameLabelList* tags() const { return tags_->empty() ? 
nullptr : tags_.get(); } const VerticesClause* vertices() const { return vertices_.get(); } diff --git a/src/parser/parser.yy b/src/parser/parser.yy index 54888f2fec5..6022c7d917e 100644 --- a/src/parser/parser.yy +++ b/src/parser/parser.yy @@ -1048,7 +1048,17 @@ opt_argument_list ; argument_list - : expression { + : KW_VERTEX { + $$ = ArgumentList::make(qctx->objPool()); + Expression* arg = VertexExpression::make(qctx->objPool()); + $$->addArgument(arg); + } + | KW_EDGE { + $$ = ArgumentList::make(qctx->objPool()); + Expression *arg = EdgeExpression::make(qctx->objPool()); + $$->addArgument(arg); + } + | expression { $$ = ArgumentList::make(qctx->objPool()); Expression* arg = nullptr; arg = $1; @@ -3154,6 +3164,10 @@ create_space_sentence sentence->setComment($10); $$ = sentence; } + | KW_CREATE KW_SPACE opt_if_not_exists name_label KW_AS name_label { + auto sentence = new CreateSpaceAsSentence($6, $4, $3); + $$ = sentence; + } ; describe_space_sentence diff --git a/src/parser/test/CMakeLists.txt b/src/parser/test/CMakeLists.txt index 0280f76d346..260722922d4 100644 --- a/src/parser/test/CMakeLists.txt +++ b/src/parser/test/CMakeLists.txt @@ -42,6 +42,7 @@ set(PARSER_TEST_LIBS $ $ $ + $ ) nebula_add_test( diff --git a/src/storage/CMakeLists.txt b/src/storage/CMakeLists.txt index 8589e5e6a83..78782fa3d40 100644 --- a/src/storage/CMakeLists.txt +++ b/src/storage/CMakeLists.txt @@ -62,6 +62,7 @@ nebula_add_library( http/StorageHttpDownloadHandler.cpp http/StorageHttpAdminHandler.cpp http/StorageHttpStatsHandler.cpp + http/StorageHttpPropertyHandler.cpp ) nebula_add_library( diff --git a/src/storage/CommonUtils.h b/src/storage/CommonUtils.h index 52f57b0ea47..4478b95346d 100644 --- a/src/storage/CommonUtils.h +++ b/src/storage/CommonUtils.h @@ -17,6 +17,7 @@ #include "common/stats/StatsManager.h" #include "common/utils/MemoryLockWrapper.h" #include "interface/gen-cpp2/storage_types.h" +#include "kvstore/KVEngine.h" #include "kvstore/KVStore.h" namespace nebula { @@ 
-77,6 +78,8 @@ class StorageEnv { TransactionManager* txnMan_{nullptr}; std::unique_ptr verticesML_{nullptr}; std::unique_ptr edgesML_{nullptr}; + std::unique_ptr adminStore_{nullptr}; + int32_t adminSeqId_{0}; IndexState getIndexState(GraphSpaceID space, PartitionID part) { auto key = std::make_tuple(space, part); diff --git a/src/storage/StorageServer.cpp b/src/storage/StorageServer.cpp index 687448f3077..cd92fe8e953 100644 --- a/src/storage/StorageServer.cpp +++ b/src/storage/StorageServer.cpp @@ -13,9 +13,11 @@ #include "common/meta/ServerBasedIndexManager.h" #include "common/meta/ServerBasedSchemaManager.h" #include "common/network/NetworkUtils.h" +#include "common/ssl/SSLConfig.h" #include "common/thread/GenericThreadPool.h" #include "common/utils/Utils.h" #include "kvstore/PartManager.h" +#include "kvstore/RocksEngine.h" #include "storage/BaseProcessor.h" #include "storage/CompactionFilter.h" #include "storage/GraphStorageServiceHandler.h" @@ -25,6 +27,7 @@ #include "storage/http/StorageHttpAdminHandler.h" #include "storage/http/StorageHttpDownloadHandler.h" #include "storage/http/StorageHttpIngestHandler.h" +#include "storage/http/StorageHttpPropertyHandler.h" #include "storage/http/StorageHttpStatsHandler.h" #include "storage/transaction/TransactionManager.h" #include "version/Version.h" @@ -104,11 +107,39 @@ bool StorageServer::initWebService() { router.get("/rocksdb_stats").handler([](web::PathParams&&) { return new storage::StorageHttpStatsHandler(); }); + router.get("/rocksdb_property").handler([this](web::PathParams&&) { + return new storage::StorageHttpPropertyHandler(schemaMan_.get(), kvstore_.get()); + }); auto status = webSvc_->start(); return status.ok(); } +std::unique_ptr StorageServer::getAdminStoreInstance() { + int32_t vIdLen = NebulaKeyUtils::adminTaskKey(-1, 0, 0).size(); + std::unique_ptr re( + new kvstore::RocksEngine(0, vIdLen, dataPaths_[0], walPath_)); + return re; +} + +int32_t StorageServer::getAdminStoreSeqId() { + std::string key 
= NebulaKeyUtils::adminTaskKey(-1, 0, 0); + std::string val; + nebula::cpp2::ErrorCode rc = env_->adminStore_->get(key, &val); + int32_t curSeqId = 1; + if (rc == nebula::cpp2::ErrorCode::SUCCEEDED) { + int32_t lastSeqId = *reinterpret_cast(val.data()); + curSeqId = lastSeqId + 1; + } + std::string newVal; + newVal.append(reinterpret_cast(&curSeqId), sizeof(int32_t)); + auto ret = env_->adminStore_->put(key, newVal); + if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(FATAL) << "Write put in admin-storage seq id " << curSeqId << " failed."; + } + return curSeqId; +} + bool StorageServer::start() { ioThreadPool_ = std::make_shared(FLAGS_num_io_threads); workers_ = apache::thrift::concurrency::PriorityThreadManager::newPriorityThreadManager( @@ -153,12 +184,6 @@ bool StorageServer::start() { return false; } - taskMgr_ = AdminTaskManager::instance(); - if (!taskMgr_->init()) { - LOG(ERROR) << "Init task manager failed!"; - return false; - } - env_ = std::make_unique(); env_->kvstore_ = kvstore_.get(); env_->indexMan_ = indexMan_.get(); @@ -171,6 +196,13 @@ bool StorageServer::start() { env_->verticesML_ = std::make_unique(); env_->edgesML_ = std::make_unique(); + env_->adminStore_ = getAdminStoreInstance(); + env_->adminSeqId_ = getAdminStoreSeqId(); + taskMgr_ = AdminTaskManager::instance(env_.get()); + if (!taskMgr_->init()) { + LOG(ERROR) << "Init task manager failed!"; + return false; + } storageThread_.reset(new std::thread([this] { try { @@ -182,6 +214,9 @@ bool StorageServer::start() { storageServer_->setThreadManager(workers_); storageServer_->setStopWorkersOnStopListening(false); storageServer_->setInterface(std::move(handler)); + if (FLAGS_enable_ssl) { + storageServer_->setSSLConfig(nebula::sslContextConfig()); + } ServiceStatus expected = STATUS_UNINITIALIZED; if (!storageSvcStatus_.compare_exchange_strong(expected, STATUS_RUNNING)) { @@ -208,6 +243,9 @@ bool StorageServer::start() { adminServer_->setThreadManager(workers_); 
adminServer_->setStopWorkersOnStopListening(false); adminServer_->setInterface(std::move(handler)); + if (FLAGS_enable_ssl) { + adminServer_->setSSLConfig(nebula::sslContextConfig()); + } ServiceStatus expected = STATUS_UNINITIALIZED; if (!adminSvcStatus_.compare_exchange_strong(expected, STATUS_RUNNING)) { @@ -234,6 +272,9 @@ bool StorageServer::start() { internalStorageServer_->setThreadManager(workers_); internalStorageServer_->setStopWorkersOnStopListening(false); internalStorageServer_->setInterface(std::move(handler)); + if (FLAGS_enable_ssl) { + internalStorageServer_->setSSLConfig(nebula::sslContextConfig()); + } internalStorageSvcStatus_.store(STATUS_RUNNING); LOG(INFO) << "The internal storage service start(same with admin) on " << internalAddr; diff --git a/src/storage/StorageServer.h b/src/storage/StorageServer.h index 21d7027da08..0893bfcca42 100644 --- a/src/storage/StorageServer.h +++ b/src/storage/StorageServer.h @@ -47,6 +47,10 @@ class StorageServer final { private: std::unique_ptr getStoreInstance(); + std::unique_ptr getAdminStoreInstance(); + + int32_t getAdminStoreSeqId(); + bool initWebService(); std::shared_ptr ioThreadPool_; diff --git a/src/storage/admin/AdminTask.h b/src/storage/admin/AdminTask.h index 6576ed5c0ad..9de01721fb9 100644 --- a/src/storage/admin/AdminTask.h +++ b/src/storage/admin/AdminTask.h @@ -107,6 +107,8 @@ class AdminTask { rc_.compare_exchange_strong(suc, nebula::cpp2::ErrorCode::E_USER_CANCEL); } + meta::cpp2::AdminCmd cmdType() { return ctx_.cmd_; } + public: std::atomic unFinishedSubTask_; SubTaskQueue subtasks_; diff --git a/src/storage/admin/AdminTaskManager.cpp b/src/storage/admin/AdminTaskManager.cpp index b500cb347d4..65a831937b6 100644 --- a/src/storage/admin/AdminTaskManager.cpp +++ b/src/storage/admin/AdminTaskManager.cpp @@ -6,7 +6,13 @@ #include "storage/admin/AdminTaskManager.h" +#include +#include + +#include + #include "storage/admin/AdminTask.h" +#include "storage/admin/AdminTaskProcessor.h" 
DEFINE_uint32(max_concurrent_subtasks, 10, "The sub tasks could be invoked simultaneously"); @@ -25,10 +31,97 @@ bool AdminTaskManager::init() { bgThread_->addTask(&AdminTaskManager::schedule, this); shutdown_ = false; + handleUnreportedTasks(); LOG(INFO) << "exit AdminTaskManager::init()"; return true; } +void AdminTaskManager::handleUnreportedTasks() { + using futTuple = + std::tuple>>; + if (env_ == nullptr) return; + unreportedAdminThread_.reset(new std::thread([this] { + bool ifAny = true; + while (true) { + std::unique_lock lk(unreportedMutex_); + if (!ifAny) unreportedCV_.wait(lk); + ifAny = false; + std::unique_ptr iter; + auto kvRet = env_->adminStore_->scan(&iter); + if (kvRet != nebula::cpp2::ErrorCode::SUCCEEDED || iter == nullptr) continue; + std::vector keys; + std::vector futVec; + for (; iter->valid(); iter->next()) { + folly::StringPiece key = iter->key(); + int32_t seqId = *reinterpret_cast(key.data()); + if (seqId < 0) continue; + JobID jobId = *reinterpret_cast(key.data() + sizeof(int32_t)); + TaskID taskId = + *reinterpret_cast(key.data() + sizeof(int32_t) + sizeof(JobID)); + folly::StringPiece val = iter->val(); + folly::StringPiece statsVal(val.data() + sizeof(nebula::cpp2::ErrorCode), + val.size() - sizeof(nebula::cpp2::ErrorCode)); + nebula::meta::cpp2::StatsItem statsItem; + apache::thrift::CompactSerializer::deserialize(statsVal, statsItem); + nebula::meta::cpp2::JobStatus jobStatus = statsItem.get_status(); + nebula::cpp2::ErrorCode errCode = + *reinterpret_cast(val.data()); + meta::cpp2::StatsItem* pStats = nullptr; + if (errCode == nebula::cpp2::ErrorCode::SUCCEEDED) pStats = &statsItem; + LOG(INFO) << folly::sformat("reportTaskFinish(), job={}, task={}, rc={}", + jobId, + taskId, + apache::thrift::util::enumNameSafe(errCode)); + if (seqId < env_->adminSeqId_) { + if (jobStatus == nebula::meta::cpp2::JobStatus::RUNNING && pStats != nullptr) + pStats->set_status(nebula::meta::cpp2::JobStatus::FAILED); + auto fut = 
env_->metaClient_->reportTaskFinish(jobId, taskId, errCode, pStats); + futVec.emplace_back(std::move(jobId), std::move(taskId), std::move(key), std::move(fut)); + } else if (jobStatus != nebula::meta::cpp2::JobStatus::RUNNING) { + auto fut = env_->metaClient_->reportTaskFinish(jobId, taskId, errCode, pStats); + futVec.emplace_back(std::move(jobId), std::move(taskId), std::move(key), std::move(fut)); + } + } + for (auto& p : futVec) { + JobID jobId = std::get<0>(p); + TaskID taskId = std::get<1>(p); + std::string& key = std::get<2>(p); + auto& fut = std::get<3>(p); + auto rc = nebula::cpp2::ErrorCode::SUCCEEDED; + fut.wait(); + if (!fut.hasValue()) { + LOG(INFO) << folly::sformat( + "reportTaskFinish() got rpc error:, job={}, task={}", jobId, taskId); + ifAny = true; + continue; + } + if (!fut.value().ok()) { + LOG(INFO) << folly::sformat("reportTaskFinish() has bad status:, job={}, task={}, rc={}", + jobId, + taskId, + fut.value().status().toString()); + ifAny = true; + continue; + } + rc = fut.value().value(); + LOG(INFO) << folly::sformat("reportTaskFinish(), job={}, task={}, rc={}", + jobId, + taskId, + apache::thrift::util::enumNameSafe(rc)); + if (rc == nebula::cpp2::ErrorCode::E_LEADER_CHANGED || + rc == nebula::cpp2::ErrorCode::E_STORE_FAILURE) { + ifAny = true; + continue; + } else { + keys.emplace_back(key.data(), key.size()); + break; + } + } + env_->adminStore_->multiRemove(keys); + } + })); +} + void AdminTaskManager::addAsyncTask(std::shared_ptr task) { TaskHandle handle = std::make_pair(task->getJobId(), task->getTaskId()); auto ret = tasks_.insert(handle, task).second; @@ -49,6 +142,7 @@ nebula::cpp2::ErrorCode AdminTaskManager::cancelJob(JobID jobId) { auto handle = it->first; if (handle.first == jobId) { it->second->cancel(); + removeTaskStatus(it->second->getJobId(), it->second->getTaskId()); FLOG_INFO("task(%d, %d) cancelled", jobId, handle.second); } ++it; @@ -67,6 +161,7 @@ nebula::cpp2::ErrorCode AdminTaskManager::cancelTask(JobID jobId, 
TaskID taskId) ret = nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND; } else { it->second->cancel(); + removeTaskStatus(it->second->getJobId(), it->second->getTaskId()); } return ret; } @@ -85,6 +180,30 @@ void AdminTaskManager::shutdown() { LOG(INFO) << "exit AdminTaskManager::shutdown()"; } +void AdminTaskManager::saveTaskStatus(JobID jobId, + TaskID taskId, + nebula::cpp2::ErrorCode rc, + const nebula::meta::cpp2::StatsItem& result) { + if (env_ == nullptr) return; + std::string key = NebulaKeyUtils::adminTaskKey(env_->adminSeqId_, jobId, taskId); + std::string val; + val.append(reinterpret_cast(&rc), sizeof(nebula::cpp2::ErrorCode)); + std::string resVal; + apache::thrift::CompactSerializer::serialize(result, &resVal); + val.append(resVal); + auto ret = env_->adminStore_->put(key, val); + if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(FATAL) << "Write put in admin-storage job id " << jobId << "task id " << taskId + << " failed."; + } +} + +void AdminTaskManager::removeTaskStatus(JobID jobId, TaskID taskId) { + if (env_ == nullptr) return; + std::string key = NebulaKeyUtils::adminTaskKey(env_->adminSeqId_, jobId, taskId); + env_->adminStore_->remove(key); +} + // schedule void AdminTaskManager::schedule() { std::chrono::milliseconds interval{20}; // 20ms @@ -189,6 +308,8 @@ void AdminTaskManager::runSubTask(TaskHandle handle) { } } +void AdminTaskManager::notifyReporting() { unreportedCV_.notify_one(); } + bool AdminTaskManager::isFinished(JobID jobID, TaskID taskID) { auto iter = tasks_.find(std::make_pair(jobID, taskID)); // Task maybe erased when it's finished. 
diff --git a/src/storage/admin/AdminTaskManager.h b/src/storage/admin/AdminTaskManager.h index 5c6419164e3..70f4974c4fd 100644 --- a/src/storage/admin/AdminTaskManager.h +++ b/src/storage/admin/AdminTaskManager.h @@ -32,8 +32,9 @@ class AdminTaskManager { using TaskQueue = folly::UnboundedBlockingQueue; AdminTaskManager() = default; - static AdminTaskManager* instance() { - static AdminTaskManager sAdminTaskManager; + explicit AdminTaskManager(storage::StorageEnv* env = nullptr) : env_(env) {} + static AdminTaskManager* instance(storage::StorageEnv* env = nullptr) { + static AdminTaskManager sAdminTaskManager(env); return &sAdminTaskManager; } @@ -51,6 +52,17 @@ class AdminTaskManager { bool isFinished(JobID jobID, TaskID taskID); + void saveTaskStatus(JobID jobId, + TaskID taskId, + nebula::cpp2::ErrorCode rc, + const nebula::meta::cpp2::StatsItem& result); + + void removeTaskStatus(JobID jobId, TaskID taskId); + + void handleUnreportedTasks(); + + void notifyReporting(); + private: void schedule(); void runSubTask(TaskHandle handle); @@ -61,6 +73,10 @@ class AdminTaskManager { TaskContainer tasks_; TaskQueue taskQueue_; std::unique_ptr bgThread_; + storage::StorageEnv* env_{nullptr}; + std::unique_ptr unreportedAdminThread_; + std::mutex unreportedMutex_; + std::condition_variable unreportedCV_; }; } // namespace storage diff --git a/src/storage/admin/AdminTaskProcessor.cpp b/src/storage/admin/AdminTaskProcessor.cpp index c408cc9029e..06b1cc63965 100644 --- a/src/storage/admin/AdminTaskProcessor.cpp +++ b/src/storage/admin/AdminTaskProcessor.cpp @@ -13,57 +13,22 @@ namespace nebula { namespace storage { - void AdminTaskProcessor::process(const cpp2::AddAdminTaskRequest& req) { auto taskManager = AdminTaskManager::instance(); - auto cb = [env = env_, jobId = req.get_job_id(), taskId = req.get_task_id()]( + auto cb = [taskManager, jobId = req.get_job_id(), taskId = req.get_task_id()]( nebula::cpp2::ErrorCode errCode, nebula::meta::cpp2::StatsItem& result) { - 
meta::cpp2::StatsItem* pStats = nullptr; - if (errCode == nebula::cpp2::ErrorCode::SUCCEEDED && - *result.status_ref() == nebula::meta::cpp2::JobStatus::FINISHED) { - pStats = &result; - } - - LOG(INFO) << folly::sformat("reportTaskFinish(), job={}, task={}, rc={}", - jobId, - taskId, - apache::thrift::util::enumNameSafe(errCode)); - auto maxRetry = 5; - auto retry = 0; - while (retry++ < maxRetry) { - auto rc = nebula::cpp2::ErrorCode::SUCCEEDED; - auto fut = env->metaClient_->reportTaskFinish(jobId, taskId, errCode, pStats); - fut.wait(); - if (!fut.hasValue()) { - LOG(INFO) << folly::sformat( - "reportTaskFinish() got rpc error:, job={}, task={}", jobId, taskId); - continue; - } - if (!fut.value().ok()) { - LOG(INFO) << folly::sformat("reportTaskFinish() has bad status:, job={}, task={}, rc={}", - jobId, - taskId, - fut.value().status().toString()); - break; - } - rc = fut.value().value(); - LOG(INFO) << folly::sformat("reportTaskFinish(), job={}, task={}, rc={}", - jobId, - taskId, - apache::thrift::util::enumNameSafe(rc)); - if (rc == nebula::cpp2::ErrorCode::E_LEADER_CHANGED || - rc == nebula::cpp2::ErrorCode::E_STORE_FAILURE) { - continue; - } else { - break; - } - } + taskManager->saveTaskStatus(jobId, taskId, errCode, result); + taskManager->notifyReporting(); }; TaskContext ctx(req, std::move(cb)); auto task = AdminTaskFactory::createAdminTask(env_, std::move(ctx)); if (task) { + nebula::meta::cpp2::StatsItem statsItem; + statsItem.set_status(nebula::meta::cpp2::JobStatus::RUNNING); + taskManager->saveTaskStatus( + ctx.jobId_, ctx.taskId_, nebula::cpp2::ErrorCode::E_TASK_EXECUTION_FAILED, statsItem); taskManager->addAsyncTask(task); } else { cpp2::PartitionResult thriftRet; diff --git a/src/storage/admin/RebuildIndexTask.cpp b/src/storage/admin/RebuildIndexTask.cpp index aee855a8cad..1f394c316d8 100644 --- a/src/storage/admin/RebuildIndexTask.cpp +++ b/src/storage/admin/RebuildIndexTask.cpp @@ -23,7 +23,7 @@ RebuildIndexTask::RebuildIndexTask(StorageEnv* 
env, TaskContext&& ctx) // 1Mb (512 * 2 peers). Muliplied by the subtasks concurrency, the total send/recv traffic will be // 10Mb, which is non-trival. LOG(INFO) << "Rebuild index task is rate limited to " << FLAGS_rebuild_index_part_rate_limit - << " for each subtask"; + << " for each subtask by default"; } ErrorOr> RebuildIndexTask::genSubTasks() { @@ -78,8 +78,7 @@ ErrorOr> RebuildIndexTask::ge nebula::cpp2::ErrorCode RebuildIndexTask::invoke(GraphSpaceID space, PartitionID part, const IndexItems& items) { - auto rateLimiter = std::make_unique(FLAGS_rebuild_index_part_rate_limit, - FLAGS_rebuild_index_part_rate_limit); + auto rateLimiter = std::make_unique(); // TaskMananger will make sure that there won't be cocurrent invoke of a given part auto result = removeLegacyLogs(space, part); if (result != nebula::cpp2::ErrorCode::SUCCEEDED) { @@ -221,7 +220,9 @@ nebula::cpp2::ErrorCode RebuildIndexTask::writeData(GraphSpaceID space, kvstore::RateLimiter* rateLimiter) { folly::Baton baton; auto result = nebula::cpp2::ErrorCode::SUCCEEDED; - rateLimiter->consume(batchSize); + rateLimiter->consume(static_cast(batchSize), // toConsume + static_cast(FLAGS_rebuild_index_part_rate_limit), // rate + static_cast(FLAGS_rebuild_index_part_rate_limit)); // burstSize env_->kvstore_->asyncMultiPut( space, part, std::move(data), [&result, &baton](nebula::cpp2::ErrorCode code) { if (code != nebula::cpp2::ErrorCode::SUCCEEDED) { @@ -240,7 +241,9 @@ nebula::cpp2::ErrorCode RebuildIndexTask::writeOperation(GraphSpaceID space, folly::Baton baton; auto result = nebula::cpp2::ErrorCode::SUCCEEDED; auto encoded = encodeBatchValue(batchHolder->getBatch()); - rateLimiter->consume(batchHolder->size()); + rateLimiter->consume(static_cast(batchHolder->size()), // toConsume + static_cast(FLAGS_rebuild_index_part_rate_limit), // rate + static_cast(FLAGS_rebuild_index_part_rate_limit)); // burstSize env_->kvstore_->asyncAppendBatch( space, part, std::move(encoded), [&result, 
&baton](nebula::cpp2::ErrorCode code) { if (code != nebula::cpp2::ErrorCode::SUCCEEDED) { diff --git a/src/storage/exec/GetNeighborsNode.h b/src/storage/exec/GetNeighborsNode.h index b3f0eab036f..96c4557810a 100644 --- a/src/storage/exec/GetNeighborsNode.h +++ b/src/storage/exec/GetNeighborsNode.h @@ -160,7 +160,7 @@ class GetNeighborsSampleNode : public GetNeighborsNode { } RowReaderWrapper reader; - auto samples = std::move(*sampler_).samples(); + auto samples = sampler_->samples(); for (auto& sample : samples) { auto columnIdx = std::get<4>(sample); // add edge prop value to the target column diff --git a/src/storage/exec/IndexEdgeNode.h b/src/storage/exec/IndexEdgeNode.h index 6ce73afe1eb..f9daabb2aef 100644 --- a/src/storage/exec/IndexEdgeNode.h +++ b/src/storage/exec/IndexEdgeNode.h @@ -22,7 +22,7 @@ class IndexEdgeNode final : public RelNode { IndexScanNode* indexScanNode, const std::vector>& schemas, const std::string& schemaName, - int64_t limit) + int64_t limit = -1) : context_(context), indexScanNode_(indexScanNode), schemas_(schemas), @@ -42,11 +42,7 @@ class IndexEdgeNode final : public RelNode { data_.clear(); std::vector edges; auto* iter = static_cast(indexScanNode_->iterator()); - int64_t count = 0; while (iter && iter->valid()) { - if (limit_ > -1 && count++ == limit_) { - break; - } if (context_->isPlanKilled()) { return nebula::cpp2::ErrorCode::E_PLAN_IS_KILLED; } @@ -66,6 +62,7 @@ class IndexEdgeNode final : public RelNode { edges.emplace_back(std::move(edge)); iter->next(); } + int64_t count = 0; for (const auto& edge : edges) { auto key = NebulaKeyUtils::edgeKey(context_->vIdLen(), partId, @@ -82,6 +79,9 @@ class IndexEdgeNode final : public RelNode { } else { return ret; } + if (limit_ > 0 && ++count >= limit_) { + break; + } } return nebula::cpp2::ErrorCode::SUCCEEDED; } diff --git a/src/storage/exec/IndexFilterNode.h b/src/storage/exec/IndexFilterNode.h index ef5901d14fd..23d9620a4fe 100644 --- a/src/storage/exec/IndexFilterNode.h +++ 
b/src/storage/exec/IndexFilterNode.h @@ -26,14 +26,16 @@ class IndexFilterNode final : public RelNode { // data anymore. IndexFilterNode(RuntimeContext* context, IndexScanNode* indexScanNode, - StorageExpressionContext* exprCtx = nullptr, - Expression* exp = nullptr, - bool isEdge = false) + StorageExpressionContext* exprCtx, + Expression* exp, + bool isEdge, + int64_t limit = -1) : context_(context), indexScanNode_(indexScanNode), exprCtx_(exprCtx), filterExp_(exp), - isEdge_(isEdge) { + isEdge_(isEdge), + limit_(limit) { evalExprByIndex_ = true; RelNode::name_ = "IndexFilterNode"; } @@ -42,9 +44,14 @@ class IndexFilterNode final : public RelNode { // need to read data. IndexFilterNode(RuntimeContext* context, IndexEdgeNode* indexEdgeNode, - StorageExpressionContext* exprCtx = nullptr, - Expression* exp = nullptr) - : context_(context), indexEdgeNode_(indexEdgeNode), exprCtx_(exprCtx), filterExp_(exp) { + StorageExpressionContext* exprCtx, + Expression* exp, + int64_t limit = -1) + : context_(context), + indexEdgeNode_(indexEdgeNode), + exprCtx_(exprCtx), + filterExp_(exp), + limit_(limit) { evalExprByIndex_ = false; isEdge_ = true; } @@ -53,9 +60,14 @@ class IndexFilterNode final : public RelNode { // need to read data. 
IndexFilterNode(RuntimeContext* context, IndexVertexNode* indexVertexNode, - StorageExpressionContext* exprCtx = nullptr, - Expression* exp = nullptr) - : context_(context), indexVertexNode_(indexVertexNode), exprCtx_(exprCtx), filterExp_(exp) { + StorageExpressionContext* exprCtx, + Expression* exp, + int64_t limit = -1) + : context_(context), + indexVertexNode_(indexVertexNode), + exprCtx_(exprCtx), + filterExp_(exp), + limit_(limit) { evalExprByIndex_ = false; isEdge_ = false; } @@ -74,6 +86,7 @@ class IndexFilterNode final : public RelNode { } else { data = indexVertexNode_->moveData(); } + int64_t count = 0; for (const auto& k : data) { if (context_->isPlanKilled()) { return nebula::cpp2::ErrorCode::E_PLAN_IS_KILLED; @@ -81,6 +94,7 @@ class IndexFilterNode final : public RelNode { if (evalExprByIndex_) { if (check(k.first)) { data_.emplace_back(k.first, k.second); + count++; } } else { const auto& schemas = @@ -91,8 +105,12 @@ class IndexFilterNode final : public RelNode { } if (check(reader.get(), k.first)) { data_.emplace_back(k.first, k.second); + count++; } } + if (limit_ > 0 && count >= limit_) { + break; + } } return nebula::cpp2::ErrorCode::SUCCEEDED; } @@ -143,6 +161,7 @@ class IndexFilterNode final : public RelNode { Expression* filterExp_; bool isEdge_; bool evalExprByIndex_; + int64_t limit_; std::vector data_{}; }; diff --git a/src/storage/exec/IndexScanNode.h b/src/storage/exec/IndexScanNode.h index 3576b73a57b..5ca22502f91 100644 --- a/src/storage/exec/IndexScanNode.h +++ b/src/storage/exec/IndexScanNode.h @@ -21,7 +21,7 @@ class IndexScanNode : public RelNode { IndexScanNode(RuntimeContext* context, IndexID indexId, std::vector columnHints, - int64_t limit) + int64_t limit = -1) : context_(context), indexId_(indexId), columnHints_(std::move(columnHints)), limit_(limit) { /** * columnHints's elements are {scanType = PREFIX|RANGE; beginStr; endStr}, @@ -74,9 +74,6 @@ class IndexScanNode : public RelNode { data_.clear(); int64_t count = 0; while 
(!!iter_ && iter_->valid()) { - if (limit_ > -1 && count++ == limit_) { - break; - } if (context_->isPlanKilled()) { return {}; } @@ -89,6 +86,9 @@ class IndexScanNode : public RelNode { } } data_.emplace_back(iter_->key(), ""); + if (limit_ > 0 && ++count >= limit_) { + break; + } iter_->next(); } return std::move(data_); diff --git a/src/storage/exec/IndexVertexNode.h b/src/storage/exec/IndexVertexNode.h index c6a4ab17719..7f0eff653db 100644 --- a/src/storage/exec/IndexVertexNode.h +++ b/src/storage/exec/IndexVertexNode.h @@ -22,7 +22,7 @@ class IndexVertexNode final : public RelNode { IndexScanNode* indexScanNode, const std::vector>& schemas, const std::string& schemaName, - int64_t limit) + int64_t limit = -1) : context_(context), indexScanNode_(indexScanNode), schemas_(schemas), @@ -42,11 +42,8 @@ class IndexVertexNode final : public RelNode { data_.clear(); std::vector vids; auto* iter = static_cast(indexScanNode_->iterator()); - int64_t count = 0; + while (iter && iter->valid()) { - if (limit_ > -1 && count++ == limit_) { - break; - } if (context_->isPlanKilled()) { return nebula::cpp2::ErrorCode::E_PLAN_IS_KILLED; } @@ -61,6 +58,7 @@ class IndexVertexNode final : public RelNode { vids.emplace_back(iter->vId()); iter->next(); } + int64_t count = 0; for (const auto& vId : vids) { VLOG(1) << "partId " << partId << ", vId " << vId << ", tagId " << context_->tagId_; auto key = NebulaKeyUtils::vertexKey(context_->vIdLen(), partId, vId, context_->tagId_); @@ -73,6 +71,9 @@ class IndexVertexNode final : public RelNode { } else { return ret; } + if (limit_ > 0 && ++count >= limit_) { + break; + } } return nebula::cpp2::ErrorCode::SUCCEEDED; } diff --git a/src/storage/http/StorageHttpPropertyHandler.cpp b/src/storage/http/StorageHttpPropertyHandler.cpp new file mode 100644 index 00000000000..838a0efa90b --- /dev/null +++ b/src/storage/http/StorageHttpPropertyHandler.cpp @@ -0,0 +1,105 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. 
+ * + * This source code is licensed under Apache 2.0 License, + * attached with Common Clause Condition 1.0, found in the LICENSES directory. + */ + +#include "storage/http/StorageHttpPropertyHandler.h" + +#include +#include +#include + +#include "common/base/Base.h" + +namespace nebula { +namespace storage { + +using proxygen::HTTPMessage; +using proxygen::HTTPMethod; +using proxygen::ProxygenError; +using proxygen::ResponseBuilder; +using proxygen::UpgradeProtocol; + +void StorageHttpPropertyHandler::onRequest(std::unique_ptr headers) noexcept { + if (headers->getMethod().value() != HTTPMethod::GET) { + // Unsupported method + resp_ = "Not supported"; + err_ = HttpCode::E_UNSUPPORTED_METHOD; + return; + } + + do { + if (headers->hasQueryParam("space")) { + auto spaceName = headers->getQueryParam("space"); + auto ret = schemaMan_->toGraphSpaceID(spaceName); + if (!ret.ok()) { + resp_ = "Space not found: " + spaceName; + err_ = HttpCode::E_ILLEGAL_ARGUMENT; + break; + } + spaceId_ = ret.value(); + } else { + resp_ = + "Space should not be empty. " + "Usage: http:://ip:port/rocksdb_property?space=xxx&property=yyy"; + err_ = HttpCode::E_ILLEGAL_ARGUMENT; + break; + } + + if (headers->hasQueryParam("property")) { + folly::split(",", headers->getQueryParam("property"), properties_, true); + } else { + resp_ = + "Property should not be empty. 
" + "Usage: http:://ip:port/rocksdb_property?space=xxx&property=yyy"; + err_ = HttpCode::E_ILLEGAL_ARGUMENT; + break; + } + + auto result = folly::dynamic::array(); + for (const auto& property : properties_) { + auto ret = kv_->getProperty(spaceId_, property); + if (!ok(ret)) { + resp_ = "Property not found: " + property; + err_ = HttpCode::E_ILLEGAL_ARGUMENT; + return; + } else { + result.push_back(folly::parseJson(value(ret))); + } + } + resp_ = folly::toPrettyJson(result); + } while (false); +} + +void StorageHttpPropertyHandler::onBody(std::unique_ptr) noexcept { + // Do nothing, we only support GET +} + +void StorageHttpPropertyHandler::onEOM() noexcept { + switch (err_) { + case HttpCode::E_UNSUPPORTED_METHOD: + ResponseBuilder(downstream_).status(405, "Method not allowed").body(resp_).sendWithEOM(); + return; + case HttpCode::E_ILLEGAL_ARGUMENT: + ResponseBuilder(downstream_).status(400, "Illegal argument").body(resp_).sendWithEOM(); + return; + default: + break; + } + + ResponseBuilder(downstream_).status(200, "OK").body(resp_).sendWithEOM(); +} + +void StorageHttpPropertyHandler::onUpgrade(UpgradeProtocol) noexcept { + // Do nothing +} + +void StorageHttpPropertyHandler::requestComplete() noexcept { delete this; } + +void StorageHttpPropertyHandler::onError(ProxygenError error) noexcept { + LOG(ERROR) << "Web service StorageHttpHandler got error: " << proxygen::getErrorString(error); +} + +} // namespace storage +} // namespace nebula diff --git a/src/storage/http/StorageHttpPropertyHandler.h b/src/storage/http/StorageHttpPropertyHandler.h new file mode 100644 index 00000000000..2e112e82f69 --- /dev/null +++ b/src/storage/http/StorageHttpPropertyHandler.h @@ -0,0 +1,45 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License, + * attached with Common Clause Condition 1.0, found in the LICENSES directory. 
+ */ + +#pragma once + +#include + +#include "common/base/Base.h" +#include "kvstore/KVStore.h" +#include "webservice/Common.h" + +namespace nebula { +namespace storage { + +class StorageHttpPropertyHandler : public proxygen::RequestHandler { + public: + StorageHttpPropertyHandler(meta::SchemaManager* schemaMan, kvstore::KVStore* kv) + : schemaMan_(schemaMan), kv_(kv) {} + + void onRequest(std::unique_ptr headers) noexcept override; + + void onBody(std::unique_ptr body) noexcept override; + + void onEOM() noexcept override; + + void onUpgrade(proxygen::UpgradeProtocol proto) noexcept override; + + void requestComplete() noexcept override; + + void onError(proxygen::ProxygenError err) noexcept override; + + private: + meta::SchemaManager* schemaMan_ = nullptr; + kvstore::KVStore* kv_ = nullptr; + HttpCode err_{HttpCode::SUCCEEDED}; + std::string resp_; + GraphSpaceID spaceId_; + std::vector properties_; +}; + +} // namespace storage +} // namespace nebula diff --git a/src/storage/index/LookupBaseProcessor-inl.h b/src/storage/index/LookupBaseProcessor-inl.h index a01fb095497..2feab1fa34f 100644 --- a/src/storage/index/LookupBaseProcessor-inl.h +++ b/src/storage/index/LookupBaseProcessor-inl.h @@ -167,6 +167,8 @@ template StatusOr> LookupBaseProcessor::buildPlan( IndexFilterItem* filterItem, nebula::DataSet* result) { StoragePlan plan; + // TODO(sky) : Limit is not supported yet for de-dup node. 
+ // Related to paging scan, the de-dup execution plan needs to be refactored auto deDup = std::make_unique>(result, deDupColPos_); int32_t filterId = 0; std::unique_ptr> out; @@ -319,8 +321,8 @@ std::unique_ptr> LookupBaseProcessor::buildP auto indexId = ctx.get_index_id(); auto colHints = ctx.get_column_hints(); - auto indexScan = std::make_unique>( - context_.get(), indexId, std::move(colHints), limit_); + auto indexScan = + std::make_unique>(context_.get(), indexId, std::move(colHints)); if (context_->isEdge()) { auto edge = std::make_unique>( context_.get(), indexScan.get(), schemas_, context_->edgeName_, limit_); @@ -370,11 +372,11 @@ std::unique_ptr> LookupBaseProcessor::buildP auto indexId = ctx.get_index_id(); auto colHints = ctx.get_column_hints(); - auto indexScan = std::make_unique>( - context_.get(), indexId, std::move(colHints), limit_); + auto indexScan = + std::make_unique>(context_.get(), indexId, std::move(colHints)); auto filter = std::make_unique>( - context_.get(), indexScan.get(), exprCtx, exp, context_->isEdge()); + context_.get(), indexScan.get(), exprCtx, exp, context_->isEdge(), limit_); filter->addDependency(indexScan.get()); auto output = std::make_unique>(result, context_.get(), filter.get(), true); @@ -421,14 +423,14 @@ LookupBaseProcessor::buildPlanWithDataAndFilter(nebula::DataSet* resu auto indexId = ctx.get_index_id(); auto colHints = ctx.get_column_hints(); - auto indexScan = std::make_unique>( - context_.get(), indexId, std::move(colHints), limit_); + auto indexScan = + std::make_unique>(context_.get(), indexId, std::move(colHints)); if (context_->isEdge()) { auto edge = std::make_unique>( - context_.get(), indexScan.get(), schemas_, context_->edgeName_, limit_); + context_.get(), indexScan.get(), schemas_, context_->edgeName_); edge->addDependency(indexScan.get()); - auto filter = - std::make_unique>(context_.get(), edge.get(), exprCtx, exp); + auto filter = std::make_unique>( + context_.get(), edge.get(), exprCtx, exp, limit_); 
filter->addDependency(edge.get()); auto output = std::make_unique>(result, context_.get(), filter.get()); @@ -439,10 +441,10 @@ LookupBaseProcessor::buildPlanWithDataAndFilter(nebula::DataSet* resu return output; } else { auto vertex = std::make_unique>( - context_.get(), indexScan.get(), schemas_, context_->tagName_, limit_); + context_.get(), indexScan.get(), schemas_, context_->tagName_); vertex->addDependency(indexScan.get()); - auto filter = - std::make_unique>(context_.get(), vertex.get(), exprCtx, exp); + auto filter = std::make_unique>( + context_.get(), vertex.get(), exprCtx, exp, limit_); filter->addDependency(vertex.get()); auto output = std::make_unique>(result, context_.get(), filter.get()); diff --git a/src/storage/index/LookupProcessor.cpp b/src/storage/index/LookupProcessor.cpp index 8210c1b4666..9d448c69d9d 100644 --- a/src/storage/index/LookupProcessor.cpp +++ b/src/storage/index/LookupProcessor.cpp @@ -23,6 +23,11 @@ void LookupProcessor::process(const cpp2::LookupIndexRequest& req) { void LookupProcessor::doProcess(const cpp2::LookupIndexRequest& req) { auto retCode = requestCheck(req); + if (limit_ == 0) { + onProcessFinished(); + onFinished(); + return; + } if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { for (auto& p : req.get_parts()) { pushResultCode(retCode, p); diff --git a/src/storage/test/CMakeLists.txt b/src/storage/test/CMakeLists.txt index 1ee64ef9607..45dc4c0e662 100644 --- a/src/storage/test/CMakeLists.txt +++ b/src/storage/test/CMakeLists.txt @@ -47,6 +47,7 @@ set(storage_test_deps $ $ $ + $ ) nebula_add_test( @@ -511,6 +512,23 @@ nebula_add_test( gtest ) +nebula_add_test( + NAME + storage_http_property_test + SOURCES + StorageHttpPropertyHandlerTest.cpp + OBJECTS + $ + $ + ${storage_test_deps} + LIBRARIES + ${ROCKSDB_LIBRARIES} + ${THRIFT_LIBRARIES} + ${PROXYGEN_LIBRARIES} + wangle + gtest +) + nebula_add_test( NAME scan_edge_test diff --git a/src/storage/test/IndexScanLimitTest.cpp b/src/storage/test/IndexScanLimitTest.cpp 
index b71e3578898..3dd2482c339 100644 --- a/src/storage/test/IndexScanLimitTest.cpp +++ b/src/storage/test/IndexScanLimitTest.cpp @@ -20,6 +20,9 @@ namespace nebula { namespace storage { +ObjectPool objPool; +auto pool = &objPool; + class IndexScanLimitTest : public ::testing::Test { protected: GraphSpaceID spaceId = 1; @@ -98,36 +101,35 @@ class IndexScanLimitTest : public ::testing::Test { } // Edge and vertex have the same schema of structure, so it's good to only generate it once. - RowWriterV2 writer(tag.get()); - auto r1 = writer.setValue(0, 888); - if (r1 != WriteResult::SUCCEEDED) { - LOG(ERROR) << "Invalid prop col1"; - return false; - } - auto r2 = writer.setValue(1, "row"); - if (r2 != WriteResult::SUCCEEDED) { - LOG(ERROR) << "Invalid prop col2"; - return false; - } - auto ret = writer.finish(); - if (ret != WriteResult::SUCCEEDED) { - LOG(ERROR) << "Failed to write data"; - return false; - } - auto val = std::move(writer).moveEncodedStr(); + RowWriterV2 writer1(tag.get()); + EXPECT_EQ(WriteResult::SUCCEEDED, writer1.setValue(0, 111)); + EXPECT_EQ(WriteResult::SUCCEEDED, writer1.setValue(1, "row_111")); + EXPECT_EQ(WriteResult::SUCCEEDED, writer1.finish()); + auto val1 = std::move(writer1).moveEncodedStr(); + RowWriterV2 writer2(tag.get()); + EXPECT_EQ(WriteResult::SUCCEEDED, writer2.setValue(0, 222)); + EXPECT_EQ(WriteResult::SUCCEEDED, writer2.setValue(1, "row_222")); + EXPECT_EQ(WriteResult::SUCCEEDED, writer2.finish()); + auto val2 = std::move(writer2).moveEncodedStr(); for (auto pId : parts) { std::vector data; for (int64_t vid = pId * 1000; vid < (pId + 1) * 1000; vid++) { + int64_t col1Val = vid % 2 == 0 ? 111 : 222; + std::string val = vid % 2 == 0 ? 
val1 : val2; auto vertex = folly::to(vid); - auto edgeKey = NebulaKeyUtils::edgeKey(8, pId, vertex, edgeType, 0, vertex); - auto vertexKey = NebulaKeyUtils::vertexKey(8, pId, vertex, tagId); + auto edgeKey = NebulaKeyUtils::edgeKey(vertexLen, pId, vertex, edgeType, 0, vertex); + auto vertexKey = NebulaKeyUtils::vertexKey(vertexLen, pId, vertex, tagId); data.emplace_back(std::move(edgeKey), val); - data.emplace_back(std::move(vertexKey), val); + data.emplace_back(std::move(vertexKey), std::move(val)); if (indexMan_ != nullptr) { if (indexMan_->getTagIndex(spaceId, tagIndex).ok()) { - auto vertexIndexKey = IndexKeyUtils::vertexIndexKey( - vertexLen, pId, tagIndex, vertex, IndexKeyUtils::encodeValues({888}, genCols())); + auto vertexIndexKey = + IndexKeyUtils::vertexIndexKey(vertexLen, + pId, + tagIndex, + vertex, + IndexKeyUtils::encodeValues({col1Val}, genCols())); data.emplace_back(std::move(vertexIndexKey), ""); } if (indexMan_->getEdgeIndex(spaceId, edgeIndex).ok()) { @@ -138,7 +140,7 @@ class IndexScanLimitTest : public ::testing::Test { vertex, 0, vertex, - IndexKeyUtils::encodeValues({888}, genCols())); + IndexKeyUtils::encodeValues({col1Val}, genCols())); data.emplace_back(std::move(edgeIndexKey), ""); } } @@ -228,6 +230,17 @@ TEST_F(IndexScanLimitTest, LookupTagIndexLimit) { EXPECT_EQ(0, resp.get_data()->rows.size()); } + // limit == 1 + { + req.set_limit(1); + auto* processor = LookupProcessor::instance(storageEnv_.get(), nullptr, nullptr); + auto fut = processor->getFuture(); + processor->process(req); + auto resp = std::move(fut).get(); + EXPECT_EQ(0, resp.result.failed_parts.size()); + EXPECT_EQ(1 * parts.size(), resp.get_data()->rows.size()); + } + // limit 5 by each part { req.set_limit(5); @@ -239,18 +252,41 @@ TEST_F(IndexScanLimitTest, LookupTagIndexLimit) { EXPECT_EQ(5 * parts.size(), resp.get_data()->rows.size()); } - // limit 5 by each part through IndexScanNode->DataNode->FilterNode + // limit 5 by each part through IndexScanNode->DataNode { 
req.set_limit(5); cpp2::IndexColumnHint columnHint; - columnHint.set_begin_value(Value(888)); + columnHint.set_begin_value(Value(111)); columnHint.set_column_name("col1"); columnHint.set_scan_type(cpp2::ScanType::PREFIX); std::vector columnHints; columnHints.emplace_back(std::move(columnHint)); + req.return_columns_ref().value().emplace_back("col2"); req.indices_ref().value().contexts_ref().value().begin()->set_column_hints( std::move(columnHints)); + auto* processor = LookupProcessor::instance(storageEnv_.get(), nullptr, nullptr); + auto fut = processor->getFuture(); + processor->process(req); + auto resp = std::move(fut).get(); + EXPECT_EQ(0, resp.result.failed_parts.size()); + EXPECT_EQ(5 * parts.size(), resp.get_data()->rows.size()); + } + // limit 5 by each part through IndexScanNode->DataNode->FilterNode + { + req.set_limit(5); + cpp2::IndexColumnHint columnHint; + columnHint.set_begin_value(Value(111)); + columnHint.set_column_name("col1"); + columnHint.set_scan_type(cpp2::ScanType::PREFIX); + std::vector columnHints; + columnHints.emplace_back(std::move(columnHint)); + auto expr = RelationalExpression::makeNE(pool, + TagPropertyExpression::make(pool, "100", "col1"), + ConstantExpression::make(pool, Value(300L))); + req.indices_ref().value().contexts_ref().value().begin()->set_filter(expr->encode()); + req.indices_ref().value().contexts_ref().value().begin()->set_column_hints( + std::move(columnHints)); auto* processor = LookupProcessor::instance(storageEnv_.get(), nullptr, nullptr); auto fut = processor->getFuture(); processor->process(req); @@ -299,6 +335,17 @@ TEST_F(IndexScanLimitTest, LookupEdgeIndexLimit) { EXPECT_EQ(0, resp.get_data()->rows.size()); } + // limit == 1 + { + req.set_limit(1); + auto* processor = LookupProcessor::instance(storageEnv_.get(), nullptr, nullptr); + auto fut = processor->getFuture(); + processor->process(req); + auto resp = std::move(fut).get(); + EXPECT_EQ(0, resp.result.failed_parts.size()); + EXPECT_EQ(1 * parts.size(), 
resp.get_data()->rows.size()); + } + // limit 5 by each part { req.set_limit(5); @@ -310,15 +357,16 @@ TEST_F(IndexScanLimitTest, LookupEdgeIndexLimit) { EXPECT_EQ(5 * parts.size(), resp.get_data()->rows.size()); } - // limit 5 by each part through IndexScanNode->DataNode->FilterNode + // limit 5 by each part through IndexScanNode->DataNode { req.set_limit(5); cpp2::IndexColumnHint columnHint; - columnHint.set_begin_value(Value(888)); + columnHint.set_begin_value(Value(111)); columnHint.set_column_name("col1"); columnHint.set_scan_type(cpp2::ScanType::PREFIX); std::vector columnHints; columnHints.emplace_back(std::move(columnHint)); + req.return_columns_ref().value().emplace_back("col2"); req.indices_ref().value().contexts_ref().value().begin()->set_column_hints( std::move(columnHints)); @@ -329,6 +377,29 @@ TEST_F(IndexScanLimitTest, LookupEdgeIndexLimit) { EXPECT_EQ(0, resp.result.failed_parts.size()); EXPECT_EQ(5 * parts.size(), resp.get_data()->rows.size()); } + + // limit 5 by each part through IndexScanNode->DataNode->FilterNode + { + req.set_limit(5); + cpp2::IndexColumnHint columnHint; + columnHint.set_begin_value(Value(111)); + columnHint.set_column_name("col1"); + columnHint.set_scan_type(cpp2::ScanType::PREFIX); + std::vector columnHints; + columnHints.emplace_back(std::move(columnHint)); + auto expr = RelationalExpression::makeNE(pool, + EdgePropertyExpression::make(pool, "200", "col1"), + ConstantExpression::make(pool, Value(300L))); + req.indices_ref().value().contexts_ref().value().begin()->set_filter(expr->encode()); + req.indices_ref().value().contexts_ref().value().begin()->set_column_hints( + std::move(columnHints)); + auto* processor = LookupProcessor::instance(storageEnv_.get(), nullptr, nullptr); + auto fut = processor->getFuture(); + processor->process(req); + auto resp = std::move(fut).get(); + EXPECT_EQ(0, resp.result.failed_parts.size()); + EXPECT_EQ(5 * parts.size(), resp.get_data()->rows.size()); + } } } // namespace storage diff --git 
a/src/storage/test/StorageHttpPropertyHandlerTest.cpp b/src/storage/test/StorageHttpPropertyHandlerTest.cpp new file mode 100644 index 00000000000..3285ab35174 --- /dev/null +++ b/src/storage/test/StorageHttpPropertyHandlerTest.cpp @@ -0,0 +1,117 @@ +/* Copyright (c) 2020 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License, + * attached with Common Clause Condition 1.0, found in the LICENSES directory. + */ + +#include +#include + +#include "common/base/Base.h" +#include "common/fs/TempDir.h" +#include "kvstore/RocksEngineConfig.h" +#include "mock/MockCluster.h" +#include "storage/http/StorageHttpPropertyHandler.h" +#include "storage/test/TestUtils.h" +#include "webservice/Router.h" +#include "webservice/WebService.h" +#include "webservice/test/TestUtils.h" + +namespace nebula { +namespace storage { + +class StorageHttpStatsHandlerTestEnv : public ::testing::Environment { + public: + void SetUp() override { + FLAGS_ws_ip = "127.0.0.1"; + FLAGS_ws_http_port = 0; + FLAGS_ws_h2_port = 0; + FLAGS_enable_rocksdb_statistics = true; + rootPath_ = std::make_unique("/tmp/StorageHttpPropertyHandler.XXXXXX"); + cluster_ = std::make_unique(); + cluster_->initStorageKV(rootPath_->path()); + + VLOG(1) << "Starting web service..."; + webSvc_ = std::make_unique(); + auto& router = webSvc_->router(); + router.get("/rocksdb_property").handler([this](nebula::web::PathParams&&) { + return new storage::StorageHttpPropertyHandler(cluster_->storageEnv_->schemaMan_, + cluster_->storageEnv_->kvstore_); + }); + auto status = webSvc_->start(); + ASSERT_TRUE(status.ok()) << status; + } + + void TearDown() override { + cluster_.reset(); + webSvc_.reset(); + rootPath_.reset(); + VLOG(1) << "Web service stopped"; + } + + protected: + std::unique_ptr cluster_; + std::unique_ptr webSvc_; + std::unique_ptr rootPath_; +}; + +static std::string request(const std::string& url) { + auto request = + folly::stringPrintf("http://%s:%d%s", FLAGS_ws_ip.c_str(), 
FLAGS_ws_http_port, url.c_str()); + auto resp = http::HttpClient::get(request); + EXPECT_TRUE(resp.ok()); + return resp.value(); +} + +static void checkInvalidRequest(const std::string& url, const std::string& errMsg) { + ASSERT_EQ(0, request(url).find(errMsg)); +} + +TEST(StorageHttpPropertyHandlerTest, InvalidRequest) { + checkInvalidRequest("/rocksdb_property", "Space should not be empty."); + checkInvalidRequest("/rocksdb_property?space=xxx", "Space not found: xxx"); + checkInvalidRequest("/rocksdb_property?space=1", "Property should not be empty."); + checkInvalidRequest("/rocksdb_property?space=1&property=yyy", "Property not found: yyy"); +} + +TEST(StorageHttpPropertyHandlerTest, ValidRequest) { + { + std::string expect = + R"([ + { + "Engine 0": "0", + "Engine 1": "0" + } +])"; + EXPECT_EQ(expect, request("/rocksdb_property?space=1&property=rocksdb.block-cache-usage")); + } + { + std::string expect = + R"([ + { + "Engine 0": "0", + "Engine 1": "0" + }, + { + "Engine 0": "0", + "Engine 1": "0" + } +])"; + EXPECT_EQ(expect, + request("/rocksdb_property?space=1&property=" + "rocksdb.block-cache-usage,rocksdb.is-write-stopped")); + } +} + +} // namespace storage +} // namespace nebula + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + folly::init(&argc, &argv, true); + google::SetStderrLogging(google::INFO); + + ::testing::AddGlobalTestEnvironment(new nebula::storage::StorageHttpStatsHandlerTestEnv()); + + return RUN_ALL_TESTS(); +} diff --git a/src/tools/db-dump/CMakeLists.txt b/src/tools/db-dump/CMakeLists.txt index ae25df40ea2..fa4587a52f3 100644 --- a/src/tools/db-dump/CMakeLists.txt +++ b/src/tools/db-dump/CMakeLists.txt @@ -43,6 +43,7 @@ set(tools_test_deps $ $ $ + $ ) nebula_add_executable( diff --git a/src/tools/db-upgrade/CMakeLists.txt b/src/tools/db-upgrade/CMakeLists.txt index 0259465280e..ed036caea97 100644 --- a/src/tools/db-upgrade/CMakeLists.txt +++ b/src/tools/db-upgrade/CMakeLists.txt @@ -51,6 +51,7 @@ 
nebula_add_executable( $ $ $ + $ LIBRARIES ${ROCKSDB_LIBRARIES} ${THRIFT_LIBRARIES} diff --git a/src/tools/meta-dump/CMakeLists.txt b/src/tools/meta-dump/CMakeLists.txt index b715015e61f..d6aad52c963 100644 --- a/src/tools/meta-dump/CMakeLists.txt +++ b/src/tools/meta-dump/CMakeLists.txt @@ -48,6 +48,7 @@ nebula_add_executable( $ $ $ + $ LIBRARIES ${ROCKSDB_LIBRARIES} ${THRIFT_LIBRARIES} diff --git a/src/tools/storage-perf/CMakeLists.txt b/src/tools/storage-perf/CMakeLists.txt index ee298ac6737..94e888f12bd 100644 --- a/src/tools/storage-perf/CMakeLists.txt +++ b/src/tools/storage-perf/CMakeLists.txt @@ -43,6 +43,7 @@ set(perf_test_deps $ $ $ + $ ) nebula_add_executable( diff --git a/tests/Makefile b/tests/Makefile index 36963cbf2c7..79d25120ab7 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -16,6 +16,10 @@ TEST_DIR ?= $(CURR_DIR) BUILD_DIR ?= $(CURR_DIR)/../build DEBUG ?= true J ?= 10 +ENABLE_SSL ?= false +ENABLE_GRAPH_SSL ?= false +ENABLE_META_SSL ?= false +CA_SIGNED ?= false install-deps: pip3 install --user -U setuptools wheel -i $(PYPI_MIRROR) @@ -49,7 +53,7 @@ check: up: clean @mkdir -p $(CURR_DIR)/.pytest - PYTHONPATH=$$PYTHONPATH:$(CURR_DIR)/.. $(CURR_DIR)/nebula-test-run.py --cmd=start --rm_dir=$(RM_DIR) --build_dir=$(BUILD_DIR) --debug=$(DEBUG) --multi_graphd=true + PYTHONPATH=$$PYTHONPATH:$(CURR_DIR)/.. $(CURR_DIR)/nebula-test-run.py --cmd=start --rm_dir=$(RM_DIR) --build_dir=$(BUILD_DIR) --debug=$(DEBUG) --multi_graphd=true --enable_ssl=$(ENABLE_SSL) --enable_graph_ssl=$(ENABLE_GRAPH_SSL) --enable_meta_ssl=$(ENABLE_META_SSL) --ca_signed=$(CA_SIGNED) down: PYTHONPATH=$$PYTHONPATH:$(CURR_DIR)/.. 
$(CURR_DIR)/nebula-test-run.py --cmd=stop --rm_dir=$(RM_DIR) diff --git a/tests/admin/test_permission.py b/tests/admin/test_permission.py index 42bb08e29d9..96152cfbb48 100644 --- a/tests/admin/test_permission.py +++ b/tests/admin/test_permission.py @@ -762,6 +762,8 @@ def test_show_roles(self): self.check_resp_succeeded(resp) time.sleep(self.delay) + ret, self.testClient = self.spawn_nebula_client_and_auth('test', 'test') + assert ret ret, self.adminClient = self.spawn_nebula_client_and_auth('admin', 'admin') assert ret ret, self.dbaClient = self.spawn_nebula_client_and_auth('dba', 'dba') @@ -771,6 +773,11 @@ def test_show_roles(self): ret, self.guestClient = self.spawn_nebula_client_and_auth('guest', 'guest') assert ret + query = 'SHOW ROLES IN space5' + expected_result = [] + resp = self.testClient.execute(query) + self.check_resp_failed(resp, ttypes.ErrorCode.E_BAD_PERMISSION) + query = 'SHOW ROLES IN space5' expected_result = [['guest', 'GUEST'], ['user', 'USER'], diff --git a/tests/cert/test.ca.key b/tests/cert/test.ca.key new file mode 100644 index 00000000000..6006d0f275f --- /dev/null +++ b/tests/cert/test.ca.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,6D12ED8559E80FA3 + +tv9epnwlt4dP6Q5ee0dACOyFA5BTwYTdoMykQRJrKGwfaNeXUXn+sQ/U/oFHp1Wx +O8VZE+z2aHpiFSTw+Eh6MPt86X5yVG3tpeVO6dErvr8Kd+NpuI8zn7rNoOFRh8wD +33EFcQMLQPneDl10O18hooIoi0qwp1pd63hYZPwEhB3eOrM5Mnv9OVJs65bzYfyf +Wku33YWYxeqlDvMCsou8PZnv/M2wYsr7+QoTcNmGKP45igMthMDBzwgF+q0p9ZZU +N11c6ojAs01kfuqFf3vKfHNYe6zsBiNhnUuEy8enXSxD5E7tR/OI8aEzPLdk7fmN +/UsMK2LE0Yd5iS3O1x/1ZjSBxJ+M/UzzCO692GTAiD6Hc13iJOavq/vt1mEPjfCD +neF38Bhb5DfFi+UAHrz6EHMreamGCzP82us2maIs7mSTq7nXDZfbBc7mBDLAUUnT +J6tlrTyc+DQXzkJa6jmbxJhcsWm6XvjIBEzSXVHxEDPLnZICQk3VXODjCXTD75Rg +0WaS78Ven7DW8wn07q3VzWAFDKaet3VI+TVTv7EfIavlfiA6LSshaENdFLeHahNE +s/V/j5K3Pg6+WQcZRgOsfqIwUCSQxY13R6TTdaaCkLay5BggF5iiAO3pkqsJiadf +w843Ak4USBptymJxoZgJyFtQHpQyNiFfsAbs9BaYbg2evvE7/VQhLk0gQ7HgQMeJ 
+wgxEQqZQKDCCSugSzY1YEGXKnrZYCKyipzyyH936mE15zNwhYp/Pi2020+gmtP3h +CDfcPs1yeLI2/1JuimafbuKsv9xchWa6ASU8p8Q7wTLtUj9ylLKyA4A/75pK0DXG +Hv/q0O+UfhAMD438SoPBle7RSvIsDU1VjUqstlNybBglBZxGIME7/18+Ms7U32wh +4xFkZwxT2nqFgyk37tXMdMz9UBh12/AXR9NU4XY37C3Ao2TDT7/0DvU6KdJhsDpv +rGcaC2zzhko+0CPrLlk52KbqP003JXiWvOSI+FylyPPDB/YGitmndJUuQblf3u/E +l+tGi9MeSBQeWKV6D3AVnO05AZjfTUzSK0vw4DgNh5YPNJvLy31B7kDAS88vyGI1 +t6MBwjW4/tz/nS/p1Go3mSzBhPkIsCrZE+ar7lH8p8JqkLl4fXIMaVKIfyfJdzyS +lkh3K7bOGDPegxxxaWdb+EnC7k+1R3EOU7uJFW61HyrGI3q6Y7kOl5aYSJ5Ge1Uv +PycFWHWVTHq/R7HRE6HIJzGe/PnLIbStXLDFeivjfcYq1YaSaF8Vl+xg+0u3ULOl +P6IuPTph6dlcgttRZVl3ETcF0T+2wfbUwgjf0ZiguCJfR2jLGhPl1KBg0Kd9cTSY +zI3YMMd2G8hApt/QFlm4Ry8CqaJUmDcjDNIJT3M+RldUgfz37NsX05cA5e9+I1AL +2406F/v5U9gWsYx7HuwJtQrDzYYDbl1GD4H+qHFJE5JYhPP4AyWYxJ1NR5dqyvrt ++3r5+xlwZrS76c10RsBWL7th8ZEzRxOZxbtLwbf4bG/tIGfQP2sTnWwA+qym6b2S +sRduqOTP+xwnhOq/ZKn8lfsDfhT8CPnKHBsd09kM9y/UWuxFe0upLydRLE/Wsb9s +-----END RSA PRIVATE KEY----- diff --git a/tests/cert/test.ca.pem b/tests/cert/test.ca.pem new file mode 100644 index 00000000000..412ba31619d --- /dev/null +++ b/tests/cert/test.ca.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEGzCCAwOgAwIBAgIUDcmZFpL4PcdCXfLRBK8bR2vb39cwDQYJKoZIhvcNAQEL +BQAwgZwxCzAJBgNVBAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwI +SGFuZ3pob3UxFDASBgNVBAoMC1Zlc29mdCBJbmMuMRAwDgYDVQQLDAdzZWN0aW9u +MRYwFAYDVQQDDA1zaHlsb2NrIGh1YW5nMScwJQYJKoZIhvcNAQkBFhhzaHlsb2Nr +Lmh1YW5nQHZlc29mdC5jb20wHhcNMjEwODE5MDkyNDQ3WhcNMjUwODE4MDkyNDQ3 +WjCBnDELMAkGA1UEBhMCQ04xETAPBgNVBAgMCFpoZWppYW5nMREwDwYDVQQHDAhI +YW5nemhvdTEUMBIGA1UECgwLVmVzb2Z0IEluYy4xEDAOBgNVBAsMB3NlY3Rpb24x +FjAUBgNVBAMMDXNoeWxvY2sgaHVhbmcxJzAlBgkqhkiG9w0BCQEWGHNoeWxvY2su +aHVhbmdAdmVzb2Z0LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AMEAgpamCQHl+8JnUHI6/VmJHjDLYJLTliN/CwpFrhMqIVjJ8wG57WYLpXpn91Lz +eHu52LkVzcikybIJ2a+LOTvnhNFdbmTbqDtrb+s6wM/sO+nF6tU2Av4e5zhyKoeR +LL+rHMk3nymohbdN4djySFmOOU5A1O/4b0bZz4Ylu995kUawdiaEo13BzxxOC7Ik 
+Gge5RyDcm0uLXZqTAPy5Sjv/zpOyj0AqL1CJUH7XBN9OMRhVU0ZX9nHWl1vgLRld +J6XT17Y9QbbHhCNEdAmFE5kEFgCvZc+MungUYABlkvoj86TLmC/FMV6fWdxQssyd +hS+ssfJFLaTDaEFz5a/Tr48CAwEAAaNTMFEwHQYDVR0OBBYEFK0GVrQx+wX1GCHy +e+6fl4X+prmYMB8GA1UdIwQYMBaAFK0GVrQx+wX1GCHye+6fl4X+prmYMA8GA1Ud +EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAHqP8P+ZUHmngviHLSSN1ln5 +Mx4BCkVeFRUaFx0yFXytV/iLXcG2HpFg3A9rAFoYgCDwi1xpsERnBZ/ShTv/eFOc +IxBY5yggx3/lGi8tAgvUdarhd7mQO67UJ0V4YU3hAkbnZ8grHHXj+4hfgUpY4ok6 +yaed6HXwknBb9W8N1jZI8ginhkhjaeRCHdMiF+fBvNCtmeR1bCml1Uz7ailrpcaT +Mf84+5VYuFEnaRZYWFNsWNCOBlJ/6/b3V10vMXzMmYHqz3xgAq0M3fVTFTzopnAX +DLSzorL/dYVdqEDCQi5XI9YAlgWN4VeGzJI+glkLOCNzHxRNP6Qev+YI+7Uxz6I= +-----END CERTIFICATE----- diff --git a/tests/cert/test.ca.srl b/tests/cert/test.ca.srl new file mode 100644 index 00000000000..877d296b7c1 --- /dev/null +++ b/tests/cert/test.ca.srl @@ -0,0 +1 @@ +4AF2EBB941EA7EE8358ECC7E51C2F1A38EE18873 diff --git a/tests/cert/test.derive.crt b/tests/cert/test.derive.crt new file mode 100644 index 00000000000..8f03073e2ff --- /dev/null +++ b/tests/cert/test.derive.crt @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIIDvjCCAqYCFEry67lB6n7oNY7MflHC8aOO4YhzMA0GCSqGSIb3DQEBCwUAMIGc +MQswCQYDVQQGEwJDTjERMA8GA1UECAwIWmhlamlhbmcxETAPBgNVBAcMCEhhbmd6 +aG91MRQwEgYDVQQKDAtWZXNvZnQgSW5jLjEQMA4GA1UECwwHc2VjdGlvbjEWMBQG +A1UEAwwNc2h5bG9jayBodWFuZzEnMCUGCSqGSIb3DQEJARYYc2h5bG9jay5odWFu +Z0B2ZXNvZnQuY29tMB4XDTIxMDgyNDEwNTExMloXDTIzMTEyNzEwNTExMlowgZkx +CzAJBgNVBAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwISGFuZ3po +b3UxFDASBgNVBAoMC1Zlc29mdCBJbmMuMRAwDgYDVQQLDAdzZWN0aW9uMRMwEQYD +VQQDDApTaHlsb2NrIEhnMScwJQYJKoZIhvcNAQkBFhhzaHlsb2NrLmh1YW5nQHZl +c29mdC5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHk1PQtaCG +S31nvxKuT6pzVQuOsA2hEIDzBZuoBK3blezBB16fjUWG2wHG/r9Oss5YzOly4viL +1oFLsNdYg27EFH7pcGfdSUmZa6LHILegJTmLa1aB4lRG9EsvPIxNuo637CW2z6EW +ElVKXn2N1G1vW3fpKGxJ+d1ovaFfBliO0sK+myW+vYdKrNg70WqKKCoCIlIjEWw3 +vQdrmvhuhIBbG1bXkXbJwIepBdb4wGSx8qsgs93I6/je/K/iJaPJIqdH8loo6fSo 
+DBUiNA87ZsQdtbBeuk7QuF71SxD5+E8wCMtFMwRGmL0vYMPwkaurKxwEs49e8eTz +RvIrNtyYgVo7AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAGBpm5OLXn02kWr1ENU5 +FOOVryD41SCmPy8hLwQ2MCXd446UfTXc5TTlllksaePn373ZANLUe78vUCoVPjOh +dU5GxyOKtubXovI+yuvMS11u00KtgiAd5qa+IhX3c/P60bh4+fdKZ9ViyLsG+IpQ ++XDYT2uekLyjXXJU6h1raW7M1VY9FcDC63moXz0WgWJ/9tJgB0ZQkVcL+2UpveoZ +Whf9P0xAzCmNSrR7CMhdeRN2vBQQaHXk/64wkHncdkz/NglVl00rh4MtBKZ6Cqze +uZvgrxOJNzB4aXBMHO7sWzw1VSfS79CZm4H39hBWGiVEkr3yZYQbboDRY6F5dQyc +BZc= +-----END CERTIFICATE----- diff --git a/tests/cert/test.derive.csr b/tests/cert/test.derive.csr new file mode 100644 index 00000000000..89b26237ec7 --- /dev/null +++ b/tests/cert/test.derive.csr @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIDEjCCAfoCAQAwgZkxCzAJBgNVBAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzER +MA8GA1UEBwwISGFuZ3pob3UxFDASBgNVBAoMC1Zlc29mdCBJbmMuMRAwDgYDVQQL +DAdzZWN0aW9uMRMwEQYDVQQDDApTaHlsb2NrIEhnMScwJQYJKoZIhvcNAQkBFhhz +aHlsb2NrLmh1YW5nQHZlc29mdC5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDHk1PQtaCGS31nvxKuT6pzVQuOsA2hEIDzBZuoBK3blezBB16fjUWG +2wHG/r9Oss5YzOly4viL1oFLsNdYg27EFH7pcGfdSUmZa6LHILegJTmLa1aB4lRG +9EsvPIxNuo637CW2z6EWElVKXn2N1G1vW3fpKGxJ+d1ovaFfBliO0sK+myW+vYdK +rNg70WqKKCoCIlIjEWw3vQdrmvhuhIBbG1bXkXbJwIepBdb4wGSx8qsgs93I6/je +/K/iJaPJIqdH8loo6fSoDBUiNA87ZsQdtbBeuk7QuF71SxD5+E8wCMtFMwRGmL0v +YMPwkaurKxwEs49e8eTzRvIrNtyYgVo7AgMBAAGgMzAVBgkqhkiG9w0BCQcxCAwG +dmVzb2Z0MBoGCSqGSIb3DQEJAjENDAtWZXNvZnQgSW5jLjANBgkqhkiG9w0BAQsF +AAOCAQEAjmyCyxziJMR8NILRAwmfYcBB90CbTFMMEyWy402KxoXcyVZBGO2eukIq +gaF2ywuh6yuTPtGsdVMVTWDQ4RLYpoQoR5Blu+M8Or8rhZSfMYXi79Ne3abSF28E +eWjBmh2Ys0GtaThlufJBWE+vWPH2iEGrSRTg1fvBLBzAW6nXU2svoTrKfDcEoY5z +xB0CKhBoewoIZ2FPBmBAnIWHfXR/vQ76QIoNdfQ4nT8iXuLRoNjRlvVU4AUDwKtu +keRDrnmJ7A5eqTlleCMzra2MAp9Na9gojXlGQP9q9V8nFtSvbjYAoH0ezWpdWj4+ +Rtu9EK4JkDymmmZcneFapExZrRLt0A== +-----END CERTIFICATE REQUEST----- diff --git a/tests/cert/test.derive.key b/tests/cert/test.derive.key new file mode 100644 index 00000000000..a011917b3af --- /dev/null +++ 
b/tests/cert/test.derive.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAx5NT0LWghkt9Z78Srk+qc1ULjrANoRCA8wWbqASt25XswQde +n41FhtsBxv6/TrLOWMzpcuL4i9aBS7DXWINuxBR+6XBn3UlJmWuixyC3oCU5i2tW +geJURvRLLzyMTbqOt+wlts+hFhJVSl59jdRtb1t36ShsSfndaL2hXwZYjtLCvpsl +vr2HSqzYO9FqiigqAiJSIxFsN70Ha5r4boSAWxtW15F2ycCHqQXW+MBksfKrILPd +yOv43vyv4iWjySKnR/JaKOn0qAwVIjQPO2bEHbWwXrpO0Lhe9UsQ+fhPMAjLRTME +Rpi9L2DD8JGrqyscBLOPXvHk80byKzbcmIFaOwIDAQABAoIBAEZ50URHjzs9VziW +sdsaSN/XbXBi3T0+Xbr0BQatOFPtuqBjoNeJBL9dgWArP5Vj8RhMrDekzQ5cnmYD +OdiI+UmGz1ZSGmt7YOErsFzPQejsnEiOjArryMURqacxo34jXhi27I6E/aaUrMfJ +XF8EX+zOCSct3ie1c6l0JZMv43/zbzP2vMFEdfnVfZA2Kxo5l3I4rjuxHUEWHzrb +EgM4a2+y7LQrut75zP9zWEZAqim/VEIEj24Gqj+Vocb6cHlc31KzKaEz7Ra5ha2J +kN2CQRKCzoMupVL5E6dWMiDVjUyUXdUgjSCIW2H+E1ONgvxA78jJx7+Dzj+/bWxH +h/vr3dkCgYEA9Aev7PGoGF0eapZY3crehvtCn1v4YLheh0dk4EpbpbEx0rQaG3h7 +YYCf7euxMvoTsKPETHAUG/s/RZV1DNOjxs8GKgEIVaRYEf1VZeDXudtnyKBwCMAL +5CKHRBvfmNG9n+PpQQlrIAZGej7HU+/IzEVsrD2A5DeH9IVpMNvrX10CgYEA0V1r +aydbBP+Ma/fiG5UDa8l4GdLzvAoW2cY6ZhQX4NiLTK91MwA/QOQcVMvJAN2KpPHC +kGDRT7IhMs66cMxl0ImIJ2QSnv8HRNmBBSdUtJx1S6nV2u0VfgP61oNT/YbLR/Jk +CAIl1qe7Q8IsrMbPxCbt8g+D8Wr9C3pdYYqFvncCgYEAicGdKmDwx3Apr3nYCLxx +CjnkzhkZCWCK3EsNQyA2xD5XJd7NrhxBajU2ExUuHtzVKK4KLixG7dTTTvCj9u2y +UpSjoiqbDd2MaftcrfpTTXPyDmujUw02qT5kpaomexpLtWrvTeuHMbjZKEEwPM3r +yISYaFL/49UFRp/ZVd+P63ECgYAX1B0ctf77A6bUxwK6Buy7wNNlhQful+tf39rX +sWPCWIMKOFILevS4Cv5afFMlQRG9kjKFwi8wdeKnaLX5jpnr8StI6G/iHr6SDHtN +vds7Ly9+bBcF8sPmcseC0LGngkbyqljOPIhX9QEwRhJVm88b0R511WQ7/uRMASJN +rrloIwKBgCxYlu1xvvEuQNoIux/yKAEJ1h4Ta2zc5upjw0uDKMi0UNIbNhgdFOvj +LuVbxTRU8WktrLNk3T0rsopKsTbEZVg6Yuv8ZLkEiNYTzhUbn2Y5yM3bnoVwyOns +pTtqmBtvDZxaRCYdIQG3b09IvrewDk26AOtNHdeKw883G2muP/vA +-----END RSA PRIVATE KEY----- diff --git a/tests/common/nebula_service.py b/tests/common/nebula_service.py index fd7531ca12e..13ec5690e4f 100644 --- a/tests/common/nebula_service.py +++ b/tests/common/nebula_service.py @@ -60,15 +60,33 @@ def _copy_nebula_conf(self): 
os.makedirs(resources_dir) shutil.copy(self.build_dir + '/../resources/gflags.json', resources_dir) - def _format_nebula_command(self, name, meta_port, ports, debug_log=True): + # cert files + shutil.copy(self.src_dir + '/tests/cert/test.ca.key', + resources_dir) + shutil.copy(self.src_dir + '/tests/cert/test.ca.pem', + resources_dir) + shutil.copy(self.src_dir + '/tests/cert/test.derive.key', + resources_dir) + shutil.copy(self.src_dir + '/tests/cert/test.derive.crt', + resources_dir) + + def _format_nebula_command(self, name, meta_port, ports, debug_log=True, ca_signed=False): params = [ "--meta_server_addrs={}", "--port={}", "--ws_http_port={}", "--ws_h2_port={}", "--heartbeat_interval_secs=1", - "--expired_time_factor=60" + "--expired_time_factor=60", ] + if ca_signed: + params.append('--ca_path=share/resources/test.ca.pem') + params.append('--cert_path=share/resources/test.derive.crt') + params.append('--key_path=share/resources/test.derive.key') + else: + params.append('--cert_path=share/resources/test.ca.pem') + params.append('--key_path=share/resources/test.ca.key') + if name == 'graphd': params.append('--local_config=false') params.append('--enable_authorize=true') @@ -151,7 +169,7 @@ def _check_servers_status(self, ports): time.sleep(1) return False - def start(self, debug_log=True, multi_graphd=False): + def start(self, debug_log=True, multi_graphd=False, enable_ssl=False, enable_graph_ssl=False, enable_meta_ssl=False, ca_signed=False): os.chdir(self.work_dir) metad_ports = self._find_free_port() @@ -184,10 +202,14 @@ def start(self, debug_log=True, multi_graphd=False): command = self._format_nebula_command(new_name, metad_ports[0], ports, - debug_log) + debug_log, + ca_signed=ca_signed) if server_name == 'graphd1': command += ' --log_dir=logs1' command += ' --pid_file=pids1/nebula-graphd.pid' + command += ' --enable_ssl={}'.format(enable_ssl) + command += ' --enable_graph_ssl={}'.format(enable_graph_ssl) + command += ' 
--enable_meta_ssl={}'.format(enable_meta_ssl) print("exec: " + command) p = subprocess.Popen([command], shell=True, stdout=subprocess.PIPE) p.wait() diff --git a/tests/common/types.py b/tests/common/types.py index 335eb9bd55c..7d8b64519b7 100644 --- a/tests/common/types.py +++ b/tests/common/types.py @@ -51,6 +51,14 @@ def drop_stmt(self) -> str: def is_int_vid(self) -> bool: return self.vid_type == 'int' + @property + def _name(self): + return self.name + + @_name.setter + def _name(self, _name: str): + self.name = _name + class Column: def __init__(self, index: int): diff --git a/tests/nebula-test-run.py b/tests/nebula-test-run.py index 1b38d3cba14..0399a3b2139 100755 --- a/tests/nebula-test-run.py +++ b/tests/nebula-test-run.py @@ -52,6 +52,22 @@ def init_parser(): dest='debug', default=True, help='Print verbose debug logs') + opt_parser.add_option('--enable_ssl', + dest='enable_ssl', + default=False, + help='Whether enable SSL for cluster.') + opt_parser.add_option('--enable_graph_ssl', + dest='enable_graph_ssl', + default=False, + help='Whether enable SSL for graph server.') + opt_parser.add_option('--enable_meta_ssl', + dest='enable_meta_ssl', + default=False, + help='Whether enable SSL for meta server.') + opt_parser.add_option('--ca_signed', + dest='ca_signed', + default=False, + help='Whether enable CA signed SSL/TLS mode.') return opt_parser @@ -70,7 +86,7 @@ def start_nebula(nb, configs): nb.install() address = "localhost" debug = opt_is(configs.debug, "true") - ports = nb.start(debug_log=debug, multi_graphd=configs.multi_graphd) + ports = nb.start(debug_log=debug, multi_graphd=configs.multi_graphd, enable_ssl=configs.enable_ssl, enable_graph_ssl=configs.enable_graph_ssl, enable_meta_ssl=configs.enable_meta_ssl, ca_signed=configs.ca_signed) # Load csv data pool = get_conn_pool(address, ports[0]) diff --git a/tests/requirements.txt b/tests/requirements.txt index 1b4170e593e..b61984421d1 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ 
-11,3 +11,4 @@ pytest-yapf3==0.5.1 filelock==3.0.12 ply==3.10 pyyaml==5.4 +fastcov==1.13 diff --git a/tests/tck/conftest.py b/tests/tck/conftest.py index 1a0abf392b3..663273145b0 100644 --- a/tests/tck/conftest.py +++ b/tests/tck/conftest.py @@ -39,6 +39,7 @@ register_dict = {} register_lock = threading.Lock() + def normalize_outline_scenario(request, name): for group in example_pattern.findall(name): fixval = request.getfixturevalue(group) @@ -167,6 +168,7 @@ def new_space(request, options, session, graph_spaces): graph_spaces["space_desc"] = space_desc graph_spaces["drop_space"] = True + @given(parse("Any graph")) def new_space(request, session, graph_spaces): name = "EmptyGraph_" + space_generator() @@ -182,6 +184,7 @@ def new_space(request, session, graph_spaces): graph_spaces["space_desc"] = space_desc graph_spaces["drop_space"] = True + @given(parse('load "{data}" csv data to a new space')) def import_csv_data(request, data, graph_spaces, session, pytestconfig): data_dir = os.path.join(DATA_DIR, normalize_outline_scenario(request, data)) @@ -221,6 +224,18 @@ def try_to_execute_query(query, graph_spaces, session, request): for stmt in ngql.split(';'): exec_query(request, stmt, session, graph_spaces, True) +@when(parse("clone a new space according to current space")) +def clone_space(graph_spaces, session, request): + space_desc = graph_spaces["space_desc"] + current_space = space_desc._name + new_space = "EmptyGraph_" + space_generator() + space_desc._name = new_space + resp_ok(session, space_desc.drop_stmt(), True) + ngql = "create space " + new_space + " as " + current_space; + exec_query(request, ngql, session, graph_spaces) + resp_ok(session, space_desc.use_stmt(), True) + graph_spaces["space_desc"] = space_desc + graph_spaces["drop_space"] = True @given("wait all indexes ready") @when("wait all indexes ready") @@ -477,6 +492,7 @@ def check_plan(plan, graph_spaces): differ = PlanDiffer(resp.plan_desc(), expect) assert differ.diff(), differ.err_msg() + 
@when(parse("executing query via graph {index:d}:\n{query}")) def executing_query(query, index, graph_spaces, session_from_first_conn_pool, session_from_second_conn_pool, request): assert index < 2, "There exists only 0,1 graph: {}".format(index) @@ -486,12 +502,14 @@ def executing_query(query, index, graph_spaces, session_from_first_conn_pool, se else: exec_query(request, ngql, session_from_second_conn_pool, graph_spaces) + @then(parse("the result should be, the first {n:d} records in order, and register {column_name} as a list named {key}:\n{result}")) def result_should_be_in_order_and_register_key(n, column_name, key, request, result, graph_spaces): assert n > 0, f"The records number should be an positive integer: {n}" result_ds = cmp_dataset(request, graph_spaces, result, order=True, strict=True, contains=CmpType.CONTAINS, first_n_records=n) register_result_key(request.node.name, result_ds, column_name, key) + def register_result_key(test_name, result_ds, column_name, key): if column_name.encode() not in result_ds.column_names: assert False, f"{column_name} not in result columns {result_ds.column_names}." 
@@ -501,6 +519,7 @@ def register_result_key(test_name, result_ds, column_name, key): register_dict[test_name + key] = val; register_lock.release() + @when(parse("executing query, fill replace holders with element index of {indices} in {keys}:\n{query}")) def executing_query_with_params(query, indices, keys, graph_spaces, session, request): indices_list=[int(v) for v in indices.split(",")] diff --git a/tests/tck/features/fetch/FetchEmpty.feature b/tests/tck/features/fetch/FetchEmpty.feature index 43a5821d054..3fd183e831b 100644 --- a/tests/tck/features/fetch/FetchEmpty.feature +++ b/tests/tck/features/fetch/FetchEmpty.feature @@ -44,9 +44,8 @@ Feature: Fetch prop on empty tag/edge | ("1":zero_prop_tag_0) | When executing query: """ - GO FROM "1" OVER zero_prop_edge - YIELD zero_prop_edge._dst as id - | FETCH PROP ON zero_prop_tag_0 $-.id + GO FROM "1" OVER zero_prop_edge YIELD zero_prop_edge._dst as id | + FETCH PROP ON zero_prop_tag_0 $-.id """ Then the result should be, in any order, with relax comparison: | vertices_ | @@ -75,9 +74,8 @@ Feature: Fetch prop on empty tag/edge | edges_ | When executing query: """ - GO FROM "1" OVER zero_prop_edge - YIELD zero_prop_edge._src as src, zero_prop_edge._dst as dst - | FETCH PROP ON zero_prop_edge $-.src->$-.dst + GO FROM "1" OVER zero_prop_edge YIELD zero_prop_edge._src as src, zero_prop_edge._dst as dst | + FETCH PROP ON zero_prop_edge $-.src->$-.dst """ Then the result should be, in any order: | edges_ | diff --git a/tests/tck/features/fetch/FetchVertices.intVid.feature b/tests/tck/features/fetch/FetchVertices.intVid.feature index 30e092fcf6d..2ea116fe320 100644 --- a/tests/tck/features/fetch/FetchVertices.intVid.feature +++ b/tests/tck/features/fetch/FetchVertices.intVid.feature @@ -104,7 +104,8 @@ Feature: Fetch Int Vid Vertices Scenario: Fetch from pipe When executing query: """ - GO FROM hash('Boris Diaw') over like YIELD like._dst as id | FETCH PROP ON player $-.id YIELD player.name, player.age + GO FROM 
hash('Boris Diaw') over like YIELD like._dst as id | + FETCH PROP ON player $-.id YIELD player.name, player.age """ Then the result should be, in any order, and the columns 0 should be hashed: | VertexID | player.name | player.age | @@ -113,22 +114,24 @@ Feature: Fetch Int Vid Vertices # empty input When executing query: """ - GO FROM hash('NON EXIST VERTEX ID') over like YIELD like._dst as id | FETCH PROP ON player $-.id yield player.name + GO FROM hash('NON EXIST VERTEX ID') over like YIELD like._dst as id | + FETCH PROP ON player $-.id yield player.name """ Then the result should be, in any order: | VertexID | player.name | When executing query: """ - GO FROM hash('NON EXIST VERTEX ID') over serve YIELD serve._dst as id, serve.start_year as start - | YIELD $-.id as id WHERE $-.start > 20000 | FETCH PROP ON player $-.id yield player.name + GO FROM hash('NON EXIST VERTEX ID') over serve YIELD serve._dst as id, serve.start_year as start | + YIELD $-.id as id WHERE $-.start > 20000 | + FETCH PROP ON player $-.id yield player.name """ Then the result should be, in any order: | VertexID | player.name | # Fetch prop on multi tags of vertices from pipe When executing query: """ - GO FROM hash("Boris Diaw") over like YIELD like._dst as id - | FETCH PROP ON player, team, bachelor $-.id YIELD player.name, player.age, team.name, bachelor.name, bachelor.speciality + GO FROM hash("Boris Diaw") over like YIELD like._dst as id | + FETCH PROP ON player, team, bachelor $-.id YIELD player.name, player.age, team.name, bachelor.name, bachelor.speciality """ Then the result should be, in any order, and the columns 0 should be hashed: | VertexID | player.name | player.age | team.name | bachelor.name | bachelor.speciality | @@ -136,8 +139,8 @@ Feature: Fetch Int Vid Vertices | "Tony Parker" | "Tony Parker" | 36 | EMPTY | EMPTY | EMPTY | When executing query: """ - GO FROM hash('Boris Diaw') over like YIELD like._dst as id - | FETCH PROP ON player, bachelor $-.id YIELD player.name, 
player.age, bachelor.name, bachelor.speciality + GO FROM hash('Boris Diaw') over like YIELD like._dst as id | + FETCH PROP ON player, bachelor $-.id YIELD player.name, player.age, bachelor.name, bachelor.speciality """ Then the result should be, in any order, and the columns 0 should be hashed: | VertexID | player.name | player.age | bachelor.name | bachelor.speciality | @@ -266,8 +269,8 @@ Feature: Fetch Int Vid Vertices Scenario: Fetch vertices and then GO When executing query: """ - FETCH PROP ON player hash('Tony Parker') YIELD player.name as Name - | GO FROM $-.VertexID OVER like + FETCH PROP ON player hash('Tony Parker') YIELD player.name as Name | + GO FROM $-.VertexID OVER like """ Then the result should be, in any order, and the columns 0 should be hashed: | like._dst | @@ -276,18 +279,33 @@ Feature: Fetch Int Vid Vertices | "Tim Duncan" | Scenario: Typical errors + When executing query: + """ + FETCH PROP ON player hash('Boris Diaw') YIELD vertex + """ + Then a SyntaxError should be raised at runtime: please add alias when using vertex. near `vertex' + When executing query: + """ + FETCH PROP ON player hash('Boris Diaw') YIELD edge as a + """ + Then a SemanticError should be raised at runtime: illegal yield clauses `EDGE AS a' + When executing query: + """ + FETCH PROP ON player hash('Boris Diaw') YIELD src(edge) + """ + Then a SemanticError should be raised at runtime: illegal yield clauses `src(EDGE)' # not support get src property When executing query: """ FETCH PROP ON player hash('Boris Diaw') YIELD $^.player.name, player.age """ - Then a SemanticError should be raised at runtime: Unsupported src/dst property expression in yield. + Then a SemanticError should be raised at runtime: unsupported src/dst property expression in yield. 
# not support get dst property When executing query: """ FETCH PROP ON player hash('Boris Diaw') YIELD $$.player.name, player.age """ - Then a SemanticError should be raised at runtime: Unsupported src/dst property expression in yield. + Then a SemanticError should be raised at runtime: unsupported src/dst property expression in yield. # yields not existing tag When executing query: """ @@ -331,3 +349,56 @@ Feature: Fetch Int Vid Vertices GO FROM hash('NON EXIST VERTEX ID') OVER serve | FETCH PROP ON team $- """ Then a SyntaxError should be raised at runtime: + + Scenario: format yield + When executing query: + """ + FETCH PROP ON * hash('Boris Diaw') YIELD id(vertex) + """ + Then the result should be, in any order, and the columns 0, 1 should be hashed: + | VertexID | id(VERTEX) | + | "Boris Diaw" | "Boris Diaw" | + When executing query: + """ + FETCH PROP ON * hash('Boris Diaw') YIELD id(vertex), player.age + """ + Then the result should be, in any order, and the columns 0, 1 should be hashed: + | VertexID | id(VERTEX) | player.age | + | "Boris Diaw" | "Boris Diaw" | 36 | + When executing query: + """ + FETCH PROP ON * hash('Boris Diaw') YIELD id(vertex), player.age, vertex as node + """ + Then the result should be, in any order, and the columns 0, 1 should be hashed: + | VertexID | id(VERTEX) | player.age | node | + | "Boris Diaw" | "Boris Diaw" | 36 | ("Boris Diaw":player{name:"Boris Diaw", age:36}) | + When executing query: + """ + FETCH PROP ON * hash('Boris Diaw') YIELD vertex as node + """ + Then the result should be, in any order, and the columns 0 should be hashed: + | VertexID | node | + | "Boris Diaw" | ("Boris Diaw":player{name:"Boris Diaw", age:36}) | + When executing query: + """ + FETCH PROP ON * hash("Tim Duncan") YIELD player.name, player.age, team.name, bachelor.name, bachelor.speciality, vertex as node + """ + Then the result should be, in any order, and the columns 0 should be hashed: + | VertexID | player.name | player.age | team.name | 
bachelor.name | bachelor.speciality | node | + | "Tim Duncan" | "Tim Duncan" | 42 | EMPTY | "Tim Duncan" | "psychology" | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + When executing query: + """ + GO FROM hash("Tim Duncan") OVER like YIELD like._dst as id | + FETCH PROP ON * $-.id YIELD VERTEX as node + """ + Then the result should be, in any order, and the columns 0 should be hashed: + | VertexID | node | + | "Manu Ginobili" | ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}) | + | "Tony Parker" | ("Tony Parker" :player{age: 36, name: "Tony Parker"}) | + When executing query: + """ + FETCH PROP ON * hash('NON EXIST VERTEX ID'), hash('Boris Diaw') yield player.name, id(vertex) + """ + Then the result should be, in any order, and the columns 0, 2 should be hashed: + | VertexID | player.name | id(VERTEX) | + | "Boris Diaw" | "Boris Diaw" | "Boris Diaw" | diff --git a/tests/tck/features/fetch/FetchVertices.strVid.feature b/tests/tck/features/fetch/FetchVertices.strVid.feature index 4dee9e65a1d..8758c20e853 100644 --- a/tests/tck/features/fetch/FetchVertices.strVid.feature +++ b/tests/tck/features/fetch/FetchVertices.strVid.feature @@ -213,8 +213,8 @@ Feature: Fetch String Vertices | "Tim Duncan" | "Tim Duncan" | 42 | When executing query: """ - GO FROM 'Boris Diaw' over like YIELD like._dst as id - | FETCH PROP ON player, bachelor $-.id YIELD player.name, player.age, bachelor.name, bachelor.speciality + GO FROM 'Boris Diaw' over like YIELD like._dst as id | + FETCH PROP ON player, bachelor $-.id YIELD player.name, player.age, bachelor.name, bachelor.speciality """ Then the result should be, in any order: | VertexID | player.name | player.age | bachelor.name | bachelor.speciality | @@ -223,8 +223,8 @@ Feature: Fetch String Vertices # Fetch prop on multi tags of vertices from pipe When executing query: """ - GO FROM "Boris Diaw" over like YIELD like._dst as id - | FETCH PROP ON player, team, 
bachelor $-.id YIELD player.name, player.age, team.name, bachelor.name, bachelor.speciality + GO FROM "Boris Diaw" over like YIELD like._dst as id | + FETCH PROP ON player, team, bachelor $-.id YIELD player.name, player.age, team.name, bachelor.name, bachelor.speciality """ Then the result should be, in any order: | VertexID | player.name | player.age | team.name | bachelor.name | bachelor.speciality | @@ -232,8 +232,8 @@ Feature: Fetch String Vertices | "Tony Parker" | "Tony Parker" | 36 | EMPTY | EMPTY | EMPTY | When executing query: """ - GO FROM 'Boris Diaw' over like YIELD like._dst as id - | FETCH PROP ON player, bachelor $-.id YIELD player.name, player.age, bachelor.name, bachelor.speciality + GO FROM 'Boris Diaw' over like YIELD like._dst as id | + FETCH PROP ON player, bachelor $-.id YIELD player.name, player.age, bachelor.name, bachelor.speciality """ Then the result should be, in any order: | VertexID | player.name | player.age | bachelor.name | bachelor.speciality | @@ -248,8 +248,9 @@ Feature: Fetch String Vertices | VertexID | player.name | When executing query: """ - GO FROM 'NON EXIST VERTEX ID' over serve YIELD serve._dst as id, serve.start_year as start - | YIELD $-.id as id WHERE $-.start > 20000 | FETCH PROP ON player $-.id yield player.name + GO FROM 'NON EXIST VERTEX ID' over serve YIELD serve._dst as id, serve.start_year as start | + YIELD $-.id as id WHERE $-.start > 20000 | + FETCH PROP ON player $-.id yield player.name """ Then the result should be, in any order: | VertexID | player.name | @@ -263,8 +264,8 @@ Feature: Fetch String Vertices | "Tim Duncan" | "Tim Duncan" | 42 | "Tim Duncan" | "psychology" | When executing query: """ - GO FROM "Boris Diaw" over like YIELD like._dst as id - | FETCH PROP ON * $-.id YIELD player.name, player.age, team.name, bachelor.name, bachelor.speciality + GO FROM "Boris Diaw" over like YIELD like._dst as id | + FETCH PROP ON * $-.id YIELD player.name, player.age, team.name, bachelor.name, 
bachelor.speciality """ Then the result should be, in any order: | VertexID | player.name | player.age | team.name | bachelor.name | bachelor.speciality | @@ -355,8 +356,8 @@ Feature: Fetch String Vertices | "Tracy McGrady" | When executing query: """ - FETCH PROP ON player 'Tony Parker' YIELD player.name as Name - | GO FROM $-.Name OVER like + FETCH PROP ON player 'Tony Parker' YIELD player.name as Name | + GO FROM $-.Name OVER like """ Then the result should be, in any order: | like._dst | @@ -365,8 +366,8 @@ Feature: Fetch String Vertices | "Tim Duncan" | When executing query: """ - FETCH PROP ON player 'Tony Parker' YIELD player.name as Name - | GO FROM $-.VertexID OVER like + FETCH PROP ON player 'Tony Parker' YIELD player.name as Name | + GO FROM $-.VertexID OVER like """ Then the result should be, in any order: | like._dst | @@ -375,6 +376,21 @@ Feature: Fetch String Vertices | "Tim Duncan" | Scenario: Typical errors + When executing query: + """ + FETCH PROP ON player 'Boris Diaw' YIELD vertex + """ + Then a SyntaxError should be raised at runtime: please add alias when using vertex. 
near `vertex' + When executing query: + """ + FETCH PROP ON player 'Boris Diaw' YIELD edge as a + """ + Then a SemanticError should be raised at runtime: illegal yield clauses `EDGE AS a' + When executing query: + """ + FETCH PROP ON player 'Boris Diaw' YIELD src(edge) + """ + Then a SemanticError should be raised at runtime: illegal yield clauses `src(EDGE)' # Fetch Vertices not support get src property When executing query: """ @@ -444,3 +460,56 @@ Feature: Fetch String Vertices GO FROM 'NON EXIST VERTEX ID' OVER serve | FETCH PROP ON team $- """ Then a SyntaxError should be raised at runtime: + + Scenario: format yield + When executing query: + """ + FETCH PROP ON * 'Boris Diaw' YIELD id(vertex) + """ + Then the result should be, in any order: + | VertexID | id(VERTEX) | + | "Boris Diaw" | "Boris Diaw" | + When executing query: + """ + FETCH PROP ON * 'Boris Diaw' YIELD id(vertex), player.age + """ + Then the result should be, in any order: + | VertexID | id(VERTEX) | player.age | + | "Boris Diaw" | "Boris Diaw" | 36 | + When executing query: + """ + FETCH PROP ON * 'Boris Diaw' YIELD id(vertex), player.age, vertex as node + """ + Then the result should be, in any order: + | VertexID | id(VERTEX) | player.age | node | + | "Boris Diaw" | "Boris Diaw" | 36 | ("Boris Diaw":player{name:"Boris Diaw", age:36}) | + When executing query: + """ + FETCH PROP ON * 'Boris Diaw' YIELD vertex as node + """ + Then the result should be, in any order: + | VertexID | node | + | "Boris Diaw" | ("Boris Diaw":player{name:"Boris Diaw", age:36}) | + When executing query: + """ + FETCH PROP ON * "Tim Duncan" YIELD player.name, player.age, team.name, bachelor.name, bachelor.speciality, vertex as node + """ + Then the result should be, in any order: + | VertexID | player.name | player.age | team.name | bachelor.name | bachelor.speciality | node | + | "Tim Duncan" | "Tim Duncan" | 42 | EMPTY | "Tim Duncan" | "psychology" | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: 
"psychology"} :player{age: 42, name: "Tim Duncan"}) | + When executing query: + """ + GO FROM "Tim Duncan" OVER like YIELD like._dst as id | + FETCH PROP ON * $-.id YIELD vertex as node + """ + Then the result should be, in any order: + | VertexID | node | + | "Manu Ginobili" | ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}) | + | "Tony Parker" | ("Tony Parker" :player{age: 36, name: "Tony Parker"}) | + When executing query: + """ + FETCH PROP ON * 'NON EXIST VERTEX ID', 'Boris Diaw' yield player.name, id(vertex) + """ + Then the result should be, in any order: + | VertexID | player.name | id(VERTEX) | + | "Boris Diaw" | "Boris Diaw" | "Boris Diaw" | diff --git a/tests/tck/features/go/Orderby.feature b/tests/tck/features/go/Orderby.feature index dee1730e2b8..989407486a6 100644 --- a/tests/tck/features/go/Orderby.feature +++ b/tests/tck/features/go/Orderby.feature @@ -38,9 +38,9 @@ Feature: Orderby Sentence | name | start | team | When executing query: """ - GO FROM "Marco Belinelli" OVER serve YIELD $^.player.name as name, serve.start_year as start, $$.team.name as team - | YIELD $-.name as name WHERE $-.start > 20000 - | ORDER BY $-.name + GO FROM "Marco Belinelli" OVER serve YIELD $^.player.name as name, serve.start_year as start, $$.team.name as team | + YIELD $-.name as name WHERE $-.start > 20000 | + ORDER BY $-.name """ Then the result should be, in order, with relax comparison: | name | diff --git a/tests/tck/features/insert/InsertEdgeOnDiffParts.feature b/tests/tck/features/insert/InsertEdgeOnDiffParts.feature new file mode 100644 index 00000000000..08c2d63ef79 --- /dev/null +++ b/tests/tck/features/insert/InsertEdgeOnDiffParts.feature @@ -0,0 +1,40 @@ +# Copyright (c) 2021 vesoft inc. All rights reserved. +# +# This source code is licensed under Apache 2.0 License, +# attached with Common Clause Condition 1.0, found in the LICENSES directory. 
+Feature: Insert vertex and edge with if not exists + + Scenario: insert edge with default value + Given an empty graph + And create a space with following options: + | partition_num | 9 | + | replica_factor | 1 | + | vid_type | FIXED_STRING(20) | + And having executed: + """ + CREATE TAG IF NOT EXISTS V(); + CREATE EDGE IF NOT EXISTS E(rank timestamp default timestamp()); + """ + When try to execute query: + """ + INSERT VERTEX V() VALUES "v1":() + """ + Then the execution should be successful + When try to execute query: + """ + INSERT VERTEX V() VALUES "v2":() + """ + Then the execution should be successful + When try to execute query: + """ + INSERT EDGE E() VALUES "v1"->"v2":() + """ + Then the execution should be successful + When executing query: + """ + (GO FROM "v1" over E yield E.rank union GO FROM "v2" over E REVERSELY yield E.rank) | yield count(*) AS count + """ + Then the result should be, in any order: + | count | + | 1 | + And drop the used space diff --git a/tests/tck/features/mutate/InsertWithTimeType.feature b/tests/tck/features/mutate/InsertWithTimeType.feature index 6dde75b6774..23bdc90b4b5 100644 --- a/tests/tck/features/mutate/InsertWithTimeType.feature +++ b/tests/tck/features/mutate/InsertWithTimeType.feature @@ -111,9 +111,9 @@ Feature: Insert with time-dependent types """ UPDATE VERTEX "test" SET - tag_date.f_date = Date("2018-03-04"), - tag_date.f_time = Time("22:01:00"), - tag_date.f_datetime = DateTime("2018-03-04T22:30:40") + tag_date.f_date = Date({year: 2018, month: 3, day: 4}), + tag_date.f_time = Time({hour: 22, minute: 1, second: 0, millisecond: 0, microsecond: 0}), + tag_date.f_datetime = DateTime({year: 2018, month: 3, day: 4, hour: 22, minute: 30, second: 40, millisecond: 0, microsecond: 0}) YIELD f_date, f_time, f_datetime; """ Then the result should be, in any order: diff --git a/tests/tck/features/schema/CreateSpaceAs.feature b/tests/tck/features/schema/CreateSpaceAs.feature new file mode 100644 index 
00000000000..ac223c09edc --- /dev/null +++ b/tests/tck/features/schema/CreateSpaceAs.feature @@ -0,0 +1,160 @@ +# Copyright (c) 2021 vesoft inc. All rights reserved. +# +# This source code is licensed under Apache 2.0 License, +# attached with Common Clause Condition 1.0, found in the LICENSES directory. +Feature: Create space as another space + + Scenario: clone space + # Space + Given an empty graph + And create a space with following options: + | partition_num | 9 | + | replica_factor | 1 | + | vid_type | FIXED_STRING(20) | + # Schema + When executing query: + """ + create tag t1 (col1 int); + """ + Then the execution should be successful + When executing query: + """ + create tag index i1 on t1(col1); + """ + Then the execution should be successful + When executing query: + """ + create edge e1 (col1 int); + """ + Then the execution should be successful + When executing query: + """ + create edge index i2 on e1(col1); + """ + Then the execution should be successful + And wait 3 seconds + # insert + When executing query: + """ + insert vertex t1(col1) values "1": (1) + """ + Then the execution should be successful + When executing query: + """ + insert edge e1(col1) VALUES "1" -> "2":(1); + """ + Then the execution should be successful + # query + When executing query: + """ + fetch prop on t1 "1"; + """ + Then the result should be, in any order: + | vertices_ | + | ("1" :t1{col1: 1}) | + When executing query: + """ + lookup on t1 where t1.col1 == 1; + """ + Then the result should be, in any order: + | VertexID | + | "1" | + When executing query: + """ + fetch prop on e1 "1" -> "2"; + """ + Then the result should be, in any order: + | edges_ | + | [:e1 "1"->"2" @0 {col1: 1}] | + When executing query: + """ + lookup on e1 where e1.col1 == 1; + """ + Then the result should be, in any order: + | SrcVID | DstVID | Ranking | + | "1" | "2" | 0 | + # clone space + When clone a new space according to current space + Then the execution should be successful + # check 
schema is really cloned + When executing query: + """ + show tags; + """ + Then the result should be, in any order: + | Name | + | "t1" | + When executing query: + """ + show edges; + """ + Then the result should be, in any order: + | Name | + | "e1" | + When executing query: + """ + show tag indexes; + """ + Then the result should be, in any order: + | Index Name | By Tag | Columns | + | "i1" | "t1" | ["col1"] | + When executing query: + """ + show edge indexes; + """ + Then the result should be, in any order: + | Index Name | By Edge | Columns | + | "i2" | "e1" | ["col1"] | + # check no data in new space + When executing query: + """ + fetch prop on t1 "1"; + """ + Then the result should be, in any order: + | vertices_ | + When executing query: + """ + fetch prop on e1 "1" -> "2"; + """ + Then the result should be, in any order: + | edges_ | + # write new data into cloned space + When executing query: + """ + insert vertex t1(col1) values "1": (2) + """ + Then the execution should be successful + When executing query: + """ + insert edge e1(col1) VALUES "1" -> "2":(2); + """ + # query + When executing query: + """ + fetch prop on t1 "1"; + """ + Then the result should be, in any order: + | vertices_ | + | ("1" :t1{col1: 2}) | + When executing query: + """ + lookup on t1 where t1.col1 == 2; + """ + Then the result should be, in any order: + | VertexID | + | "1" | + When executing query: + """ + fetch prop on e1 "1" -> "2"; + """ + Then the result should be, in any order: + | edges_ | + | [:e1 "1"->"2" @0 {col1: 2}] | + When executing query: + """ + lookup on e1 where e1.col1 == 2; + """ + Then the result should be, in any order: + | SrcVID | DstVID | Ranking | + | "1" | "2" | 0 | + Then drop the used space diff --git a/tests/tck/features/schema/Schema.feature b/tests/tck/features/schema/Schema.feature index 46cb9e011de..4678480aec8 100644 --- a/tests/tck/features/schema/Schema.feature +++ b/tests/tck/features/schema/Schema.feature @@ -577,7 +577,7 @@ Feature: 
Insert string vid of vertex and edge """ INSERT EDGE e() VALUES "1"->"2":() """ - Then a ExecutionError should be raised at runtime: Storage Error: The not null field doesn't have a default value. + Then a SemanticError should be raised at runtime: The property `description' is not nullable and has no default value. # test alter edge with timestamp default When executing query: """ diff --git a/tests/tck/job/Job.feature b/tests/tck/job/Job.feature index fb129036420..bace1f4a71a 100644 --- a/tests/tck/job/Job.feature +++ b/tests/tck/job/Job.feature @@ -32,8 +32,30 @@ Feature: Submit job space requirements """ Then a SemanticError should be raised at runtime: - Scenario: Not existed job + Scenario: Operate job require space: Given an empty graph + When executing query: + """ + SHOW JOB 123456; + """ + Then a SemanticError should be raised at runtime: Space was not chosen. + When executing query: + """ + STOP JOB 123456; + """ + Then a SemanticError should be raised at runtime: Space was not chosen. + When executing query: + """ + RECOVER JOB; + """ + Then a SemanticError should be raised at runtime: Space was not chosen. + + Scenario: Not exist job + Given create a space with following options: + | partition_num | 9 | + | replica_factor | 1 | + | vid_type | FIXED_STRING(20) | + And wait 6 seconds When executing query: """ SHOW JOB 123456; @@ -98,6 +120,66 @@ Feature: Submit job space requirements """ Then an ExecutionError should be raised at runtime: Save job failure! 
+ Scenario: Submit and show jobs in other space + Given create a space with following options: + | partition_num | 9 | + | replica_factor | 1 | + | vid_type | FIXED_STRING(20) | + And wait 6 seconds + When executing query: + """ + SUBMIT JOB COMPACT; + """ + Then the result should be, in any order: + | New Job Id | + | /\d+/ | + And wait 1 seconds + When executing query: + """ + SUBMIT JOB FLUSH; + """ + Then the result should be, in any order: + | New Job Id | + | /\d+/ | + And wait 1 seconds + When executing query: + """ + SUBMIT JOB STATS; + """ + Then the result should be, in any order: + | New Job Id | + | /\d+/ | + And wait 10 seconds + When executing query: + """ + SHOW JOBS; + """ + Then the result should be, the first 3 records in order, and register Job Id as a list named job_id: + | Job Id | Command | Status | Start Time | Stop Time | + | /\d+/ | "STATS" | "FINISHED" | /\w+/ | /\w+/ | + | /\d+/ | "FLUSH" | "FINISHED" | /\w+/ | /\w+/ | + | /\d+/ | "COMPACT" | "FINISHED" | /\w+/ | /\w+/ | + Given create a space with following options: + | partition_num | 9 | + | replica_factor | 1 | + | vid_type | FIXED_STRING(20) | + When executing query: + """ + SHOW JOBS; + """ + Then the result should be, in order: + | Job Id | Command | Status | Start Time | Stop Time | + When executing query, fill replace holders with element index of 0 in job_id: + """ + SHOW JOB {}; + """ + Then an ExecutionError should be raised at runtime:Job not in chosen space! + When executing query, fill replace holders with element index of 0 in job_id: + """ + STOP JOB {}; + """ + Then an ExecutionError should be raised at runtime:Job not in chosen space! + # This is skipped becuase it is hard to simulate the situation # When executing query: # """