From 3215984d318ee5533c94431a4b880d80fdeeb858 Mon Sep 17 00:00:00 2001 From: "hs.zhang" <22708345+cangfengzhs@users.noreply.github.com> Date: Sun, 27 Nov 2022 08:21:38 +0800 Subject: [PATCH 1/7] Ci listener (#4912) * start elasticsearch container in CI * setup listener in CI * adjust Co-authored-by: Sophie <84560950+Sophie-Xie@users.noreply.github.com> --- .github/workflows/nightly.yml | 12 ++++ .github/workflows/pull_request.yml | 12 ++++ conf/nebula-storaged-listener.conf.default | 58 ++++++++++++++++++ tests/README.md | 6 +- tests/common/nebula_service.py | 61 +++++++++++++------ tests/nebula-test-run.py | 1 + tests/tck/cluster/Example.feature | 6 +- tests/tck/cluster/terminate.feature | 2 +- tests/tck/conftest.py | 22 +++++-- .../tck/features/admin/Authentication.feature | 2 +- tests/tck/features/admin/Sessions.feature | 2 +- 11 files changed, 151 insertions(+), 33 deletions(-) create mode 100644 conf/nebula-storaged-listener.conf.default diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 63dd745aff7..4c02c578a8f 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -220,6 +220,18 @@ jobs: OSS_DIR: nebula-graph/package/nightly container: image: vesoft/nebula-dev:${{ matrix.os }} + services: + elasticsearch: + image: elasticsearch:7.17.7 + ports: + - 9200:9200 + env: + discovery.type: single-node + options: >- + --health-cmd "curl elasticsearch:9200" + --health-interval 10s + --health-timeout 5s + --health-retries 5 steps: - uses: webiny/action-post-run@2.0.1 with: diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index bbfa49a996b..97cf436339e 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -72,6 +72,18 @@ jobs: volumes: - /tmp/ccache/nebula/${{ matrix.os }}-${{ matrix.compiler }}:/tmp/ccache/nebula/${{ matrix.os }}-${{ matrix.compiler }} options: --cap-add=SYS_PTRACE + services: + elasticsearch: + image: elasticsearch:7.17.7 + ports: + - 9200:9200 + env: + discovery.type: single-node + options: >- + --health-cmd "curl elasticsearch:9200" + --health-interval 10s + --health-timeout 5s + --health-retries 5 steps: - uses: webiny/action-post-run@2.0.1 with: diff --git a/conf/nebula-storaged-listener.conf.default b/conf/nebula-storaged-listener.conf.default new file mode 100644 index 00000000000..2eb26d95c99 --- /dev/null +++ b/conf/nebula-storaged-listener.conf.default @@ -0,0 +1,58 @@ +########## nebula-storaged-listener ########### +########## basics ########## +# Whether to run as a daemon process +--daemonize=true +# The file to host the process id +--pid_file=pids_listener/nebula-storaged.pid +# Whether to use the configuration obtained from the configuration file +--local_config=true + +########## logging ########## +# The directory to host logging files +--log_dir=logs_listener +# Log level, 0, 1, 2, 3 for INFO, WARNING, ERROR, FATAL respectively +--minloglevel=0 +# Verbose log level, 1, 2, 3, 4, the higher of the level, the more verbose of the logging +--v=0 +# Maximum seconds to buffer the log messages +--logbufsecs=0 +# Whether to redirect stdout and stderr to separate output files +--redirect_stdout=true +# Destination filename of stdout and stderr, which will also reside in log_dir. +--stdout_log_file=storaged-stdout.log +--stderr_log_file=storaged-stderr.log +# Copy log messages at or above this level to stderr in addition to logfiles. The numbers of severity levels INFO, WARNING, ERROR, and FATAL are 0, 1, 2, and 3, respectively. 
+--stderrthreshold=2
+# Whether logging files' names contain a timestamp.
+--timestamp_in_logfile_name=true
+
+########## networking ##########
+# Meta server address
+--meta_server_addrs=127.0.0.1:9559
+# Local ip
+--local_ip=127.0.0.1
+# Storage daemon listening port
+--port=9789
+# HTTP service ip
+--ws_ip=127.0.0.1
+# HTTP service port
+--ws_http_port=19789
+# Heartbeat interval with the meta service, in seconds
+--heartbeat_interval_secs=10
+
+########## storage ##########
+# Listener WAL directory. Only one path is allowed.
+--listener_path=data/listener
+# This parameter can be ignored for compatibility. Let's fill a default value of "data".
+--data_path=data
+# The type of part manager, [memory | meta]
+--part_man_type=memory
+# The default reserved bytes for one batch operation
+--rocksdb_batch_size=4096
+# The default block cache size used in BlockBasedTable.
+# The unit is MB.
+--rocksdb_block_cache=4
+# The type of storage engine, `rocksdb', `memory', etc.
+--engine_type=rocksdb
+# The type of part, `simple', `consensus'...
+--part_type=simple
diff --git a/tests/README.md b/tests/README.md
index 7dfb484b761..2055d39c317 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -175,7 +175,7 @@ e.g.
 ```gherkin
 Feature: Nebula service termination test
   Scenario: Basic termination test
-    Given a nebulacluster with 1 graphd and 1 metad and 1 storaged
+    Given a nebulacluster with 1 graphd and 1 metad and 1 storaged and 0 listener
     When the cluster was terminated
     Then no service should still running after 4s
 ```
@@ -183,7 +183,7 @@ Feature: Nebula service termination test
 ```gherkin
 Feature: Example
   Scenario: test with disable authorize
-    Given a nebulacluster with 1 graphd and 1 metad and 1 storaged:
+    Given a nebulacluster with 1 graphd and 1 metad and 1 storaged and 0 listener:
      """
      graphd:enable_authorize=false
      """
@@ -201,7 +201,7 @@ Feature: Example
     Then the execution should be successful

   Scenario: test with enable authorize
-    Given a nebulacluster with 1 graphd and 1 metad and 1 storaged:
+    Given a nebulacluster with 1 graphd and 1 metad and 1 storaged and 0 listener:
      """
      graphd:enable_authorize=true
      """
diff --git a/tests/common/nebula_service.py b/tests/common/nebula_service.py
index 457973f5f4b..8b9f1681c51 100644
--- a/tests/common/nebula_service.py
+++ b/tests/common/nebula_service.py
@@ -41,6 +41,12 @@ def __init__(self, name, ports, suffix_index=0, params=None, is_standalone=False
         self.tcp_port, self.tcp_internal_port, self.http_port, self.https_port = ports[0:4]
         self.meta_port, self.meta_tcp_internal_port, self.meta_http_port, self.meta_https_port = ports[4:8]
         self.storage_port, self.storage_tcp_internal_port, self.storage_http_port, self.storage_https_port = ports[8:12]
+        if name == "listener":
+            self.binary_name = "storaged"
+            self.conf_name = "storaged-listener"
+        else:
+            self.binary_name = name
+            self.conf_name = name
         self.suffix_index = suffix_index
         self.params = params
         self.host = '127.0.0.1'
@@ -56,14 +62,14 @@ def _format_nebula_command(self):
         if self.is_sa == False:
             process_params = {
                 'log_dir': 'logs{}'.format(self.suffix_index),
-                'pid_file': 'pids{}/nebula-{}.pid'.format(self.suffix_index, self.name),
+                'pid_file': 'pids{}/nebula-{}.pid'.format(self.suffix_index, self.binary_name),
                 'port': self.tcp_port,
                 'ws_http_port': self.http_port,
             }
         else:
             process_params = {
                 'log_dir': 'logs{}'.format(self.suffix_index),
-                'pid_file': 'pids{}/nebula-{}.pid'.format(self.suffix_index, self.name),
+                'pid_file': 'pids{}/nebula-{}.pid'.format(self.suffix_index, self.binary_name),
                 'port': self.tcp_port,
'ws_http_port': self.http_port, 'meta_port': self.meta_port, @@ -72,16 +78,16 @@ def _format_nebula_command(self): 'ws_storage_http_port': self.storage_http_port, } # data path - if self.name.upper() != 'GRAPHD': + if self.binary_name.upper() != 'GRAPHD': process_params['data_path'] = 'data{}/{}'.format( - self.suffix_index, self.name + self.suffix_index, self.binary_name ) process_params.update(self.params) cmd = [ - 'bin/nebula-{}'.format(self.name), + 'bin/nebula-{}'.format(self.binary_name), '--flagfile', - 'conf/nebula-{}.conf'.format(self.name), + 'conf/nebula-{}.conf'.format(self.conf_name), ] + ['--{}={}'.format(key, value) for key, value in process_params.items()] return " ".join(cmd) @@ -126,13 +132,14 @@ def __init__( metad_num=1, storaged_num=1, graphd_num=1, + listener_num=1, ca_signed=False, debug_log=True, use_standalone=False, query_concurrently=False, **kwargs, ): - assert graphd_num > 0 and metad_num > 0 and storaged_num > 0 + assert graphd_num > 0 and metad_num > 0 and storaged_num > 0 and listener_num >= 0 self.build_dir = str(build_dir) self.src_dir = str(src_dir) self.work_dir = os.path.join( @@ -140,21 +147,24 @@ def __init__( 'server_' + time.strftime('%Y-%m-%dT%H-%M-%S', time.localtime()), ) self.pids = {} - self.metad_num, self.storaged_num, self.graphd_num = ( + self.metad_num, self.storaged_num, self.graphd_num, self.listener_num = ( metad_num, storaged_num, graphd_num, + listener_num, ) - self.metad_processes, self.storaged_processes, self.graphd_processes = ( + self.metad_processes, self.storaged_processes, self.graphd_processes, self.listener_processes = ( + [], [], [], [], ) self.all_processes = [] self.all_ports = [] - self.metad_param, self.storaged_param, self.graphd_param = {}, {}, {} + self.metad_param, self.storaged_param, self.graphd_param, self.listener_param = {}, {}, {}, {} self.storaged_port = 0 self.graphd_port = 0 + self.listener_port = 0 self.ca_signed = ca_signed self.is_graph_ssl = ( kwargs.get("enable_graph_ssl", "false").upper() == "TRUE" @@ -175,7 +185,6 @@ def __init__( self._make_sa_params(**kwargs) self.init_standalone() - def init_standalone(self): process_count = self.metad_num + self.storaged_num + self.graphd_num ports_count = process_count * self.ports_per_process @@ -184,7 +193,7 @@ def init_standalone(self): index = 0 standalone = NebulaProcess( "standalone", - self.all_ports[index : index + ports_count ], + self.all_ports[index: index + ports_count], index, self.graphd_param, is_standalone=True @@ -205,7 +214,7 @@ def init_standalone(self): p.update_meta_server_addrs(meta_server_addrs) def init_process(self): - process_count = self.metad_num + self.storaged_num + self.graphd_num + process_count = self.metad_num + self.storaged_num + self.graphd_num + self.listener_num ports_count = process_count * self.ports_per_process self.all_ports = self._find_free_port(ports_count) index = 0 @@ -213,7 +222,7 @@ def init_process(self): for suffix_index in range(self.metad_num): metad = NebulaProcess( "metad", - self.all_ports[index : index + self.ports_per_process], + self.all_ports[index: index + self.ports_per_process], suffix_index, self.metad_param, ) @@ -223,7 +232,7 @@ def init_process(self): for suffix_index in range(self.storaged_num): storaged = NebulaProcess( "storaged", - self.all_ports[index : index + self.ports_per_process], + self.all_ports[index: index + self.ports_per_process], suffix_index, self.storaged_param, ) @@ -235,7 +244,7 @@ def init_process(self): for suffix_index in range(self.graphd_num): graphd = NebulaProcess( 
"graphd", - self.all_ports[index : index + self.ports_per_process], + self.all_ports[index: index + self.ports_per_process], suffix_index, self.graphd_param, ) @@ -244,8 +253,20 @@ def init_process(self): if suffix_index == 0: self.graphd_port = self.all_ports[0] + for suffix_index in range(self.storaged_num, self.storaged_num+self.listener_num): + listener = NebulaProcess( + "listener", + self.all_ports[index: index + self.ports_per_process], + suffix_index, + self.listener_param + ) + self.listener_processes.append(listener) + index += self.ports_per_process + if suffix_index == 0: + self.listener_port = self.all_ports[0] + self.all_processes = ( - self.metad_processes + self.storaged_processes + self.graphd_processes + self.metad_processes + self.storaged_processes + self.graphd_processes + self.listener_processes ) # update meta address meta_server_addrs = ','.join( @@ -301,11 +322,14 @@ def _make_params(self, **kwargs): self.storaged_param['raft_heartbeat_interval_secs'] = '30' self.storaged_param['skip_wait_in_rate_limiter'] = 'true' + # params for listener only + self.listener_param = copy.copy(self.storaged_param) + # params for meta only self.metad_param = copy.copy(_params) self.metad_param["default_parts_num"] = 1 - for p in [self.metad_param, self.storaged_param, self.graphd_param]: + for p in [self.metad_param, self.storaged_param, self.graphd_param, self.listener_param]: p.update(kwargs) def _make_sa_params(self, **kwargs): @@ -358,6 +382,7 @@ def _copy_nebula_conf(self): conf_path + '{}.conf.default'.format(item), self.work_dir + '/conf/{}.conf'.format(item), ) + shutil.copy(conf_path+'nebula-storaged-listener.conf.default', self.work_dir+'/conf/nebula-storaged-listener.conf') resources_dir = self.work_dir + '/share/resources/' os.makedirs(resources_dir) diff --git a/tests/nebula-test-run.py b/tests/nebula-test-run.py index d1d9dd2854c..f27850e5351 100755 --- a/tests/nebula-test-run.py +++ b/tests/nebula-test-run.py @@ -241,6 +241,7 @@ def stop_nebula(nb, configs=None): NEBULA_HOME, graphd_num=graphd_inst, storaged_num=1, + listener_num=1, debug_log=opt_is(configs.debug, "true"), ca_signed=opt_is(configs.ca_signed, "true"), enable_ssl=configs.enable_ssl, diff --git a/tests/tck/cluster/Example.feature b/tests/tck/cluster/Example.feature index 7bd274a56c7..54ca7a5df1a 100644 --- a/tests/tck/cluster/Example.feature +++ b/tests/tck/cluster/Example.feature @@ -5,7 +5,7 @@ Feature: Example Scenario: test with disable authorize - Given a nebulacluster with 1 graphd and 1 metad and 1 storaged: + Given a nebulacluster with 1 graphd and 1 metad and 1 storaged and 0 listener: """ graphd:enable_authorize=false """ @@ -23,7 +23,7 @@ Feature: Example Then an PermissionError should be raised at runtime: No permission to grant/revoke god user. Scenario: test with enable authorize - Given a nebulacluster with 1 graphd and 1 metad and 1 storaged: + Given a nebulacluster with 1 graphd and 1 metad and 1 storaged and 0 listener: """ graphd:enable_authorize=true """ @@ -41,7 +41,7 @@ Feature: Example Then an PermissionError should be raised at runtime: No permission to grant/revoke god user. 
Scenario: test with auth type is cloud - Given a nebulacluster with 1 graphd and 1 metad and 1 storaged: + Given a nebulacluster with 1 graphd and 1 metad and 1 storaged and 0 listener: """ graphd:auth_type=cloud """ diff --git a/tests/tck/cluster/terminate.feature b/tests/tck/cluster/terminate.feature index 491bdaf949d..158a2238cb4 100644 --- a/tests/tck/cluster/terminate.feature +++ b/tests/tck/cluster/terminate.feature @@ -5,6 +5,6 @@ Feature: Nebula service termination test # All nebula services should exit as expected after termination Scenario: Basic termination test - Given a nebulacluster with 1 graphd and 1 metad and 1 storaged + Given a nebulacluster with 1 graphd and 1 metad and 1 storaged and 0 listener When the cluster was terminated Then no service should still running after 4s diff --git a/tests/tck/conftest.py b/tests/tck/conftest.py index 4a88ea03273..ec763b2ecf8 100644 --- a/tests/tck/conftest.py +++ b/tests/tck/conftest.py @@ -280,7 +280,7 @@ def exec_query(request, ngql, exec_ctx, sess=None, need_try: bool = False, times @given( parse( - 'a nebulacluster with {graphd_num} graphd and {metad_num} metad and {storaged_num} storaged' + 'a nebulacluster with {graphd_num} graphd and {metad_num} metad and {storaged_num} storaged and {listener_num} listener' ) ) def given_nebulacluster( @@ -288,6 +288,7 @@ def given_nebulacluster( graphd_num, metad_num, storaged_num, + listener_num, class_fixture_variables, pytestconfig, ): @@ -297,6 +298,7 @@ def given_nebulacluster( graphd_num, metad_num, storaged_num, + listener_num, class_fixture_variables, pytestconfig, ) @@ -304,7 +306,7 @@ def given_nebulacluster( @given( parse( - 'a nebulacluster with {graphd_num} graphd and {metad_num} metad and {storaged_num} storaged:\n{params}' + 'a nebulacluster with {graphd_num} graphd and {metad_num} metad and {storaged_num} storaged and {listener_num} listener:\n{params}' ) ) def given_nebulacluster_with_param( @@ -313,21 +315,24 @@ def given_nebulacluster_with_param( graphd_num, metad_num, storaged_num, + listener_num, class_fixture_variables, pytestconfig, ): - graphd_param, metad_param, storaged_param = {}, {}, {} + graphd_param, metad_param, storaged_param, listener_param = {}, {}, {}, {} if params is not None: for param in params.splitlines(): module, config = param.strip().split(":") - assert module.lower() in ["graphd", "storaged", "metad"] + assert module.lower() in ["graphd", "storaged", "metad", "listener"] key, value = config.strip().split("=") if module.lower() == "graphd": graphd_param[key] = value elif module.lower() == "storaged": storaged_param[key] = value - else: + elif module.lower() == "metad": metad_param[key] = value + else: + listener_param[key] = value user = pytestconfig.getoption("user") password = pytestconfig.getoption("password") @@ -339,6 +344,7 @@ def given_nebulacluster_with_param( int(metad_num), int(storaged_num), int(graphd_num), + int(listener_num) ) for process in nebula_svc.graphd_processes: process.update_param(graphd_param) @@ -346,6 +352,8 @@ def given_nebulacluster_with_param( process.update_param(storaged_param) for process in nebula_svc.metad_processes: process.update_param(metad_param) + for process in nebula_svc.listener_processes: + process.update_param(listener_param) work_dir = os.path.join( build_dir, "C" + space_generator() + time.strftime('%Y-%m-%dT%H-%M-%S', time.localtime()), @@ -409,10 +417,12 @@ def executing_query(query, exec_ctx, request): exec_query(request, ngql, exec_ctx) # execute query multiple times + + @when(parse("executing query 
{times:d} times:\n{query}")) def executing_query_multiple_times(times, query, exec_ctx, request): ngql = combine_query(query) - exec_query(request, ngql, exec_ctx, times = times) + exec_query(request, ngql, exec_ctx, times=times) @when( diff --git a/tests/tck/features/admin/Authentication.feature b/tests/tck/features/admin/Authentication.feature index 27c82d342ff..7aa063872b7 100644 --- a/tests/tck/features/admin/Authentication.feature +++ b/tests/tck/features/admin/Authentication.feature @@ -4,7 +4,7 @@ Feature: Test Authentication Background: - Given a nebulacluster with 1 graphd and 1 metad and 1 storaged: + Given a nebulacluster with 1 graphd and 1 metad and 1 storaged and 0 listener: """ graphd:failed_login_attempts=5 graphd:password_lock_time_in_secs=5 diff --git a/tests/tck/features/admin/Sessions.feature b/tests/tck/features/admin/Sessions.feature index f3cbae5e922..15a14d9b662 100644 --- a/tests/tck/features/admin/Sessions.feature +++ b/tests/tck/features/admin/Sessions.feature @@ -4,7 +4,7 @@ Feature: Test sessions Background: - Given a nebulacluster with 3 graphd and 1 metad and 1 storaged + Given a nebulacluster with 3 graphd and 1 metad and 1 storaged and 0 listener @distonly Scenario: Show sessions From 645afb484d97d3af18e16318294840ec7542aa31 Mon Sep 17 00:00:00 2001 From: Doodle <13706157+critical27@users.noreply.github.com> Date: Mon, 28 Nov 2022 09:03:55 +0800 Subject: [PATCH 2/7] Minor enhancement about listener (#4931) * minor fix * remove outdate comments Co-authored-by: Yee <2520865+yixinglu@users.noreply.github.com> Co-authored-by: Sophie <84560950+Sophie-Xie@users.noreply.github.com> --- src/kvstore/Listener.cpp | 11 +++----- src/kvstore/Listener.h | 27 +++++++++---------- src/kvstore/NebulaStore.cpp | 13 +++++---- src/kvstore/NebulaStore.h | 6 +++-- .../plugins/elasticsearch/ESListener.h | 14 +--------- src/kvstore/test/NebulaListenerTest.cpp | 11 +------- 6 files changed, 29 insertions(+), 53 deletions(-) diff --git a/src/kvstore/Listener.cpp b/src/kvstore/Listener.cpp index a989e128672..2bccddd26dd 100644 --- a/src/kvstore/Listener.cpp +++ b/src/kvstore/Listener.cpp @@ -23,10 +23,7 @@ Listener::Listener(GraphSpaceID spaceId, const std::string& walPath, std::shared_ptr ioPool, std::shared_ptr workers, - std::shared_ptr handlers, - std::shared_ptr snapshotMan, - std::shared_ptr clientMan, - std::shared_ptr diskMan) + std::shared_ptr handlers) : RaftPart(FLAGS_cluster_id, spaceId, partId, @@ -35,9 +32,9 @@ Listener::Listener(GraphSpaceID spaceId, ioPool, workers, handlers, - snapshotMan, - clientMan, - diskMan) {} + nullptr, + nullptr, + nullptr) {} void Listener::start(std::vector&& peers, bool) { std::lock_guard g(raftLock_); diff --git a/src/kvstore/Listener.h b/src/kvstore/Listener.h index 5aa4640c001..3391811b02f 100644 --- a/src/kvstore/Listener.h +++ b/src/kvstore/Listener.h @@ -77,15 +77,17 @@ using RaftClient = thrift::ThriftClientManager lastCommittedLogId() * * // read last apply id from external storage, used in initialization * LogID lastApplyLogId() * - * // apply the kv to state machine - * bool apply(const std::vector& data) - * * // persist last commit log id/term and lastApplyId * bool persist(LogID, TermID, LogID) */ @@ -101,10 +103,6 @@ class Listener : public raftex::RaftPart { * @param ioPool IOThreadPool for listener * @param workers Background thread for listener * @param handlers Worker thread for listener - * @param snapshotMan Snapshot manager - * @param clientMan Client manager - * @param diskMan Disk manager - * @param schemaMan Schema 
manager
    */
   Listener(GraphSpaceID spaceId,
            PartitionID partId,
            HostAddr localAddr,
            const std::string& walPath,
            std::shared_ptr<folly::IOThreadPoolExecutor> ioPool,
            std::shared_ptr<thread::GenericThreadPool> workers,
-           std::shared_ptr<folly::Executor> handlers,
-           std::shared_ptr<raftex::SnapshotManager> snapshotMan,
-           std::shared_ptr<RaftClient> clientMan,
-           std::shared_ptr<DiskManager> diskMan);
+           std::shared_ptr<folly::Executor> handlers);

   /**
    * @brief Initialize listener, all Listener must call this method
@@ -185,6 +180,13 @@
    */
   virtual bool persist(LogID commitLogId, TermID commitLogTerm, LogID lastApplyLogId) = 0;

+  /**
+   * @brief Main interface to process logs: listeners need to apply the committed log entries to
+   * their state machine. Once apply succeeds, the implementation should call persist() to make
+   * its progress durable.
+   */
+  virtual void processLogs() = 0;
+
   /**
    * @brief Callback when a raft node lost leadership on term, should not happen in listener
    *
@@ -269,9 +271,6 @@
    */
   void doApply();

-  // Process logs and then call apply to execute
-  virtual void processLogs() = 0;
-
 protected:
  LogID leaderCommitId_ = 0;
  LogID lastApplyLogId_ = 0;
diff --git a/src/kvstore/NebulaStore.cpp b/src/kvstore/NebulaStore.cpp
index 4fe9e8ff28e..566ff7714ab 100644
--- a/src/kvstore/NebulaStore.cpp
+++ b/src/kvstore/NebulaStore.cpp
@@ -582,10 +582,7 @@ void NebulaStore::removeListenerSpace(GraphSpaceID spaceId, meta::cpp2::Listener
   folly::RWSpinLock::WriteHolder wh(&lock_);
   auto spaceIt = this->spaceListeners_.find(spaceId);
   if (spaceIt != this->spaceListeners_.end()) {
-    for (const auto& partEntry : spaceIt->second->listeners_) {
-      CHECK(partEntry.second.empty());
-    }
-    this->spaceListeners_.erase(spaceIt);
+    // Perform extra destruction of the given type of listener here.
   }
   LOG(INFO) << "Listener space " << spaceId << " has been removed!";
 }
@@ -609,7 +606,7 @@ void NebulaStore::addListenerPart(GraphSpaceID spaceId,
               << " of [Space: " << spaceId << ", Part: " << partId << "] has existed!";
     return;
   }
-  partIt->second.emplace(type, newListener(spaceId, partId, std::move(type), peers));
+  partIt->second.emplace(type, newListener(spaceId, partId, type, peers));
   LOG(INFO) << "Listener of type " << apache::thrift::util::enumNameSafe(type)
             << " of [Space: " << spaceId << ", Part: " << partId << "] is added";
   return;
@@ -619,10 +616,12 @@ std::shared_ptr<Listener> NebulaStore::newListener(GraphSpaceID spaceId,
                                                    PartitionID partId,
                                                    meta::cpp2::ListenerType type,
                                                    const std::vector<HostAddr>& peers) {
+  // Lock has been acquired in addListenerPart.
+  // todo(doodle): we don't support starting multiple types of listener in the same process for
+  // now. If we support it later, the wal path may or may not need to be separated, depending on
+  // how we implement it.
   auto walPath =
       folly::stringPrintf("%s/%d/%d/wal", options_.listenerPath_.c_str(), spaceId, partId);
-  // snapshot manager and client manager is set to nullptr, listener should
-  // never use them
   std::shared_ptr<Listener> listener;
   if (type == meta::cpp2::ListenerType::ELASTICSEARCH) {
    listener = std::make_shared<ESListener>(
diff --git a/src/kvstore/NebulaStore.h b/src/kvstore/NebulaStore.h
index 3439991ab8f..276a4751d38 100644
--- a/src/kvstore/NebulaStore.h
+++ b/src/kvstore/NebulaStore.h
@@ -621,7 +621,8 @@ class NebulaStore : public KVStore, public Handler {
                       const std::vector<std::string>& files) override;

   /**
-   * @brief Add a space as listener
+   * @brief Add a specified type of listener to the space. Perform extra initialization of the
+   * given type of listener if necessary. Users should call addListenerSpace first, then addListenerPart.
   *
   * @param spaceId
   * @param type Listener type
   */
  void addListenerSpace(GraphSpaceID spaceId, meta::cpp2::ListenerType type) override;

   /**
-   * @brief Remove a listener space
+   * @brief Remove a specified type of listener from the space. Perform extra destruction of the
+   * given type of listener if necessary. Users should call removeListenerPart first, then
+   * removeListenerSpace.
    *
    * @param spaceId
    * @param type Listener type
diff --git a/src/kvstore/plugins/elasticsearch/ESListener.h b/src/kvstore/plugins/elasticsearch/ESListener.h
index 2f82943e8f4..19c9fd8c73f 100644
--- a/src/kvstore/plugins/elasticsearch/ESListener.h
+++ b/src/kvstore/plugins/elasticsearch/ESListener.h
@@ -27,9 +27,6 @@ class ESListener : public Listener {
    * @param ioPool IOThreadPool for listener
    * @param workers Background thread for listener
    * @param handlers Worker thread for listener
-   * @param snapshotMan Snapshot manager
-   * @param clientMan Client manager
-   * @param diskMan Disk manager
    * @param schemaMan Schema manager
    */
  ESListener(GraphSpaceID spaceId,
@@ -40,16 +37,7 @@ class ESListener : public Listener {
             std::shared_ptr<thread::GenericThreadPool> workers,
             std::shared_ptr<folly::Executor> handlers,
             meta::SchemaManager* schemaMan)
-      : Listener(spaceId,
-                 partId,
-                 std::move(localAddr),
-                 walPath,
-                 ioPool,
-                 workers,
-                 handlers,
-                 nullptr,
-                 nullptr,
-                 nullptr),
+      : Listener(spaceId, partId, std::move(localAddr), walPath, ioPool, workers, handlers),
        schemaMan_(schemaMan) {
    CHECK(!!schemaMan);
    lastApplyLogFile_ = std::make_unique<std::string>(
diff --git a/src/kvstore/test/NebulaListenerTest.cpp b/src/kvstore/test/NebulaListenerTest.cpp
index 9ef87dc40ed..816c2093e80 100644
--- a/src/kvstore/test/NebulaListenerTest.cpp
+++ b/src/kvstore/test/NebulaListenerTest.cpp
@@ -41,16 +41,7 @@ class DummyListener : public Listener {
                 std::shared_ptr<folly::IOThreadPoolExecutor> ioPool,
                 std::shared_ptr<thread::GenericThreadPool> workers,
                 std::shared_ptr<folly::Executor> handlers)
-      : Listener(spaceId,
-                 partId,
-                 localAddr,
-                 walPath,
-                 ioPool,
-                 workers,
-                 handlers,
-                 nullptr,
-                 nullptr,
-                 nullptr) {}
+      : Listener(spaceId, partId, localAddr, walPath, ioPool, workers, handlers) {}

  std::vector<KV> data() {
    return data_;
From f4854554dac04465d668fbd441f50c3fa49dcb0c Mon Sep 17 00:00:00 2001
From: Cheng Xuntao <7731943+xtcyclist@users.noreply.github.com>
Date: Mon, 28 Nov 2022 10:48:14 +0800
Subject: [PATCH 3/7] Allow redefined aliases in with. Remove redundant
 columns in return. (#4868)

Allow redefined aliases in with. Remove redundant columns in return.
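For context, the user-visible shape of this change, mirroring the new TCK case added below
(the self-loop edges are part of that test's setup):

    MATCH (n0)-[e0]->(n0)
    WHERE id(n0) IN ["Tim Duncan", "Carmelo Anthony"]
    WITH * RETURN *

Previously, building the output columns for WITH/RETURN visited `n0` once per occurrence in
the pattern and emitted a duplicate column; the validator now tracks visited aliases and
emits each named alias only once.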
---
 src/graph/validator/MatchValidator.cpp         | 16 +++++++++++++---
 tests/tck/features/match/RedefinedNode.feature | 17 +++++++++++++++++
 2 files changed, 30 insertions(+), 3 deletions(-)

diff --git a/src/graph/validator/MatchValidator.cpp b/src/graph/validator/MatchValidator.cpp
index 92ee2448bf4..aca9729dd8b 100644
--- a/src/graph/validator/MatchValidator.cpp
+++ b/src/graph/validator/MatchValidator.cpp
@@ -389,18 +389,28 @@ Status MatchValidator::buildColumnsForAllNamedAliases(const std::vector<QueryPart>& queryParts,
+  std::unordered_set<std::string> visitedAliases;
   for (auto &match : currQueryPart.matchs) {
     for (auto &path : match->paths) {
       for (size_t i = 0; i < path.edgeInfos.size(); ++i) {
         if (!path.nodeInfos[i].anonymous) {
-          columns->addColumn(makeColumn(path.nodeInfos[i].alias));
+          if (visitedAliases.find(path.nodeInfos[i].alias) == visitedAliases.end()) {
+            columns->addColumn(makeColumn(path.nodeInfos[i].alias));
+            visitedAliases.emplace(path.nodeInfos[i].alias);
+          }
         }
         if (!path.edgeInfos[i].anonymous) {
-          columns->addColumn(makeColumn(path.edgeInfos[i].alias));
+          if (visitedAliases.find(path.edgeInfos[i].alias) == visitedAliases.end()) {
+            columns->addColumn(makeColumn(path.edgeInfos[i].alias));
+            visitedAliases.emplace(path.edgeInfos[i].alias);
+          }
         }
       }
       if (!path.nodeInfos.back().anonymous) {
-        columns->addColumn(makeColumn(path.nodeInfos.back().alias));
+        if (visitedAliases.find(path.nodeInfos.back().alias) == visitedAliases.end()) {
+          columns->addColumn(makeColumn(path.nodeInfos.back().alias));
+          visitedAliases.emplace(path.nodeInfos.back().alias);
+        }
       }
     }
diff --git a/tests/tck/features/match/RedefinedNode.feature b/tests/tck/features/match/RedefinedNode.feature
index e623c66782f..b6837e5ef3b 100644
--- a/tests/tck/features/match/RedefinedNode.feature
+++ b/tests/tck/features/match/RedefinedNode.feature
@@ -192,3 +192,20 @@ Feature: Redefined symbols
       MATCH (v:player{name:"abc"})-[e:like]->(v1)-[e:like]->(v2) RETURN *
       """
     Then a SemanticError should be raised at runtime: `e': Redefined alias
+
+  Scenario: Redefined alias in with
+    Given an empty graph
+    And load "nba" csv data to a new space
+    And having executed:
+      """
+      insert edge like (likeness) values "Tim Duncan"->"Tim Duncan":(100);
+      insert edge like (likeness) values "Carmelo Anthony"->"Carmelo Anthony":(100);
+      """
+    When executing query:
+      """
+      MATCH (n0)-[e0]->(n0) WHERE (id(n0) IN ["Tim Duncan", "Carmelo Anthony" ]) with * RETURN *
+      """
+    Then the result should be, in any order:
+      | n0 | e0 |
+      | ("Tim Duncan" :player{age: 42, name: "Tim Duncan"} :bachelor{name: "Tim Duncan", speciality: "psychology"}) | [:like "Tim Duncan"->"Tim Duncan" @0 {likeness: 100}] |
+      | ("Carmelo Anthony" :player{age: 34, name: "Carmelo Anthony"}) | [:like "Carmelo Anthony"->"Carmelo Anthony" @0 {likeness: 100}] |
From 611c6705f9ef57c2f21104dabf9c3b59d45d1e65 Mon Sep 17 00:00:00 2001
From: "pengwei.song" <90180021+pengweisong@users.noreply.github.com>
Date: Mon, 28 Nov 2022 11:28:45 +0800
Subject: [PATCH 4/7] fix folly::stringPrintf when ref empty str (#4933)

* fix folly::stringPrintf when ref empty str
* remove static cast
* fix update test
* add a tck and some usage
* fix grammar

Co-authored-by: jie.wang <38901892+jievince@users.noreply.github.com>
Co-authored-by: Sophie <84560950+Sophie-Xie@users.noreply.github.com>
---
 src/storage/exec/UpdateResultNode.h      |  6 +--
 src/storage/test/UpdateVertexTest.cpp    | 68 ++++++++++++------------
 tests/README.md                          |  4 ++
 tests/tck/features/update/Update.feature | 19 +++++--
 4 files changed, 54 insertions(+), 43 deletions(-)

diff --git
a/src/storage/exec/UpdateResultNode.h b/src/storage/exec/UpdateResultNode.h index af180947552..eee921996ec 100644 --- a/src/storage/exec/UpdateResultNode.h +++ b/src/storage/exec/UpdateResultNode.h @@ -62,12 +62,10 @@ class UpdateResNode : public RelNode { std::vector row; row.emplace_back(insert_); - for (auto& retExp : returnPropsExp_) { - auto exp = static_cast(retExp); + for (auto& exp : returnPropsExp_) { auto& val = exp->eval(*expCtx_); if (exp) { - result_->colNames.emplace_back( - folly::stringPrintf("%s.%s", exp->sym().c_str(), exp->prop().c_str())); + result_->colNames.emplace_back(exp->toString()); } else { VLOG(1) << "Can't get expression name"; result_->colNames.emplace_back("NULL"); diff --git a/src/storage/test/UpdateVertexTest.cpp b/src/storage/test/UpdateVertexTest.cpp index 0b4b73108db..d7e389fe7c0 100644 --- a/src/storage/test/UpdateVertexTest.cpp +++ b/src/storage/test/UpdateVertexTest.cpp @@ -167,11 +167,11 @@ TEST(UpdateVertexTest, No_Filter_Test) { EXPECT_EQ(0, (*resp.result_ref()).failed_parts.size()); EXPECT_EQ(6, (*resp.props_ref()).colNames.size()); EXPECT_EQ("_inserted", (*resp.props_ref()).colNames[0]); - EXPECT_EQ("1.name", (*resp.props_ref()).colNames[1]); - EXPECT_EQ("1.age", (*resp.props_ref()).colNames[2]); - EXPECT_EQ("1.country", (*resp.props_ref()).colNames[3]); - EXPECT_EQ(std::string("1.").append(kVid), (*resp.props_ref()).colNames[4]); - EXPECT_EQ(std::string("1.").append(kTag), (*resp.props_ref()).colNames[5]); + EXPECT_EQ("$^.1.name", (*resp.props_ref()).colNames[1]); + EXPECT_EQ("$^.1.age", (*resp.props_ref()).colNames[2]); + EXPECT_EQ("$^.1.country", (*resp.props_ref()).colNames[3]); + EXPECT_EQ(std::string("$^.1.").append(kVid), (*resp.props_ref()).colNames[4]); + EXPECT_EQ(std::string("$^.1.").append(kTag), (*resp.props_ref()).colNames[5]); EXPECT_EQ(1, (*resp.props_ref()).rows.size()); EXPECT_EQ(6, (*resp.props_ref()).rows[0].values.size()); @@ -286,11 +286,11 @@ TEST(UpdateVertexTest, Filter_Yield_Test2) { // Note: If filtered out, the result is old EXPECT_EQ(6, (*resp.props_ref()).colNames.size()); EXPECT_EQ("_inserted", (*resp.props_ref()).colNames[0]); - EXPECT_EQ("1.name", (*resp.props_ref()).colNames[1]); - EXPECT_EQ("1.age", (*resp.props_ref()).colNames[2]); - EXPECT_EQ("1.country", (*resp.props_ref()).colNames[3]); - EXPECT_EQ(std::string("1.").append(kVid), (*resp.props_ref()).colNames[4]); - EXPECT_EQ(std::string("1.").append(kTag), (*resp.props_ref()).colNames[5]); + EXPECT_EQ("$^.1.name", (*resp.props_ref()).colNames[1]); + EXPECT_EQ("$^.1.age", (*resp.props_ref()).colNames[2]); + EXPECT_EQ("$^.1.country", (*resp.props_ref()).colNames[3]); + EXPECT_EQ(std::string("$^.1.").append(kVid), (*resp.props_ref()).colNames[4]); + EXPECT_EQ(std::string("$^.1.").append(kTag), (*resp.props_ref()).colNames[5]); EXPECT_EQ(1, (*resp.props_ref()).rows.size()); EXPECT_EQ(6, (*resp.props_ref()).rows[0].values.size()); @@ -390,11 +390,11 @@ TEST(UpdateVertexTest, Insertable_Test) { EXPECT_EQ(0, (*resp.result_ref()).failed_parts.size()); EXPECT_EQ(6, (*resp.props_ref()).colNames.size()); EXPECT_EQ("_inserted", (*resp.props_ref()).colNames[0]); - EXPECT_EQ("1.name", (*resp.props_ref()).colNames[1]); - EXPECT_EQ("1.age", (*resp.props_ref()).colNames[2]); - EXPECT_EQ("1.country", (*resp.props_ref()).colNames[3]); - EXPECT_EQ(std::string("1.").append(kVid), (*resp.props_ref()).colNames[4]); - EXPECT_EQ(std::string("1.").append(kTag), (*resp.props_ref()).colNames[5]); + EXPECT_EQ("$^.1.name", (*resp.props_ref()).colNames[1]); + EXPECT_EQ("$^.1.age", 
(*resp.props_ref()).colNames[2]); + EXPECT_EQ("$^.1.country", (*resp.props_ref()).colNames[3]); + EXPECT_EQ(std::string("$^.1.").append(kVid), (*resp.props_ref()).colNames[4]); + EXPECT_EQ(std::string("$^.1.").append(kTag), (*resp.props_ref()).colNames[5]); EXPECT_EQ(1, (*resp.props_ref()).rows.size()); EXPECT_EQ(6, (*resp.props_ref()).rows[0].values.size()); @@ -689,11 +689,11 @@ TEST(UpdateVertexTest, Insertable_Filter_Value_Test) { EXPECT_EQ(0, (*resp.result_ref()).failed_parts.size()); EXPECT_EQ(6, (*resp.props_ref()).colNames.size()); EXPECT_EQ("_inserted", (*resp.props_ref()).colNames[0]); - EXPECT_EQ("1.name", (*resp.props_ref()).colNames[1]); - EXPECT_EQ("1.age", (*resp.props_ref()).colNames[2]); - EXPECT_EQ("1.country", (*resp.props_ref()).colNames[3]); - EXPECT_EQ(std::string("1.").append(kVid), (*resp.props_ref()).colNames[4]); - EXPECT_EQ(std::string("1.").append(kTag), (*resp.props_ref()).colNames[5]); + EXPECT_EQ("$^.1.name", (*resp.props_ref()).colNames[1]); + EXPECT_EQ("$^.1.age", (*resp.props_ref()).colNames[2]); + EXPECT_EQ("$^.1.country", (*resp.props_ref()).colNames[3]); + EXPECT_EQ(std::string("$^.1.").append(kVid), (*resp.props_ref()).colNames[4]); + EXPECT_EQ(std::string("$^.1.").append(kTag), (*resp.props_ref()).colNames[5]); EXPECT_EQ(1, (*resp.props_ref()).rows.size()); EXPECT_EQ(true, (*resp.props_ref()).rows[0].values[0].getBool()); @@ -937,11 +937,11 @@ TEST(UpdateVertexTest, TTL_Insert_No_Exist_Test) { EXPECT_EQ(0, (*resp.result_ref()).failed_parts.size()); EXPECT_EQ(6, (*resp.props_ref()).colNames.size()); EXPECT_EQ("_inserted", (*resp.props_ref()).colNames[0]); - EXPECT_EQ("1.name", (*resp.props_ref()).colNames[1]); - EXPECT_EQ("1.age", (*resp.props_ref()).colNames[2]); - EXPECT_EQ("1.country", (*resp.props_ref()).colNames[3]); - EXPECT_EQ(std::string("1.").append(kVid), (*resp.props_ref()).colNames[4]); - EXPECT_EQ(std::string("1.").append(kTag), (*resp.props_ref()).colNames[5]); + EXPECT_EQ("$^.1.name", (*resp.props_ref()).colNames[1]); + EXPECT_EQ("$^.1.age", (*resp.props_ref()).colNames[2]); + EXPECT_EQ("$^.1.country", (*resp.props_ref()).colNames[3]); + EXPECT_EQ(std::string("$^.1.").append(kVid), (*resp.props_ref()).colNames[4]); + EXPECT_EQ(std::string("$^.1.").append(kTag), (*resp.props_ref()).colNames[5]); EXPECT_EQ(1, (*resp.props_ref()).rows.size()); EXPECT_EQ(6, (*resp.props_ref()).rows[0].values.size()); @@ -1056,11 +1056,11 @@ TEST(UpdateVertexTest, TTL_Insert_Test) { EXPECT_EQ(0, (*resp.result_ref()).failed_parts.size()); EXPECT_EQ(6, (*resp.props_ref()).colNames.size()); EXPECT_EQ("_inserted", (*resp.props_ref()).colNames[0]); - EXPECT_EQ("1.name", (*resp.props_ref()).colNames[1]); - EXPECT_EQ("1.age", (*resp.props_ref()).colNames[2]); - EXPECT_EQ("1.country", (*resp.props_ref()).colNames[3]); - EXPECT_EQ(std::string("1.").append(kVid), (*resp.props_ref()).colNames[4]); - EXPECT_EQ(std::string("1.").append(kTag), (*resp.props_ref()).colNames[5]); + EXPECT_EQ("$^.1.name", (*resp.props_ref()).colNames[1]); + EXPECT_EQ("$^.1.age", (*resp.props_ref()).colNames[2]); + EXPECT_EQ("$^.1.country", (*resp.props_ref()).colNames[3]); + EXPECT_EQ(std::string("$^.1.").append(kVid), (*resp.props_ref()).colNames[4]); + EXPECT_EQ(std::string("$^.1.").append(kTag), (*resp.props_ref()).colNames[5]); EXPECT_EQ(1, (*resp.props_ref()).rows.size()); EXPECT_EQ(6, (*resp.props_ref()).rows[0].values.size()); @@ -1242,10 +1242,10 @@ TEST(UpdateVertexTest, Insertable_In_Set_Test) { EXPECT_EQ(0, (*resp.result_ref()).failed_parts.size()); EXPECT_EQ(5, 
(*resp.props_ref()).colNames.size());
   EXPECT_EQ("_inserted", (*resp.props_ref()).colNames[0]);
-  EXPECT_EQ("1.name", (*resp.props_ref()).colNames[1]);
-  EXPECT_EQ("1.age", (*resp.props_ref()).colNames[2]);
-  EXPECT_EQ(std::string("1.").append(kVid), (*resp.props_ref()).colNames[3]);
-  EXPECT_EQ(std::string("1.").append(kTag), (*resp.props_ref()).colNames[4]);
+  EXPECT_EQ("$^.1.name", (*resp.props_ref()).colNames[1]);
+  EXPECT_EQ("$^.1.age", (*resp.props_ref()).colNames[2]);
+  EXPECT_EQ(std::string("$^.1.").append(kVid), (*resp.props_ref()).colNames[3]);
+  EXPECT_EQ(std::string("$^.1.").append(kTag), (*resp.props_ref()).colNames[4]);

   EXPECT_EQ(1, (*resp.props_ref()).rows.size());
   EXPECT_EQ(5, (*resp.props_ref()).rows[0].values.size());
diff --git a/tests/README.md b/tests/README.md
index 2055d39c317..f73f2b25775 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -13,6 +13,7 @@ Nebula Test framework depends on python3(>=3.7) and some thirdparty libraries, s
 So you should install all these dependencies before running test cases by:

 ```shell
+$ cd tests
 $ make init-all
 ```

@@ -51,6 +52,9 @@ $ make RM_DIR=false tck # default value of RM_DIR is true
 And if you want to debug only one test case, you should check the usage of `pytest` itself by `pytest --help`. For example, run the test cases related to `MATCH`, you can do it like:

 ```shell
+# pytest will use the keyword 'match' to match the Scenario names. All the Scenarios whose names
+# contain the keyword 'match' will be selected.
+# You can also use '@keyword' to annotate a scenario and use `pytest -k 'keyword'` to run only that scenario.
 $ pytest -k 'match' -m 'not skip' .
 ```

diff --git a/tests/tck/features/update/Update.feature b/tests/tck/features/update/Update.feature
index 9242eb9bde3..acca6947375 100644
--- a/tests/tck/features/update/Update.feature
+++ b/tests/tck/features/update/Update.feature
@@ -6,9 +6,9 @@ Feature: Update string vid of vertex and edge
   Background: Prepare space
     Given an empty graph
     And create a space with following options:
-      | partition_num  | 1                |
-      | replica_factor | 1                |
-      | vid_type       | FIXED_STRING(20) |
+      | partition_num  | 1                 |
+      | replica_factor | 1                 |
+      | vid_type       | FIXED_STRING(200) |
     And having executed:
      """
      CREATE TAG IF NOT EXISTS course(name string, credits int);
@@ -727,6 +727,15 @@ Feature: Update string vid of vertex and edge
     Then the result should be, in any order:
       | Name   | Credits |
       | 'Math' | 6       |
+    When executing query:
+      """
+      UPDATE VERTEX ON course "101"
+      SET credits = credits + 1
+      YIELD "101" AS courseId, credits AS Credits
+      """
+    Then the result should be, in any order:
+      | courseId | Credits |
+      | '101'    | 7       |
     When executing query:
      """
      UPDATE VERTEX ON course "101"
@@ -736,7 +745,7 @@ Feature: Update string vid of vertex and edge
      """
     Then the result should be, in any order:
       | Name   | Credits |
-      | 'Math' | 7       |
+      | 'Math' | 8       |
     When executing query:
      """
      UPDATE VERTEX ON course "101"
@@ -746,7 +755,7 @@ Feature: Update string vid of vertex and edge
      """
     Then the result should be, in any order:
       | Name   | Credits |
-      | 'Math' | 7       |
+      | 'Math' | 8       |
     When executing query:
      """
      FETCH PROP ON select "200"->"101"@0 YIELD select.grade, select.year
From 852512bb05d068627c74bba0b130d2c14819fb87 Mon Sep 17 00:00:00 2001
From: Yee <2520865+yixinglu@users.noreply.github.com>
Date: Mon, 28 Nov 2022 11:59:41 +0800
Subject: [PATCH 5/7] Fix the dependency of argument plan node (#4939)

* Fix the dependency of argument plan node
* Add test features
* make ps
* Rename feature and scenario
* Fix tck tests
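The problem shows up in multi-MATCH queries where the second pattern starts from a variable
bound by the first, which is planned through an Argument node; the query below is taken
verbatim from the TCK feature added in this patch:

    MATCH (a:player)
    WHERE id(a)=='Tim Duncan'
    MATCH (a)-[:like]-(b)
    RETURN count(*) AS cnt

With the fix, Argument::explain() reports the node's input dependency (via
SingleInputNode::explain), so the plan checks below expect `Argument | 0` instead of an
empty dependency column.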
Co-authored-by: Sophie <84560950+Sophie-Xie@users.noreply.github.com> --- src/graph/executor/logic/ArgumentExecutor.cpp | 4 +-- src/graph/planner/match/MatchPathPlanner.cpp | 8 ++--- src/graph/planner/plan/Logic.cpp | 2 +- tests/Makefile | 3 ++ .../bugfix/ArgumentPlanNodeDep.feature | 30 +++++++++++++++++++ .../optimizer/PrunePropertiesRule.feature | 27 ++++++++++------- 6 files changed, 54 insertions(+), 20 deletions(-) create mode 100644 tests/tck/features/bugfix/ArgumentPlanNodeDep.feature diff --git a/src/graph/executor/logic/ArgumentExecutor.cpp b/src/graph/executor/logic/ArgumentExecutor.cpp index 0909c9fce65..61b4c6b6cbd 100644 --- a/src/graph/executor/logic/ArgumentExecutor.cpp +++ b/src/graph/executor/logic/ArgumentExecutor.cpp @@ -23,7 +23,7 @@ folly::Future ArgumentExecutor::execute() { ds.rows.reserve(iter->size()); std::unordered_set unique; for (; iter->valid(); iter->next()) { - auto val = iter->getColumn(alias); + auto &val = iter->getColumn(alias); if (!val.isVertex()) { return Status::Error("Argument only support vertex, but got %s, which is type %s, ", val.toString().c_str(), @@ -31,7 +31,7 @@ folly::Future ArgumentExecutor::execute() { } if (unique.emplace(val.getVertex().vid).second) { Row row; - row.values.emplace_back(std::move(val)); + row.values.emplace_back(val); ds.rows.emplace_back(std::move(row)); } } diff --git a/src/graph/planner/match/MatchPathPlanner.cpp b/src/graph/planner/match/MatchPathPlanner.cpp index 152be863342..9bee783feb6 100644 --- a/src/graph/planner/match/MatchPathPlanner.cpp +++ b/src/graph/planner/match/MatchPathPlanner.cpp @@ -131,9 +131,7 @@ Status MatchPathPlanner::findStarts( auto nodeFinder = finder(); if (nodeFinder->match(&nodeCtx)) { auto plan = nodeFinder->transform(&nodeCtx); - if (!plan.ok()) { - return plan.status(); - } + NG_RETURN_IF_ERROR(plan); matchClausePlan = std::move(plan).value(); startIndex = i; foundStart = true; @@ -149,9 +147,7 @@ Status MatchPathPlanner::findStarts( auto edgeFinder = finder(); if (edgeFinder->match(&edgeCtx)) { auto plan = edgeFinder->transform(&edgeCtx); - if (!plan.ok()) { - return plan.status(); - } + NG_RETURN_IF_ERROR(plan); matchClausePlan = std::move(plan).value(); startFromEdge = true; startIndex = i; diff --git a/src/graph/planner/plan/Logic.cpp b/src/graph/planner/plan/Logic.cpp index 67d30e0c49d..d9ffba8f3ff 100644 --- a/src/graph/planner/plan/Logic.cpp +++ b/src/graph/planner/plan/Logic.cpp @@ -87,7 +87,7 @@ void Argument::cloneMembers(const Argument& arg) { } std::unique_ptr Argument::explain() const { - auto desc = PlanNode::explain(); + auto desc = SingleInputNode::explain(); addDescription("inputVar", inputVar(), desc.get()); return desc; } diff --git a/tests/Makefile b/tests/Makefile index 0d4119ce3a3..46d703e9310 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -142,3 +142,6 @@ clean: kill: ps -ef | grep -P '\sbin/nebula-' | grep "$$(whoami)" | sed 's/\s\s*/ /g' | cut -f2 -d' ' | xargs kill -9 + +ps: + @ps -ef | grep -P '\sbin/nebula-' | grep "$$(whoami)" diff --git a/tests/tck/features/bugfix/ArgumentPlanNodeDep.feature b/tests/tck/features/bugfix/ArgumentPlanNodeDep.feature new file mode 100644 index 00000000000..dc276ba1636 --- /dev/null +++ b/tests/tck/features/bugfix/ArgumentPlanNodeDep.feature @@ -0,0 +1,30 @@ +Feature: Fix Argument plan node dependency + + Background: + Given a graph with space named "nba" + + Scenario: fix argument plan node dependency in issue 4938 + When profiling query: + """ + MATCH (a:player) + WHERE id(a)=='Tim Duncan' + MATCH (a)-[:like]-(b) + 
RETURN count(*) AS cnt + """ + Then the result should be, in any order: + | cnt | + | 12 | + And the execution plan should be: + | id | name | dependencies | operator info | + | 11 | Aggregate | 10 | | + | 10 | BiInnerJoin | 9,4 | | + | 9 | Project | 8 | | + | 8 | AppendVertices | 7 | | + | 7 | Dedup | 6 | | + | 6 | PassThrough | 5 | | + | 5 | Start | | | + | 4 | Project | 3 | | + | 3 | AppendVertices | 2 | | + | 2 | Traverse | 1 | | + | 1 | Argument | 0 | | + | 0 | Start | | | diff --git a/tests/tck/features/optimizer/PrunePropertiesRule.feature b/tests/tck/features/optimizer/PrunePropertiesRule.feature index 87810327a73..5429bf2fb82 100644 --- a/tests/tck/features/optimizer/PrunePropertiesRule.feature +++ b/tests/tck/features/optimizer/PrunePropertiesRule.feature @@ -183,7 +183,8 @@ Feature: Prune Properties rule | 23 | Project | 8 | | | 8 | AppendVertices | 7 | { "props": "[{\"tagId\":9,\"props\":[\"name\"]}, {\"tagId\":10,\"props\":[\"name\"]}]" } | | 7 | Traverse | 6 | { "vertexProps": "[{\"tagId\":9,\"props\":[\"name\"]}]", "edgeProps": "[{\"type\": -5, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"type\": 5, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": -3}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 3}, {\"type\": -4, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 4}]" } | - | 6 | Argument | | | + | 6 | Argument | 0 | | + | 0 | Start | | | When profiling query: """ MATCH (m)-[]-(n), (n)-[]-(l), (l)-[]-(h) WHERE id(m)=="Tim Duncan" @@ -217,12 +218,13 @@ Feature: Prune Properties rule | 29 | Project | 8 | | | 8 | AppendVertices | 7 | { "props": "[{\"tagId\":10,\"props\":[\"name\"]}]" } | | 7 | Traverse | 6 | { "vertexProps": "[{\"tagId\":9,\"props\":[\"name\"]}]", "edgeProps": "[{\"type\": -5, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"type\": 5, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": -3}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 3}, {\"type\": -4, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 4}]" } | - | 6 | Argument | | | + | 6 | Argument | 31 | | | 31 | Start | | | | 30 | Project | 12 | | | 12 | AppendVertices | 11 | { "props": "[{\"props\":[\"name\"],\"tagId\":9}]" } | | 11 | Traverse | 10 | { "vertexProps": "[{\"props\":[\"name\"],\"tagId\":10}]", "edgeProps": "[{\"type\": -5, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"type\": 5, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": -3}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 3}, {\"type\": -4, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 4}]" } | - | 10 | Argument | | | + | 10 | Argument | 0 | | + | 0 | Start | | | # The schema id is not fixed in standalone cluster, so we skip it @distonly @@ -262,7 +264,7 @@ Feature: Prune Properties rule | 11 | Project | 10 | | | 10 | AppendVertices | 9 | { "props": "[{\"tagId\":9,\"props\":[\"name\"]}, {\"tagId\":10,\"props\":[\"name\"]}]" } | | 9 | Traverse | 8 | { "vertexProps": "[{\"tagId\":9,\"props\":[\"name\"]}]" } | - | 8 | Argument | | | + | 8 | Argument | 33 | | | 33 | Start | | | When profiling query: """ @@ -298,12 +300,12 @@ Feature: Prune Properties rule | 33 | Project | 10 | | | 10 | AppendVertices | 9 | { "props": "[{\"tagId\":10,\"props\":[\"name\"]}]" } | | 9 | Traverse | 8 | { 
"vertexProps": "[{\"tagId\":9,\"props\":[\"name\"]}]" } | - | 8 | Argument | | | + | 8 | Argument | 35 | | | 35 | Start | | | | 34 | Project | 13 | | | 13 | AppendVertices | 12 | { "props": "[{\"tagId\":9,\"props\":[\"name\"]}]" } | | 12 | Traverse | 11 | { "vertexProps": "[{\"tagId\":10,\"props\":[\"name\"]}]", "edgeProps": "[{\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": -5}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 5}, {\"type\": -3, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 3}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": -4}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 4}]" } | - | 11 | Argument | | | + | 11 | Argument | 36 | | | 36 | Start | | | When profiling query: """ @@ -327,7 +329,8 @@ Feature: Prune Properties rule | 9 | Project | 8 | | | 8 | AppendVertices | 7 | { "props": "[{\"props\":[\"name\"],\"tagId\":9}]" } | | 7 | Traverse | 6 | { "vertexProps": "", "edgeProps": "[{\"type\": -5, \"props\": [\"_type\", \"_rank\", \"_dst\"]}, {\"props\": [\"_type\", \"_rank\", \"_dst\"], \"type\": -3}, {\"props\": [\"_type\", \"_rank\", \"_dst\"], \"type\": -4}]" } | - | 6 | Argument | | | + | 6 | Argument | 0 | | + | 0 | Start | | | # The schema id is not fixed in standalone cluster, so we skip it @distonly @@ -365,7 +368,8 @@ Feature: Prune Properties rule | 11 | Project | 10 | | | 10 | AppendVertices | 9 | { "props": "[{\"props\":[\"name\", \"age\", \"_tag\"],\"tagId\":9}, {\"props\":[\"name\", \"speciality\", \"_tag\"],\"tagId\":8}, {\"props\":[\"name\", \"_tag\"],\"tagId\":10}]" } | | 9 | Traverse | 8 | { "vertexProps": "[{\"props\":[\"name\"],\"tagId\":9}]" } | - | 8 | Argument | | | + | 8 | Argument | 0 | | + | 0 | Start | | | When profiling query: """ MATCH (m:player{name:"Tim Duncan"})-[:like]-(n)--() @@ -388,7 +392,8 @@ Feature: Prune Properties rule | 11 | Project | 10 | | | 10 | AppendVertices | 9 | { "props": "[{\"props\":[\"_tag\"],\"tagId\":8}, {\"props\":[\"_tag\"],\"tagId\":9}, {\"props\":[\"_tag\"],\"tagId\":10}]" } | | 9 | Traverse | 8 | { "vertexProps": "" } | - | 8 | Argument | | | + | 8 | Argument | 0 | | + | 0 | Start | | | @distonly Scenario: return function @@ -478,7 +483,7 @@ Feature: Prune Properties rule | 12 | Project | 18 | | | 18 | AppendVertices | 10 | { "props": "[{\"props\":[\"_tag\"],\"tagId\":10}]" } | | 10 | Traverse | 8 | {"vertexProps": "[{\"props\":[\"name\", \"age\", \"_tag\"],\"tagId\":9}, {\"props\":[\"name\", \"speciality\", \"_tag\"],\"tagId\":8}, {\"props\":[\"name\", \"_tag\"],\"tagId\":10}]", "edgeProps": "[{\"type\": 4, \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | - | 8 | Argument | | | + | 8 | Argument | 9 | | | 9 | Start | | | @distonly @@ -553,7 +558,7 @@ Feature: Prune Properties rule | 15 | Traverse | 14 | {"vertexProps": "[{\"props\":[\"age\"],\"tagId\":9}]", "edgeProps": "[{\"type\": 4, \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | | 14 | Traverse | 13 | {"vertexProps": "", "edgeProps": "[{\"type\": -3, \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | | 13 | Traverse | 11 | {"vertexProps": "", "edgeProps": "[{\"type\": -3, \"props\": [\"_type\", \"_rank\", \"_dst\"]}, {\"type\": 3, \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | - | 11 | Argument | | | + | 11 | Argument | 12 | | | 12 | Start | | | @distonly From aa624162abaf92eeb6c2e5a8c35d31acc9366846 Mon Sep 17 00:00:00 2001 From: Cheng Xuntao <7731943+xtcyclist@users.noreply.github.com> Date: Mon, 28 Nov 2022 15:54:00 +0800 Subject: [PATCH 6/7] Remove all 
UNKNOWN_PROP as a type of null. (#4907) * remove all UNKNOWN_PROP as a type of null. * fix format * update ut. * update tck Co-authored-by: kyle.cao Co-authored-by: Yee <2520865+yixinglu@users.noreply.github.com> --- src/codec/RowReaderV1.cpp | 4 +-- src/codec/RowReaderV2.cpp | 2 +- src/common/datatypes/Map.h | 2 +- src/common/datatypes/Value.cpp | 4 --- src/common/datatypes/Value.h | 6 ++-- src/common/datatypes/test/ValueTest.cpp | 2 -- src/common/datatypes/test/ValueToJsonTest.cpp | 2 +- src/common/expression/AttributeExpression.cpp | 2 +- .../test/AttributeExpressionTest.cpp | 6 ++-- .../test/SubscriptExpressionTest.cpp | 1 - src/common/time/TimeUtils.h | 6 ++-- src/common/utils/IndexKeyUtils.cpp | 5 +-- src/interface/common.thrift | 1 - src/storage/exec/IndexEdgeScanNode.cpp | 2 +- src/storage/exec/IndexVertexScanNode.cpp | 2 +- src/storage/exec/QueryUtils.h | 2 +- src/tools/db-upgrade/DbUpgrader.cpp | 2 +- tests/common/nebula_test_suite.py | 4 +-- .../tck/features/expression/Attribute.feature | 12 +++---- tests/tck/features/match/With.feature | 4 +-- .../optimizer/PrunePropertiesRule.feature | 34 +++++++++---------- tests/tck/features/parser/nebula.feature | 2 +- tests/tck/utils/nbv.py | 12 +++---- 23 files changed, 53 insertions(+), 66 deletions(-) diff --git a/src/codec/RowReaderV1.cpp b/src/codec/RowReaderV1.cpp index 877e1e35cee..b56b831a228 100644 --- a/src/codec/RowReaderV1.cpp +++ b/src/codec/RowReaderV1.cpp @@ -198,11 +198,11 @@ Value RowReaderV1::getValueByName(const std::string& prop) const noexcept { Value RowReaderV1::getValueByIndex(const int64_t index) const noexcept { if (index < 0 || static_cast(index) >= schema_->getNumFields()) { - return Value(NullType::UNKNOWN_PROP); + return Value(NullType::__NULL__); } auto vType = getSchema()->getFieldType(index); if (PropertyType::UNKNOWN == vType) { - return Value(NullType::UNKNOWN_PROP); + return Value(NullType::__NULL__); } switch (vType) { case PropertyType::BOOL: diff --git a/src/codec/RowReaderV2.cpp b/src/codec/RowReaderV2.cpp index fc213cbf568..91db3933fec 100644 --- a/src/codec/RowReaderV2.cpp +++ b/src/codec/RowReaderV2.cpp @@ -52,7 +52,7 @@ Value RowReaderV2::getValueByName(const std::string& prop) const noexcept { Value RowReaderV2::getValueByIndex(const int64_t index) const noexcept { if (index < 0 || static_cast(index) >= schema_->getNumFields()) { - return Value(NullType::UNKNOWN_PROP); + return Value(NullType::__NULL__); } auto field = schema_->field(index); diff --git a/src/common/datatypes/Map.h b/src/common/datatypes/Map.h index 91134a48387..9a55f28318c 100644 --- a/src/common/datatypes/Map.h +++ b/src/common/datatypes/Map.h @@ -69,7 +69,7 @@ struct Map { const Value& at(const std::string& key) const { auto iter = kvs.find(key); if (iter == kvs.end()) { - return Value::kNullUnknownProp; + return Value::kNullValue; } return iter->second; } diff --git a/src/common/datatypes/Value.cpp b/src/common/datatypes/Value.cpp index 7de3ece5103..c49adf90290 100644 --- a/src/common/datatypes/Value.cpp +++ b/src/common/datatypes/Value.cpp @@ -29,7 +29,6 @@ const Value Value::kNullNaN(NullType::NaN); const Value Value::kNullBadData(NullType::BAD_DATA); const Value Value::kNullBadType(NullType::BAD_TYPE); const Value Value::kNullOverflow(NullType::ERR_OVERFLOW); -const Value Value::kNullUnknownProp(NullType::UNKNOWN_PROP); const Value Value::kNullDivByZero(NullType::DIV_BY_ZERO); const Value Value::kNullOutOfRange(NullType::OUT_OF_RANGE); @@ -320,7 +319,6 @@ const std::string& Value::typeName() const { {NullType::BAD_DATA, 
"BAD_DATA"}, {NullType::BAD_TYPE, "BAD_TYPE"}, {NullType::ERR_OVERFLOW, "ERR_OVERFLOW"}, - {NullType::UNKNOWN_PROP, "UNKNOWN_PROP"}, {NullType::DIV_BY_ZERO, "DIV_BY_ZERO"}, }; @@ -1564,8 +1562,6 @@ std::string Value::toString() const { return "__NULL_OVERFLOW__"; case NullType::NaN: return "__NULL_NaN__"; - case NullType::UNKNOWN_PROP: - return "__NULL_UNKNOWN_PROP__"; case NullType::OUT_OF_RANGE: return "__NULL_OUT_OF_RANGE__"; } diff --git a/src/common/datatypes/Value.h b/src/common/datatypes/Value.h index 06f4e8b2241..b55397a1566 100644 --- a/src/common/datatypes/Value.h +++ b/src/common/datatypes/Value.h @@ -40,7 +40,6 @@ enum class NullType { BAD_DATA = 2, BAD_TYPE = 3, ERR_OVERFLOW = 4, - UNKNOWN_PROP = 5, DIV_BY_ZERO = 6, OUT_OF_RANGE = 7, }; @@ -52,7 +51,6 @@ struct Value { static const Value kNullBadData; static const Value kNullBadType; static const Value kNullOverflow; - static const Value kNullUnknownProp; static const Value kNullDivByZero; static const Value kNullOutOfRange; @@ -157,8 +155,8 @@ struct Value { } auto& null = value_.nVal; return null == NullType::NaN || null == NullType::BAD_DATA || null == NullType::BAD_TYPE || - null == NullType::ERR_OVERFLOW || null == NullType::UNKNOWN_PROP || - null == NullType::DIV_BY_ZERO || null == NullType::OUT_OF_RANGE; + null == NullType::ERR_OVERFLOW || null == NullType::DIV_BY_ZERO || + null == NullType::OUT_OF_RANGE; } bool isNumeric() const { return type_ == Type::INT || type_ == Type::FLOAT; diff --git a/src/common/datatypes/test/ValueTest.cpp b/src/common/datatypes/test/ValueTest.cpp index 73abce84640..571c74f463f 100644 --- a/src/common/datatypes/test/ValueTest.cpp +++ b/src/common/datatypes/test/ValueTest.cpp @@ -1565,7 +1565,6 @@ TEST(Value, typeName) { EXPECT_EQ("BAD_DATA", Value::kNullBadData.typeName()); EXPECT_EQ("BAD_TYPE", Value::kNullBadType.typeName()); EXPECT_EQ("ERR_OVERFLOW", Value::kNullOverflow.typeName()); - EXPECT_EQ("UNKNOWN_PROP", Value::kNullUnknownProp.typeName()); EXPECT_EQ("DIV_BY_ZERO", Value::kNullDivByZero.typeName()); } @@ -1582,7 +1581,6 @@ TEST(Value, DecodeEncode) { Value(NullType::BAD_DATA), Value(NullType::ERR_OVERFLOW), Value(NullType::OUT_OF_RANGE), - Value(NullType::UNKNOWN_PROP), // int Value(0), diff --git a/src/common/datatypes/test/ValueToJsonTest.cpp b/src/common/datatypes/test/ValueToJsonTest.cpp index 457b666051d..a2e8923e2a5 100644 --- a/src/common/datatypes/test/ValueToJsonTest.cpp +++ b/src/common/datatypes/test/ValueToJsonTest.cpp @@ -225,7 +225,7 @@ TEST(ValueToJson, DecodeEncode) { Value(NullType::BAD_DATA), Value(NullType::ERR_OVERFLOW), Value(NullType::OUT_OF_RANGE), - Value(NullType::UNKNOWN_PROP), + Value(NullType::__NULL__), // int Value(0), diff --git a/src/common/expression/AttributeExpression.cpp b/src/common/expression/AttributeExpression.cpp index 92425be7892..4f8f7b9c147 100644 --- a/src/common/expression/AttributeExpression.cpp +++ b/src/common/expression/AttributeExpression.cpp @@ -33,7 +33,7 @@ const Value &AttributeExpression::eval(ExpressionContext &ctx) { return iter->second; } } - return Value::kNullUnknownProp; + return Value::kNullValue; } case Value::Type::EDGE: { DCHECK(!rvalue.getStr().empty()); diff --git a/src/common/expression/test/AttributeExpressionTest.cpp b/src/common/expression/test/AttributeExpressionTest.cpp index 5b3dbbe5870..1258e953bfa 100644 --- a/src/common/expression/test/AttributeExpressionTest.cpp +++ b/src/common/expression/test/AttributeExpressionTest.cpp @@ -134,7 +134,7 @@ TEST_F(AttributeExpressionTest, DateTimeAttribute) { auto *right 
= LabelExpression::make(&pool, "not exist attribute"); auto expr = AttributeExpression::make(&pool, left, right); auto value = Expression::eval(expr, gExpCtxt); - ASSERT_EQ(Value::kNullUnknownProp, value); + ASSERT_EQ(Value::kNullValue, value); } { auto *left = ConstantExpression::make(&pool, Value(dt)); @@ -148,7 +148,7 @@ TEST_F(AttributeExpressionTest, DateTimeAttribute) { auto *right = LabelExpression::make(&pool, "not exist attribute"); auto expr = AttributeExpression::make(&pool, left, right); auto value = Expression::eval(expr, gExpCtxt); - ASSERT_EQ(Value::kNullUnknownProp, value); + ASSERT_EQ(Value::kNullValue, value); } { auto *left = ConstantExpression::make(&pool, Value(d)); @@ -162,7 +162,7 @@ TEST_F(AttributeExpressionTest, DateTimeAttribute) { auto *right = LabelExpression::make(&pool, "not exist attribute"); auto expr = AttributeExpression::make(&pool, left, right); auto value = Expression::eval(expr, gExpCtxt); - ASSERT_EQ(Value::kNullUnknownProp, value); + ASSERT_EQ(Value::kNullValue, value); } { auto *left = ConstantExpression::make(&pool, Value(t)); diff --git a/src/common/expression/test/SubscriptExpressionTest.cpp b/src/common/expression/test/SubscriptExpressionTest.cpp index 622557d7b5a..7d7f7ead0cf 100644 --- a/src/common/expression/test/SubscriptExpressionTest.cpp +++ b/src/common/expression/test/SubscriptExpressionTest.cpp @@ -338,7 +338,6 @@ TEST_F(SubscriptExpressionTest, MapSubscript) { auto expr = SubscriptExpression::make(&pool, map, key); auto value = Expression::eval(expr, gExpCtxt); ASSERT_TRUE(value.isNull()); - ASSERT_TRUE(value.isBadNull()); } // {"key1":1,"key2":2, "key3":3}[0] { diff --git a/src/common/time/TimeUtils.h b/src/common/time/TimeUtils.h index 94570877ad3..e2c4bba09ed 100644 --- a/src/common/time/TimeUtils.h +++ b/src/common/time/TimeUtils.h @@ -124,7 +124,7 @@ class TimeUtils { } else if (lowerProp == "microsecond") { return static_cast(dt.microsec); } else { - return Value::kNullUnknownProp; + return Value::kNullValue; } } @@ -160,7 +160,7 @@ class TimeUtils { } else if (lowerProp == "day") { return d.day; } else { - return Value::kNullUnknownProp; + return Value::kNullValue; } } @@ -203,7 +203,7 @@ class TimeUtils { } else if (lowerProp == "microsecond") { return t.microsec; } else { - return Value::kNullUnknownProp; + return Value::kNullValue; } } diff --git a/src/common/utils/IndexKeyUtils.cpp b/src/common/utils/IndexKeyUtils.cpp index 3707d85f52e..5e984ddb54e 100644 --- a/src/common/utils/IndexKeyUtils.cpp +++ b/src/common/utils/IndexKeyUtils.cpp @@ -204,7 +204,7 @@ StatusOr IndexKeyUtils::readValueWithLatestSche(RowReader* reader, const std::string propName, const meta::SchemaProviderIf* latestSchema) { auto value = reader->getValueByName(propName); - if (latestSchema == nullptr || !value.isNull() || value.getNull() != NullType::UNKNOWN_PROP) { + if (latestSchema == nullptr || !value.isNull() || value.getNull() != NullType::__NULL__) { return value; } auto field = latestSchema->field(propName); @@ -230,9 +230,6 @@ Status IndexKeyUtils::checkValue(const Value& v, bool isNullable) { } switch (v.getNull()) { - case nebula::NullType::UNKNOWN_PROP: { - return Status::Error("Unknown prop"); - } case nebula::NullType::__NULL__: { if (!isNullable) { return Status::Error("Not allowed to be null"); diff --git a/src/interface/common.thrift b/src/interface/common.thrift index 808ffaf769f..8b55a4e4030 100644 --- a/src/interface/common.thrift +++ b/src/interface/common.thrift @@ -95,7 +95,6 @@ enum NullType { BAD_DATA = 2, BAD_TYPE = 3, 
    ERR_OVERFLOW = 4,
-    UNKNOWN_PROP = 5,
    DIV_BY_ZERO = 6,
    OUT_OF_RANGE = 7,
 } (cpp.enum_strict, cpp.type = "nebula::NullType")
diff --git a/src/storage/exec/IndexEdgeScanNode.cpp b/src/storage/exec/IndexEdgeScanNode.cpp
index 9eb9fb26cb2..d4dcfd67ecd 100644
--- a/src/storage/exec/IndexEdgeScanNode.cpp
+++ b/src/storage/exec/IndexEdgeScanNode.cpp
@@ -122,7 +122,7 @@ Map<std::string, Value> IndexEdgeScanNode::decodeFromBase(const std::string& key
       case QueryUtils::ReturnColType::kOther: {
         auto field = edge_.back()->field(col);
         if (field == nullptr) {
-          values[col] = Value::kNullUnknownProp;
+          values[col] = Value::kNullValue;
         } else {
           auto retVal = QueryUtils::readValue(reader.get(), col, field);
           if (!retVal.ok()) {
diff --git a/src/storage/exec/IndexVertexScanNode.cpp b/src/storage/exec/IndexVertexScanNode.cpp
index 4dcfd01075c..fd2eed2ae0c 100644
--- a/src/storage/exec/IndexVertexScanNode.cpp
+++ b/src/storage/exec/IndexVertexScanNode.cpp
@@ -99,7 +99,7 @@ Map<std::string, Value> IndexVertexScanNode::decodeFromBase(const std::string& k
       case QueryUtils::ReturnColType::kOther: {
         auto field = tag_.back()->field(col);
         if (field == nullptr) {
-          values[col] = Value::kNullUnknownProp;
+          values[col] = Value::kNullValue;
         } else {
           auto retVal = QueryUtils::readValue(reader.get(), col, field);
           if (!retVal.ok()) {
diff --git a/src/storage/exec/QueryUtils.h b/src/storage/exec/QueryUtils.h
index 4a3c1499727..1fe1103cf17 100644
--- a/src/storage/exec/QueryUtils.h
+++ b/src/storage/exec/QueryUtils.h
@@ -79,7 +79,7 @@ class QueryUtils final {

     // read null value
     auto nullType = value.getNull();
-    if (nullType == NullType::UNKNOWN_PROP) {
+    if (nullType == NullType::__NULL__) {
       VLOG(1) << "Fail to read prop " << propName;
       if (!field) {
         return value;
diff --git a/src/tools/db-upgrade/DbUpgrader.cpp b/src/tools/db-upgrade/DbUpgrader.cpp
index c0834d87339..822f9cc7899 100644
--- a/src/tools/db-upgrade/DbUpgrader.cpp
+++ b/src/tools/db-upgrade/DbUpgrader.cpp
@@ -867,7 +867,7 @@ std::string UpgraderSpace::encodeRowVal(const RowReader* reader,
         LOG(ERROR) << "Write rowWriterV2 failed";
         return "";
       }
-    } else if (nullType != NullType::UNKNOWN_PROP) {
+    } else if (nullType != NullType::__NULL__) {
      // nullType == NullType::__NULL__ indicates that the field exists only
      // in the latest schema; it may take the default value or a null value.
LOG(ERROR) << "Data is illegal in " << name << " field"; diff --git a/tests/common/nebula_test_suite.py b/tests/common/nebula_test_suite.py index 83b746704b3..efc0c1f128c 100644 --- a/tests/common/nebula_test_suite.py +++ b/tests/common/nebula_test_suite.py @@ -34,8 +34,8 @@ T_NULL_BAD_DATA.set_nVal(CommonTtypes.NullType.BAD_DATA) T_NULL_BAD_TYPE = CommonTtypes.Value() T_NULL_BAD_TYPE.set_nVal(CommonTtypes.NullType.BAD_TYPE) -T_NULL_UNKNOWN_PROP = CommonTtypes.Value() -T_NULL_UNKNOWN_PROP.set_nVal(CommonTtypes.NullType.UNKNOWN_PROP) +T_NULL___NULL__ = CommonTtypes.Value() +T_NULL___NULL__.set_nVal(CommonTtypes.NullType.__NULL__) T_NULL_UNKNOWN_DIV_BY_ZERO = CommonTtypes.Value() T_NULL_UNKNOWN_DIV_BY_ZERO.set_nVal(CommonTtypes.NullType.DIV_BY_ZERO) diff --git a/tests/tck/features/expression/Attribute.feature b/tests/tck/features/expression/Attribute.feature index c471c3554c4..bfff234892f 100644 --- a/tests/tck/features/expression/Attribute.feature +++ b/tests/tck/features/expression/Attribute.feature @@ -61,8 +61,8 @@ Feature: Attribute RETURN {k1 : 1, k2: true}.K1 AS k """ Then the result should be, in any order: - | k | - | UNKNOWN_PROP | + | k | + | __NULL__ | When executing query: """ MATCH (v) WHERE id(v) == 'Tim Duncan' RETURN v.player.name @@ -101,28 +101,28 @@ Feature: Attribute """ Then the result should be, in any order: | not_exists_attr | - | UNKNOWN_PROP | + | __NULL__ | When executing query: """ RETURN time("02:59:40").not_exists_attr AS not_exists_attr """ Then the result should be, in any order: | not_exists_attr | - | UNKNOWN_PROP | + | __NULL__ | When executing query: """ RETURN datetime("2021-07-19T02:59:40").not_exists_attr AS not_exists_attr """ Then the result should be, in any order: | not_exists_attr | - | UNKNOWN_PROP | + | __NULL__ | When executing query: """ RETURN {k1 : 1, k2: true}.not_exists_attr AS not_exists_attr """ Then the result should be, in any order: | not_exists_attr | - | UNKNOWN_PROP | + | __NULL__ | When executing query: """ MATCH (v) WHERE id(v) == 'Tim Duncan' RETURN v.player.not_exists_attr diff --git a/tests/tck/features/match/With.feature b/tests/tck/features/match/With.feature index 8cf68f1edd1..9f76a59e9b7 100644 --- a/tests/tck/features/match/With.feature +++ b/tests/tck/features/match/With.feature @@ -94,8 +94,8 @@ Feature: With clause RETURN x.c """ Then the result should be, in any order: - | x.c | - | UNKNOWN_PROP | + | x.c | + | __NULL__ | Scenario: match with return When executing query: diff --git a/tests/tck/features/optimizer/PrunePropertiesRule.feature b/tests/tck/features/optimizer/PrunePropertiesRule.feature index 5429bf2fb82..856301020e9 100644 --- a/tests/tck/features/optimizer/PrunePropertiesRule.feature +++ b/tests/tck/features/optimizer/PrunePropertiesRule.feature @@ -747,9 +747,9 @@ Feature: Prune Properties rule """ Then the result should be, in order, with relax comparison: | properties(src_v).age | properties(e).degree | name | src_v.player.sex | e.start_year | dst_v.player.age | - | 41 | UNKNOWN_PROP | "Dejounte Murray" | "男" | 2022 | 29 | + | 41 | __NULL__ | "Dejounte Murray" | "男" | 2022 | 29 | | 41 | 88 | "Spurs" | "男" | 2002 | NULL | - | 41 | UNKNOWN_PROP | "Tiago Splitter" | "男" | 2022 | 34 | + | 41 | __NULL__ | "Tiago Splitter" | "男" | 2022 | 34 | When executing query: """ match (src_v:player{name:"Manu Ginobili"})-[e*2]-(dst_v) @@ -759,10 +759,10 @@ Feature: Prune Properties rule Then the result should be, in order, with relax comparison: | properties(src_v).sex | properties(e[0]).degree | properties(dst_v).name 
| age | e[1].start_year | dst_v.player.age | | "男" | 88 | "Danny Green" | 41 | 2010 | 31 | - | "男" | UNKNOWN_PROP | "Danny Green" | 41 | 2022 | 31 | - | "男" | UNKNOWN_PROP | "LeBron James" | 41 | 2022 | 34 | + | "男" | __NULL__ | "Danny Green" | 41 | 2022 | 31 | + | "男" | __NULL__ | "LeBron James" | 41 | 2022 | 34 | | "男" | 88 | "Cory Joseph" | 41 | 2011 | 27 | - | "男" | UNKNOWN_PROP | "76ers" | 41 | 2017 | NULL | + | "男" | __NULL__ | "76ers" | 41 | 2017 | NULL | When executing query: """ match (src_v:player{name:"Manu Ginobili"})-[e:like*2..3]-(dst_v) @@ -771,11 +771,11 @@ Feature: Prune Properties rule """ Then the result should be, in order, with relax comparison: | properties(src_v).sex | properties(e[0]).degree | properties(dst_v).name | age | e[1].start_year | dst_v.player.age | - | "男" | UNKNOWN_PROP | "Danny Green" | 41 | 2022 | 31 | - | "男" | UNKNOWN_PROP | "Danny Green" | 41 | 2022 | 31 | - | "男" | UNKNOWN_PROP | "Kyle Anderson" | 41 | 2022 | 25 | - | "男" | UNKNOWN_PROP | "LeBron James" | 41 | 2022 | 34 | - | "男" | UNKNOWN_PROP | "Kevin Durant" | 41 | 2022 | 30 | + | "男" | __NULL__ | "Danny Green" | 41 | 2022 | 31 | + | "男" | __NULL__ | "Danny Green" | 41 | 2022 | 31 | + | "男" | __NULL__ | "Kyle Anderson" | 41 | 2022 | 25 | + | "男" | __NULL__ | "LeBron James" | 41 | 2022 | 34 | + | "男" | __NULL__ | "Kevin Durant" | 41 | 2022 | 30 | When executing query: """ match (v1)-->(v2)-->(v3) where id(v1)=="Manu Ginobili" @@ -841,9 +841,9 @@ Feature: Prune Properties rule Then the result should be, in order, with relax comparison: | properties(e).degree | degree | | 88 | 88 | - | UNKNOWN_PROP | 88 | + | __NULL__ | 88 | | 88 | 88 | - | UNKNOWN_PROP | 88 | + | __NULL__ | 88 | | 88 | 88 | When executing query: """ @@ -851,9 +851,9 @@ Feature: Prune Properties rule """ Then the result should be, in order, with relax comparison: | properties(e).degree1 | properties(e).degree1 | e2.a | dst_v.p.name | dst_v.player.sex1 | properties(src_v).name2 | - | UNKNOWN_PROP | UNKNOWN_PROP | NULL | NULL | NULL | UNKNOWN_PROP | - | UNKNOWN_PROP | UNKNOWN_PROP | NULL | NULL | NULL | UNKNOWN_PROP | - | UNKNOWN_PROP | UNKNOWN_PROP | NULL | NULL | NULL | UNKNOWN_PROP | - | UNKNOWN_PROP | UNKNOWN_PROP | NULL | NULL | NULL | UNKNOWN_PROP | - | UNKNOWN_PROP | UNKNOWN_PROP | NULL | NULL | NULL | UNKNOWN_PROP | + | __NULL__ | __NULL__ | NULL | NULL | NULL | __NULL__ | + | __NULL__ | __NULL__ | NULL | NULL | NULL | __NULL__ | + | __NULL__ | __NULL__ | NULL | NULL | NULL | __NULL__ | + | __NULL__ | __NULL__ | NULL | NULL | NULL | __NULL__ | + | __NULL__ | __NULL__ | NULL | NULL | NULL | __NULL__ | Then drop the used space diff --git a/tests/tck/features/parser/nebula.feature b/tests/tck/features/parser/nebula.feature index 7e802ce4155..9d9fc4e09e1 100644 --- a/tests/tck/features/parser/nebula.feature +++ b/tests/tck/features/parser/nebula.feature @@ -17,7 +17,7 @@ Feature: Value parsing | BAD_DATA | BAD_DATA | | BAD_TYPE | BAD_TYPE | | OVERFLOW | ERR_OVERFLOW | - | UNKNOWN_PROP | UNKNOWN_PROP | + | __NULL__ | NULL | | DIV_BY_ZERO | DIV_BY_ZERO | | OUT_OF_RANGE | OUT_OF_RANGE | | 123 | iVal | diff --git a/tests/tck/utils/nbv.py b/tests/tck/utils/nbv.py index 6bc4e3a1714..fb75b0569ae 100644 --- a/tests/tck/utils/nbv.py +++ b/tests/tck/utils/nbv.py @@ -42,7 +42,7 @@ 'BAD_DATA', 'BAD_TYPE', 'OVERFLOW', - 'UNKNOWN_PROP', + '__NULL__', 'DIV_BY_ZERO', 'OUT_OF_RANGE', 'FLOAT', @@ -100,9 +100,9 @@ def t_OVERFLOW(t): return t -def t_UNKNOWN_PROP(t): - r'UNKNOWN_PROP' - t.value = Value(nVal=NullType.UNKNOWN_PROP) +def t___NULL__(t): 
+    r'__NULL__'
+    t.value = Value(nVal=NullType.__NULL__)
    return t


@@ -239,7 +239,7 @@ def p_expr(p):
          | BAD_DATA
          | BAD_TYPE
          | OVERFLOW
-          | UNKNOWN_PROP
+          | __NULL__
          | DIV_BY_ZERO
          | OUT_OF_RANGE
          | INT
@@ -587,7 +587,7 @@ def parse_row(row):
    expected['BAD_DATA'] = Value(nVal=NullType.BAD_DATA)
    expected['BAD_TYPE'] = Value(nVal=NullType.BAD_TYPE)
    expected['OVERFLOW'] = Value(nVal=NullType.ERR_OVERFLOW)
-    expected['UNKNOWN_PROP'] = Value(nVal=NullType.UNKNOWN_PROP)
+    expected['__NULL__'] = Value(nVal=NullType.__NULL__)
    expected['DIV_BY_ZERO'] = Value(nVal=NullType.DIV_BY_ZERO)
    expected['OUT_OF_RANGE'] = Value(nVal=NullType.OUT_OF_RANGE)
    expected['123'] = Value(iVal=123)

From 298c7d9baff37472ed22f8d88d3eb6e4d008c62b Mon Sep 17 00:00:00 2001
From: codesigner
Date: Tue, 29 Nov 2022 10:04:28 +0800
Subject: [PATCH 7/7] fix SubGraph edge type in filter does not exist in
 traversed edge type (#4941)

* fix SubGraph edge type in filter does not exist in traversed edge type

* fix tck

* fix tck

* fix tck

* minor
---
 src/graph/context/ast/QueryAstContext.h      |  1 +
 src/graph/validator/GetSubgraphValidator.cpp | 40 +++++++++++++++-----
 tests/tck/features/subgraph/subgraph.feature | 27 +++++++++++++
 3 files changed, 59 insertions(+), 9 deletions(-)

diff --git a/src/graph/context/ast/QueryAstContext.h b/src/graph/context/ast/QueryAstContext.h
index 8850c435ff6..f50658a439b 100644
--- a/src/graph/context/ast/QueryAstContext.h
+++ b/src/graph/context/ast/QueryAstContext.h
@@ -132,6 +132,7 @@ struct SubgraphContext final : public AstContext {
   Expression* tagFilter{nullptr};
   Expression* edgeFilter{nullptr};
   std::vector<std::string> colNames;
+  std::unordered_set<std::string> edgeNames;
   std::unordered_set<EdgeType> edgeTypes;
   std::unordered_set<EdgeType> biDirectEdgeTypes;
   std::vector<Value::Type> colType;
diff --git a/src/graph/validator/GetSubgraphValidator.cpp b/src/graph/validator/GetSubgraphValidator.cpp
index e2350c1d326..4e2989fe20d 100644
--- a/src/graph/validator/GetSubgraphValidator.cpp
+++ b/src/graph/validator/GetSubgraphValidator.cpp
@@ -33,6 +33,7 @@ Status GetSubgraphValidator::validateImpl() {
 // Validate in-bound edge types
 Status GetSubgraphValidator::validateInBound(InBoundClause* in) {
   auto& edgeTypes = subgraphCtx_->edgeTypes;
+  auto& edgeNames = subgraphCtx_->edgeNames;
   if (in != nullptr) {
     auto space = vctx_->whichSpace();
     auto edges = in->edges();
@@ -42,8 +43,10 @@ Status GetSubgraphValidator::validateInBound(InBoundClause* in) {
         return Status::SemanticError("Get Subgraph not support rename edge name.");
       }

-      auto et = qctx_->schemaMng()->toEdgeType(space.id, *e->edge());
+      std::string edgeName = *e->edge();
+      auto et = qctx_->schemaMng()->toEdgeType(space.id, edgeName);
       NG_RETURN_IF_ERROR(et);
+      edgeNames.emplace(edgeName);

       auto v = -et.value();
       edgeTypes.emplace(v);
@@ -56,6 +59,7 @@ Status GetSubgraphValidator::validateInBound(InBoundClause* in) {
 // Validate out-bound edge types
 Status GetSubgraphValidator::validateOutBound(OutBoundClause* out) {
   auto& edgeTypes = subgraphCtx_->edgeTypes;
+  auto& edgeNames = subgraphCtx_->edgeNames;
   if (out != nullptr) {
     auto space = vctx_->whichSpace();
     auto edges = out->edges();
@@ -64,10 +68,10 @@ Status GetSubgraphValidator::validateOutBound(OutBoundClause* out) {
       if (e->alias() != nullptr) {
         return Status::SemanticError("Get Subgraph not support rename edge name.");
       }
-
-      auto et = qctx_->schemaMng()->toEdgeType(space.id, *e->edge());
+      std::string edgeName = *e->edge();
+      auto et = qctx_->schemaMng()->toEdgeType(space.id, edgeName);
       NG_RETURN_IF_ERROR(et);
-
+      edgeNames.emplace(edgeName);
       edgeTypes.emplace(et.value());
     }
   }
@@ -79,8 +83,9 @@ Status GetSubgraphValidator::validateOutBound(OutBoundClause* out) {
 Status GetSubgraphValidator::validateBothInOutBound(BothInOutClause* out) {
   auto& edgeTypes = subgraphCtx_->edgeTypes;
   auto& biEdgeTypes = subgraphCtx_->biDirectEdgeTypes;
+  auto& edgeNames = subgraphCtx_->edgeNames;
   if (out != nullptr) {
-    auto space = vctx_->whichSpace();
+    auto& space = vctx_->whichSpace();
     auto edges = out->edges();
     edgeTypes.reserve(edgeTypes.size() + edges.size() * 2);
     biEdgeTypes.reserve(edges.size() * 2);
@@ -88,10 +93,10 @@ Status GetSubgraphValidator::validateBothInOutBound(BothInOutClause* out) {
       if (e->alias() != nullptr) {
         return Status::SemanticError("Get Subgraph not support rename edge name.");
       }
-
-      auto et = qctx_->schemaMng()->toEdgeType(space.id, *e->edge());
+      std::string edgeName = *e->edge();
+      auto et = qctx_->schemaMng()->toEdgeType(space.id, edgeName);
       NG_RETURN_IF_ERROR(et);
-
+      edgeNames.emplace(edgeName);
       auto v = et.value();
       edgeTypes.emplace(v);
       edgeTypes.emplace(-v);
@@ -146,12 +151,29 @@ Status GetSubgraphValidator::validateWhere(WhereClause* where) {

   NG_RETURN_IF_ERROR(deduceProps(filter, subgraphCtx_->exprProps));

+  // Check that every edge type used in the filter is among the traversed edge types,
+  // e.g. "like" is not in the edge list ["serve"]:
+  // GET SUBGRAPH FROM 'xxx' both serve WHERE like.likeness < 90 YIELD vertices as v, edges as e
+  if (!subgraphCtx_->edgeNames.empty()) {
+    for (auto edgeProp : subgraphCtx_->exprProps.edgeProps()) {
+      auto filterEdgeName = qctx_->schemaMng()->toEdgeName(vctx_->whichSpace().id, edgeProp.first);
+      NG_RETURN_IF_ERROR(filterEdgeName);
+      if (subgraphCtx_->edgeNames.find(filterEdgeName.value()) == subgraphCtx_->edgeNames.end()) {
+        return Status::SemanticError(
+            fmt::format("Edge type \"{}\" in filter \"{}\" is not in the edge types [{}]",
+                        filterEdgeName.value(),
+                        filter->toString(),
+                        folly::join(",", subgraphCtx_->edgeNames)));
+      }
+    }
+  }
+
   auto condition = filter->clone();
   if (ExpressionUtils::findAny(expr, {Expression::Kind::kDstProperty})) {
     auto visitor = ExtractFilterExprVisitor::makePushGetVertices(qctx_->objPool());
     filter->accept(&visitor);
     if (!visitor.ok()) {
-      return Status::SemanticError("filter error");
+      return Status::SemanticError("Push target vertices filter error: " + expr->toString());
     }
     subgraphCtx_->edgeFilter = visitor.remainedExpr();
     auto tagFilter = visitor.extractedExpr() ? visitor.extractedExpr() : filter;
diff --git a/tests/tck/features/subgraph/subgraph.feature b/tests/tck/features/subgraph/subgraph.feature
index 3bd67c539ea..9169159d30f 100644
--- a/tests/tck/features/subgraph/subgraph.feature
+++ b/tests/tck/features/subgraph/subgraph.feature
@@ -1018,6 +1018,33 @@ Feature: subgraph
      | <[vertex4]> | <[edge4]> |
      | <[vertex5]> | []        |

+  Scenario: Filter on edge type
+    When executing query:
+      """
+      GET SUBGRAPH FROM 'Tim Duncan' BOTH serve WHERE like.likeness < 90 YIELD vertices as v, edges as e
+      """
+    Then a SemanticError should be raised at runtime: Edge type "like" in filter "(like.likeness<90)" is not in the edge types [serve]
+    When executing query:
+      """
+      GET SUBGRAPH FROM 'Tim Duncan' BOTH serve,teammate WHERE like.likeness >= 90 YIELD vertices as v, edges as e
+      """
+    Then a SemanticError should be raised at runtime: Edge type "like" in filter "(like.likeness>=90)" is not in the edge types [teammate,serve]
+    When executing query:
+      """
+      GET SUBGRAPH FROM 'Tim Duncan' BOTH serve WHERE serve.start_year < 1997 YIELD vertices as v, edges as e
+      """
+    Then the result should be, in any order, with relax comparison:
+      | v                                      | e  |
+      | [("Tim Duncan" :player{} :bachelor{})] | [] |
+    When executing query:
+      """
+      GET SUBGRAPH FROM 'Tim Duncan' BOTH serve WHERE serve.start_year >= 1997 YIELD vertices as v, edges as e
+      """
+    Then the result should be, in any order, with relax comparison:
+      | v                                      | e                                                      |
+      | [("Tim Duncan" :player{} :bachelor{})] | [[:serve "Tim Duncan"->"Spurs" @0 {start_year: 1997}]] |
+      | [("Spurs" :team{})]                    | []                                                     |
+
  Scenario: Get subgraph in a space which doesn't have edge schema
    Given an empty graph
    And create a space with following options: