diff --git a/src/graph/executor/admin/SessionExecutor.cpp b/src/graph/executor/admin/SessionExecutor.cpp
index 543d27e56db..b4e4a852288 100644
--- a/src/graph/executor/admin/SessionExecutor.cpp
+++ b/src/graph/executor/admin/SessionExecutor.cpp
@@ -18,9 +18,8 @@ folly::Future<Status> ShowSessionsExecutor::execute() {
   auto *showNode = asNode<ShowSessions>(node());
   if (showNode->isSetSessionID()) {
     return getSession(showNode->getSessionId());
-  } else {
-    return listSessions();
   }
+  return showNode->isLocalCommand() ? listLocalSessions() : listSessions();
 }

 folly::Future<Status> ShowSessionsExecutor::listSessions() {
@@ -31,6 +30,7 @@ folly::Future<Status> ShowSessionsExecutor::listSessions() {
       return Status::Error("Show sessions failed: %s.", resp.status().toString().c_str());
     }
     auto sessions = resp.value().get_sessions();
+    // Construct result column names
     DataSet result({"SessionId",
                     "UserName",
                     "SpaceName",
@@ -40,21 +40,28 @@ folly::Future<Status> ShowSessionsExecutor::listSessions() {
                     "Timezone",
                     "ClientIp"});
     for (auto &session : sessions) {
-      Row row;
-      row.emplace_back(session.get_session_id());
-      row.emplace_back(session.get_user_name());
-      row.emplace_back(session.get_space_name());
-      row.emplace_back(microSecToDateTime(session.get_create_time()));
-      row.emplace_back(microSecToDateTime(session.get_update_time()));
-      row.emplace_back(network::NetworkUtils::toHostsStr({session.get_graph_addr()}));
-      row.emplace_back(session.get_timezone());
-      row.emplace_back(session.get_client_ip());
-      result.emplace_back(std::move(row));
+      addSessions(session, result);
     }
     return finish(ResultBuilder().value(Value(std::move(result))).build());
   });
 }

+folly::Future<Status> ShowSessionsExecutor::listLocalSessions() {
+  auto localSessions = qctx_->rctx()->sessionMgr()->getSessionFromLocalCache();
+  DataSet result({"SessionId",
+                  "UserName",
+                  "SpaceName",
+                  "CreateTime",
+                  "UpdateTime",
+                  "GraphAddr",
+                  "Timezone",
+                  "ClientIp"});
+  for (auto &session : localSessions) {
+    addSessions(session, result);
+  }
+  return finish(ResultBuilder().value(Value(std::move(result))).build());
+}
+
 folly::Future<Status> ShowSessionsExecutor::getSession(SessionID sessionId) {
   return qctx()->getMetaClient()->getSession(sessionId).via(runner()).thenValue(
       [this, sessionId](StatusOr<meta::cpp2::GetSessionResp> resp) {
@@ -64,20 +71,34 @@ folly::Future<Status> ShowSessionsExecutor::getSession(SessionID sessionId) {
               "Get session `%ld' failed: %s.", sessionId, resp.status().toString().c_str());
         }
         auto session = resp.value().get_session();
-        DataSet result({"VariableName", "Value"});
-        result.emplace_back(Row({"SessionID", session.get_session_id()}));
-        result.emplace_back(Row({"UserName", session.get_user_name()}));
-        result.emplace_back(Row({"SpaceName", session.get_space_name()}));
-        result.emplace_back(Row({"CreateTime", microSecToDateTime(session.get_create_time())}));
-        result.emplace_back(Row({"UpdateTime", microSecToDateTime(session.get_update_time())}));
-        result.emplace_back(
-            Row({"GraphAddr", network::NetworkUtils::toHostsStr({session.get_graph_addr()})}));
-        result.emplace_back(Row({"Timezone", session.get_timezone()}));
-        result.emplace_back(Row({"ClientIp", session.get_client_ip()}));
+
+        // Construct result column names
+        DataSet result({"SessionId",
+                        "UserName",
+                        "SpaceName",
+                        "CreateTime",
+                        "UpdateTime",
+                        "GraphAddr",
+                        "Timezone",
+                        "ClientIp"});
+        addSessions(session, result);
         return finish(ResultBuilder().value(Value(std::move(result))).build());
       });
 }

+void ShowSessionsExecutor::addSessions(const meta::cpp2::Session &session, DataSet &dataSet) const {
+  Row row;
+  row.emplace_back(session.get_session_id());
+  row.emplace_back(session.get_user_name());
+  row.emplace_back(session.get_space_name());
+  row.emplace_back(microSecToDateTime(session.get_create_time()));
+  row.emplace_back(microSecToDateTime(session.get_update_time()));
+  row.emplace_back(network::NetworkUtils::toHostsStr({session.get_graph_addr()}));
+  row.emplace_back(session.get_timezone());
+  row.emplace_back(session.get_client_ip());
+  dataSet.emplace_back(std::move(row));
+}
+
 folly::Future<Status> UpdateSessionExecutor::execute() {
   VLOG(1) << "Update sessions to metad";
   SCOPED_TIMER(&execTime_);
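For reference: with the executor change above, `SHOW SESSION <id>` no longer returns VariableName/Value pairs; it now returns a single row with the same column set as `SHOW SESSIONS`. A minimal, illustrative sketch of the new result shape (the session id is a placeholder, not taken from this diff):

    SHOW SESSION 123;
    # => one row with columns:
    #    SessionId | UserName | SpaceName | CreateTime | UpdateTime | GraphAddr | Timezone | ClientIp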
diff --git a/src/graph/executor/admin/SessionExecutor.h b/src/graph/executor/admin/SessionExecutor.h
index 79dc9909619..110fbf687c3 100644
--- a/src/graph/executor/admin/SessionExecutor.h
+++ b/src/graph/executor/admin/SessionExecutor.h
@@ -21,11 +21,16 @@ class ShowSessionsExecutor final : public Executor {
   folly::Future<Status> execute() override;

  private:
+  // List sessions in the cluster
   folly::Future<Status> listSessions();
+  // List sessions in the current graph node
+  folly::Future<Status> listLocalSessions();

   folly::Future<Status> getSession(SessionID sessionId);

+  // Add session info into dataset
+  void addSessions(const meta::cpp2::Session &session, DataSet &dataSet) const;
-  DateTime microSecToDateTime(int64_t microSec) {
+  DateTime microSecToDateTime(const int64_t microSec) const {
     auto dateTime = time::TimeConversion::unixSecondsToDateTime(microSec / 1000000);
     dateTime.microsec = microSec % 1000000;
     return dateTime;
diff --git a/src/graph/planner/plan/Admin.h b/src/graph/planner/plan/Admin.h
index 796c57d2145..b81a8306490 100644
--- a/src/graph/planner/plan/Admin.h
+++ b/src/graph/planner/plan/Admin.h
@@ -1325,14 +1325,18 @@ class ShowSessions final : public SingleInputNode {
   static ShowSessions* make(QueryContext* qctx,
                             PlanNode* input,
                             bool isSetSessionID,
-                            SessionID sessionId) {
-    return qctx->objPool()->add(new ShowSessions(qctx, input, isSetSessionID, sessionId));
+                            SessionID sessionId,
+                            bool isLocalCommand) {
+    return qctx->objPool()->add(
+        new ShowSessions(qctx, input, isSetSessionID, sessionId, isLocalCommand));
   }

   bool isSetSessionID() const {
     return isSetSessionID_;
   }
-
+  bool isLocalCommand() const {
+    return isLocalCommand_;
+  }
   SessionID getSessionId() const {
     return sessionId_;
   }
@@ -1341,15 +1345,18 @@ class ShowSessions final : public SingleInputNode {
   explicit ShowSessions(QueryContext* qctx,
                         PlanNode* input,
                         bool isSetSessionID,
-                        SessionID sessionId)
+                        SessionID sessionId,
+                        bool isLocalCommand)
       : SingleInputNode(qctx, Kind::kShowSessions, input) {
-    isSetSessionID_ = isSetSessionID;
     sessionId_ = sessionId;
+    isSetSessionID_ = isSetSessionID;
+    isLocalCommand_ = isLocalCommand;
   }

  private:
-  bool isSetSessionID_{false};
   SessionID sessionId_{-1};
+  bool isSetSessionID_{false};
+  bool isLocalCommand_{false};
 };

 class UpdateSession final : public SingleInputNode {
diff --git a/src/graph/session/GraphSessionManager.cpp b/src/graph/session/GraphSessionManager.cpp
index 282f04a3b6d..a7ebdccbbc6 100644
--- a/src/graph/session/GraphSessionManager.cpp
+++ b/src/graph/session/GraphSessionManager.cpp
@@ -96,6 +96,15 @@ folly::Future<StatusOr<std::shared_ptr<ClientSession>>> GraphSessionManager::fin
   return metaClient_->getSession(id).via(runner).thenValue(addSession);
 }

+std::vector<meta::cpp2::Session> GraphSessionManager::getSessionFromLocalCache() const {
+  std::vector<meta::cpp2::Session> sessions;
+  sessions.reserve(activeSessions_.size());
+  for (auto& it : activeSessions_) {
+    sessions.emplace_back(it.second->getSession());
+  }
+  return sessions;
+}
+
 folly::Future<StatusOr<std::shared_ptr<ClientSession>>> GraphSessionManager::createSession(
     const std::string userName, const std::string clientIp, folly::Executor* runner) {
   auto createCB = [this,
diff --git a/src/graph/session/GraphSessionManager.h b/src/graph/session/GraphSessionManager.h
index ee941a422f5..0c7ea3d0c56 100644
--- a/src/graph/session/GraphSessionManager.h
+++ b/src/graph/session/GraphSessionManager.h
@@ -14,6 +14,7 @@
 #include "common/thrift/ThriftTypes.h"
 #include "graph/session/ClientSession.h"
 #include "interface/gen-cpp2/GraphService.h"
+#include "interface/gen-cpp2/meta_types.h"

 /**
  * GraphSessionManager manages the client sessions, e.g. create new, find
@@ -60,6 +61,9 @@ class GraphSessionManager final : public SessionManager {
    */
   std::shared_ptr<ClientSession> findSessionFromCache(SessionID id);

+  // Get all sessions in the local cache
+  std::vector<meta::cpp2::Session> getSessionFromLocalCache() const;
+
  private:
   folly::Future<StatusOr<std::shared_ptr<ClientSession>>> findSessionFromMetad(
       SessionID id, folly::Executor* runner);
diff --git a/src/graph/validator/AdminValidator.cpp b/src/graph/validator/AdminValidator.cpp
index 2d00a52a46d..ed9d40866c5 100644
--- a/src/graph/validator/AdminValidator.cpp
+++ b/src/graph/validator/AdminValidator.cpp
@@ -666,8 +666,11 @@ Status ShowSpaceServiceClientsValidator::toPlan() {

 Status ShowSessionsValidator::toPlan() {
   auto sentence = static_cast<ShowSessionsSentence*>(sentence_);
-  auto *node =
-      ShowSessions::make(qctx_, nullptr, sentence->isSetSessionID(), sentence->getSessionID());
+  auto *node = ShowSessions::make(qctx_,
+                                  nullptr,
+                                  sentence->isSetSessionID(),
+                                  sentence->getSessionID(),
+                                  sentence->isLocalCommand());
   root_ = node;
   tail_ = root_;
   return Status::OK();
diff --git a/src/parser/AdminSentences.cpp b/src/parser/AdminSentences.cpp
index 867cad32a73..ef469794d5c 100644
--- a/src/parser/AdminSentences.cpp
+++ b/src/parser/AdminSentences.cpp
@@ -510,13 +510,14 @@ std::string ShowSessionsSentence::toString() const {
   if (isSetSessionID()) {
     return folly::stringPrintf("SHOW SESSION %ld", sessionId_);
   }
+  if (isLocalCommand()) return "SHOW LOCAL SESSIONS";
   return "SHOW SESSIONS";
 }

 std::string ShowQueriesSentence::toString() const {
   std::string buf = "SHOW";
-  if (isAll()) {
-    buf += " ALL";
+  if (!isAll()) {
+    buf += " LOCAL";
   }
   buf += " QUERIES";
   return buf;
diff --git a/src/parser/AdminSentences.h b/src/parser/AdminSentences.h
index 816ee7feb0d..7c6c3f03f3f 100644
--- a/src/parser/AdminSentences.h
+++ b/src/parser/AdminSentences.h
@@ -822,10 +822,19 @@ class ShowSessionsSentence final : public Sentence {
     setSessionId_ = true;
   }

+  explicit ShowSessionsSentence(bool isLocalCommand) {
+    kind_ = Kind::kShowSessions;
+    isLocalCommand_ = isLocalCommand;
+  }
+
   bool isSetSessionID() const {
     return setSessionId_;
   }

+  bool isLocalCommand() const {
+    return isLocalCommand_;
+  }
+
   SessionID getSessionID() const {
     return sessionId_;
   }
@@ -835,6 +844,7 @@ class ShowSessionsSentence final : public Sentence {
  private:
   SessionID sessionId_{0};
   bool setSessionId_{false};
+  bool isLocalCommand_{false};
 };

 class ShowQueriesSentence final : public Sentence {
diff --git a/src/parser/parser.yy b/src/parser/parser.yy
index c36737884db..69cef927f7b 100644
--- a/src/parser/parser.yy
+++ b/src/parser/parser.yy
@@ -203,6 +203,7 @@ static constexpr size_t kCommentLengthLimit = 256;
 %token KW_TEXT KW_SEARCH KW_CLIENTS KW_SIGN KW_SERVICE KW_TEXT_SEARCH
 %token KW_ANY KW_SINGLE KW_NONE
 %token KW_REDUCE
+%token KW_LOCAL
 %token KW_SESSIONS KW_SESSION
 %token KW_KILL KW_QUERY KW_QUERIES KW_TOP
 %token KW_GEOGRAPHY KW_POINT KW_LINESTRING KW_POLYGON
@@ -546,6 +547,7 @@ unreserved_keyword
     | KW_S2_MAX_CELLS           { $$ = new std::string("s2_max_cells"); }
     | KW_SESSION                { $$ = new std::string("session"); }
     | KW_SESSIONS               { $$ = new std::string("sessions"); }
+    | KW_LOCAL                  { $$ = new std::string("local"); }
     | KW_SAMPLE                 { $$ = new std::string("sample"); }
     | KW_QUERIES                { $$ = new std::string("queries"); }
     | KW_QUERY                  { $$ = new std::string("query"); }
@@ -3317,10 +3319,10 @@ job_concurrency
     ;

 show_queries_sentence
-    : KW_SHOW KW_QUERIES {
+    : KW_SHOW KW_LOCAL KW_QUERIES {
         $$ = new ShowQueriesSentence();
     }
-    | KW_SHOW KW_ALL KW_QUERIES {
+    | KW_SHOW KW_QUERIES {
         $$ = new ShowQueriesSentence(true);
     }
     ;
@@ -3418,9 +3420,14 @@ show_sentence
     | KW_SHOW KW_FULLTEXT KW_INDEXES {
         $$ = new ShowFTIndexesSentence();
     }
+    // List sessions in the cluster
     | KW_SHOW KW_SESSIONS {
         $$ = new ShowSessionsSentence();
     }
+    // List sessions in the current graph node
+    | KW_SHOW KW_LOCAL KW_SESSIONS {
+        $$ = new ShowSessionsSentence(true);
+    }
     | KW_SHOW KW_SESSION legal_integer {
         $$ = new ShowSessionsSentence($3);
     }
diff --git a/src/parser/scanner.lex b/src/parser/scanner.lex
index 40f19c9482e..1669ad74025 100644
--- a/src/parser/scanner.lex
+++ b/src/parser/scanner.lex
@@ -274,6 +274,7 @@ LABEL_FULL_WIDTH {CN_EN_FULL_WIDTH}{CN_EN_NUM_FULL_WIDTH}*
 "COMMENT"                   { return TokenType::KW_COMMENT; }
 "S2_MAX_LEVEL"              { return TokenType::KW_S2_MAX_LEVEL; }
 "S2_MAX_CELLS"              { return TokenType::KW_S2_MAX_CELLS; }
+"LOCAL"                     { return TokenType::KW_LOCAL; }
 "SESSIONS"                  { return TokenType::KW_SESSIONS; }
 "SESSION"                   { return TokenType::KW_SESSION; }
 "SAMPLE"                    { return TokenType::KW_SAMPLE; }
@@ -518,7 +519,7 @@ LABEL_FULL_WIDTH {CN_EN_FULL_WIDTH}{CN_EN_NUM_FULL_WIDTH}*
  * including the non-ascii ones, which are negative
  * in terms of type of `signed char'. At the same time, because
  * Bison translates all negative tokens to EOF(i.e. YY_NULL),
- * so we have to cast illegal characters to type of `unsinged char'
+ * so we have to cast illegal characters to type of `unsigned char'
  * This will make Bison receive an unknown token, which leads to
  * a syntax error.
  *
diff --git a/src/parser/test/ParserTest.cpp b/src/parser/test/ParserTest.cpp
index 71431c7471d..76e82948b33 100644
--- a/src/parser/test/ParserTest.cpp
+++ b/src/parser/test/ParserTest.cpp
@@ -3253,6 +3253,12 @@ TEST_F(ParserTest, SessionTest) {
     ASSERT_TRUE(result.ok()) << result.status();
     ASSERT_EQ(result.value()->toString(), "SHOW SESSIONS");
   }
+  {
+    std::string query = "SHOW LOCAL SESSIONS";
+    auto result = parse(query);
+    ASSERT_TRUE(result.ok()) << result.status();
+    ASSERT_EQ(result.value()->toString(), "SHOW LOCAL SESSIONS");
+  }
   {
     std::string query = "SHOW SESSION 123";
     auto result = parse(query);
@@ -3301,10 +3307,10 @@ TEST_F(ParserTest, ShowAndKillQueryTest) {
     ASSERT_EQ(result.value()->toString(), "SHOW QUERIES");
   }
   {
-    std::string query = "SHOW ALL QUERIES";
+    std::string query = "SHOW LOCAL QUERIES";
     auto result = parse(query);
     ASSERT_TRUE(result.ok()) << result.status();
-    ASSERT_EQ(result.value()->toString(), "SHOW ALL QUERIES");
+    ASSERT_EQ(result.value()->toString(), "SHOW LOCAL QUERIES");
   }
   {
     std::string query = "KILL QUERY (plan=123)";
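For reference, the grammar changes above flip the default scope of these statements. A short, illustrative summary of the resulting syntax (statement forms only; behavior as described by the executor and grammar changes in this diff):

    SHOW SESSIONS;        # all sessions in the cluster, fetched from the meta service
    SHOW LOCAL SESSIONS;  # only sessions cached on the graphd the client is connected to
    SHOW QUERIES;         # queries on all graphd instances (previously SHOW ALL QUERIES)
    SHOW LOCAL QUERIES;   # only queries on the current graphd (previously SHOW QUERIES)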
""" # cluster is the instance of NebulaService + # current_session is the session currently using + # sessions is a list of all sessions in the cluster res = dict( pool=None, - session=None, + current_session=None, cluster=None, + sessions=[], ) yield res - if res["session"] is not None: - res["session"].release() + for sess in res["sessions"]: + if sess is not None: + sess.release() if res["pool"] is not None: res["pool"].close() if res["cluster"] is not None: @@ -176,8 +180,8 @@ def session_from_second_conn_pool(conn_pool_to_second_graph_service, pytestconfi @pytest.fixture(scope="class") def session(session_from_first_conn_pool, class_fixture_variables): - if class_fixture_variables.get('session', None) is not None: - return class_fixture_variables.get('session') + if class_fixture_variables.get('current_session', None) is not None: + return class_fixture_variables.get('current_session') return session_from_first_conn_pool diff --git a/tests/job/test_session.py b/tests/job/test_session.py index 7f89a960633..5c9736c1dbf 100644 --- a/tests/job/test_session.py +++ b/tests/job/test_session.py @@ -102,19 +102,25 @@ def test_sessions(self): time.sleep(3) resp = self.execute('SHOW SESSION {}'.format(session_id)) self.check_resp_succeeded(resp) - expect_col_names = ['VariableName', 'Value'] - expect_result = [['SessionID'], - ['UserName'], - ['SpaceName'], - ['CreateTime'], - ['UpdateTime'], - ['GraphAddr'], - ['Timezone'], - ['ClientIp']] + expect_col_names = ['SessionId', + 'UserName', + 'SpaceName', + 'CreateTime', + 'UpdateTime', + 'GraphAddr', + 'Timezone', + 'ClientIp'] + self.check_column_names(resp, expect_col_names) - self.check_result(resp, expect_result, ignore_col=[1]) - assert resp.rows()[1].values[1].get_sVal() == b'session_user' - assert resp.rows()[2].values[1].get_sVal() == b'nba' + + assert len(resp.rows()) == 1 + + row = resp.rows()[0] + assert row.values[0].get_iVal() == session_id + assert row.values[1].get_sVal() == b'session_user' + assert row.values[2].get_sVal() == b'nba' + assert row.values[3].getType() == ttypes.Value.DTVAL, f"resp: {resp}" + assert row.values[4].getType() == ttypes.Value.DTVAL, f"resp: {resp}" # 5: test expired session resp = self.execute('UPDATE CONFIGS graph:session_idle_timeout_secs = 5') @@ -190,9 +196,9 @@ def do_test(connection, sid, num): def test_out_of_max_connections(self): resp = self.execute('SHOW SESSIONS') self.check_resp_succeeded(resp) - current_sessions = len(resp.rows()) + sessions = len(resp.rows()) - resp = self.execute('UPDATE CONFIGS graph:max_allowed_connections = {}'.format(current_sessions)) + resp = self.execute('UPDATE CONFIGS graph:max_allowed_connections = {}'.format(sessions)) self.check_resp_succeeded(resp) time.sleep(3) diff --git a/tests/tck/conftest.py b/tests/tck/conftest.py index f1f71a321d8..b2fad24152d 100644 --- a/tests/tck/conftest.py +++ b/tests/tck/conftest.py @@ -299,14 +299,14 @@ def given_nebulacluster_with_param( class_fixture_variables, pytestconfig, ): - grpahd_param, metad_param, storaged_param = {}, {}, {} + graphd_param, metad_param, storaged_param = {}, {}, {} if params is not None: for param in params.splitlines(): module, config = param.strip().split(":") assert module.lower() in ["graphd", "storaged", "metad"] key, value = config.strip().split("=") if module.lower() == "graphd": - grpahd_param[key] = value + graphd_param[key] = value elif module.lower() == "storaged": storaged_param[key] = value else: @@ -324,7 +324,7 @@ def given_nebulacluster_with_param( int(graphd_num), ) for process in 
     for process in nebula_svc.graphd_processes:
-        process.update_param(grpahd_param)
+        process.update_param(graphd_param)
     for process in nebula_svc.storaged_processes:
         process.update_param(storaged_param)
     for process in nebula_svc.metad_processes:
@@ -339,7 +339,8 @@ def given_nebulacluster_with_param(
     graph_port = nebula_svc.graphd_processes[0].tcp_port
     pool = get_conn_pool(graph_ip, graph_port)
     sess = pool.get_session(user, password)
-    class_fixture_variables["session"] = sess
+    class_fixture_variables["current_session"] = sess
+    class_fixture_variables["sessions"].append(sess)
     class_fixture_variables["cluster"] = nebula_svc
     class_fixture_variables["pool"] = pool

@@ -356,7 +357,8 @@ def when_login_graphd(graph, user, password, class_fixture_variables, pytestconf
     sess = pool.get_session(user, password)
     # do not release original session, as we may have cases to test multiple sessions.
     # connection could be released after cluster stopped.
-    class_fixture_variables["session"] = sess
+    class_fixture_variables["current_session"] = sess
+    class_fixture_variables["sessions"].append(sess)
     class_fixture_variables["pool"] = pool

 @when(parse("executing query:\n{query}"))
diff --git a/tests/tck/features/admin/Sessions.feature b/tests/tck/features/admin/Sessions.feature
index af935515c4a..da1181fd5b7 100644
--- a/tests/tck/features/admin/Sessions.feature
+++ b/tests/tck/features/admin/Sessions.feature
@@ -12,12 +12,12 @@ Feature: Test sessions
       SHOW SESSIONS;
       """
     Then the result should contain:
-      | SessionId | UserName | SpaceName | CreateTime | UpdateTime | GraphAddr | Timezone | ClientIp         |
-      | /\d+/     | "root"   | ""        | /.*/       | /.*/       | /.*/      | 0        | /.*(127.0.0.1)$/ |
+      | SessionId | UserName | SpaceName | CreateTime | UpdateTime | GraphAddr | Timezone | ClientIp            |
+      | /\d+/     | "root"   | ""        | /.*/       | /.*/       | /.*/      | 0        | /.*(127\.0\.0\.1)$/ |
     When executing query:
       """
       CREATE USER user1 WITH PASSWORD 'nebula1';
-      CREATE SPACE s1(vid_type=int);
+      CREATE SPACE IF NOT EXISTS s1(vid_type=int);
       USE s1;
       """
     Then the execution should be successful
@@ -28,6 +28,45 @@ Feature: Test sessions
       SHOW SESSIONS;
       """
     Then the result should contain, replace the holders with cluster info:
-      | SessionId | UserName | SpaceName | CreateTime | UpdateTime | GraphAddr | Timezone | ClientIp |
-      | /\d+/ | "root" | "s1" | /.*/ | /.*/ | "127.0.0.1:${cluster.graphd_processes[0].tcp_port}" | 0 | /.*(127.0.0.1)$/ |
-      | /\d+/ | "user1" | "" | /.*/ | /.*/ | "127.0.0.1:${cluster.graphd_processes[1].tcp_port}" | 0 | /.*(127.0.0.1)$/ |
+      | SessionId | UserName | SpaceName | CreateTime | UpdateTime | GraphAddr | Timezone | ClientIp |
+      | /\d+/ | "root" | "s1" | /.*/ | /.*/ | "127.0.0.1:${cluster.graphd_processes[0].tcp_port}" | 0 | /.*(127\.0\.0\.1)$/ |
+      | /\d+/ | "user1" | "" | /.*/ | /.*/ | "127.0.0.1:${cluster.graphd_processes[1].tcp_port}" | 0 | /.*(127\.0\.0\.1)$/ |
+
+  Scenario: Show local sessions
+    When executing query:
+      """
+      SHOW SESSIONS;
+      """
+    Then the result should contain:
+      | SessionId | UserName | SpaceName | CreateTime | UpdateTime | GraphAddr | Timezone | ClientIp            |
+      | /\d+/     | "root"   | ""        | /.*/       | /.*/       | /.*/      | 0        | /.*(127\.0\.0\.1)$/ |
+    When executing query:
+      """
+      CREATE USER IF NOT EXISTS user1 WITH PASSWORD 'nebula1';
+      CREATE USER IF NOT EXISTS user2 WITH PASSWORD 'nebula2';
+      CREATE SPACE IF NOT EXISTS root_space(vid_type=int);
+      USE root_space;
+      """
+    Then the execution should be successful
+    And wait 3 seconds
+    When login "graphd[1]" with "user1" and "nebula1"
+    Then the execution should be successful
and "nebula2" + Then the execution should be successful + And wait 3 seconds + When executing query: + """ + SHOW SESSIONS; + """ + Then the result should contain, replace the holders with cluster info: + | SessionId | UserName | SpaceName | CreateTime | UpdateTime | GraphAddr | Timezone | ClientIp | + | /\d+/ | "root" | "root_space" | /.*/ | /.*/ | "127.0.0.1:${cluster.graphd_processes[0].tcp_port}" | 0 | /.*(127\.0\.0\.1)$/ | + | /\d+/ | "user1" | "" | /.*/ | /.*/ | "127.0.0.1:${cluster.graphd_processes[1].tcp_port}" | 0 | /.*(127\.0\.0\.1)$/ | + | /\d+/ | "user2" | "" | /.*/ | /.*/ | "127.0.0.1:${cluster.graphd_processes[2].tcp_port}" | 0 | /.*(127\.0\.0\.1)$/ | + When executing query: + """ + SHOW LOCAL SESSIONS; + """ + Then the result should contain, replace the holders with cluster info: + | SessionId | UserName | SpaceName | CreateTime | UpdateTime | GraphAddr | Timezone | ClientIp | + | /\d+/ | "root" | "root_space" | /.*/ | /.*/ | "127.0.0.1:${cluster.graphd_processes[0].tcp_port}" | 0 | /.*(127\.0\.0\.1)$/ | diff --git a/tests/tck/slowquery/KillSlowQueryViaDiffrentService.feature b/tests/tck/slowquery/KillSlowQueryViaDiffrentService.feature index c2998b71d90..63a6fe57642 100644 --- a/tests/tck/slowquery/KillSlowQueryViaDiffrentService.feature +++ b/tests/tck/slowquery/KillSlowQueryViaDiffrentService.feature @@ -16,26 +16,26 @@ Feature: Slow Query Test Scenario: Show all queries and kill all slow queries at second graph service When executing query via graph 1: """ - SHOW QUERIES + SHOW LOCAL QUERIES """ Then the execution should be successful When executing query via graph 1: """ - SHOW ALL QUERIES + SHOW QUERIES """ Then the execution should be successful # In case that rebuild indexes cost too much time. And wait 10 seconds When executing query via graph 1: """ - SHOW ALL QUERIES + SHOW QUERIES """ Then the result should be, in order: | SessionID | ExecutionPlanID | User | Host | StartTime | DurationInUSec | Status | Query | | /\d+/ | /\d+/ | "root" | /.*/ | /.*/ | /\d+/ | "RUNNING" | "GO 100000 STEPS FROM \"Tim Duncan\" OVER like YIELD like._dst" | When executing query via graph 1: """ - SHOW ALL QUERIES + SHOW QUERIES | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100000 STEPS"; """ @@ -74,7 +74,7 @@ Feature: Slow Query Test Then an SemanticError should be raised at runtime: `$-.eid', not exist prop `eid' When executing query via graph 1: """ - SHOW ALL QUERIES + SHOW QUERIES | YIELD $-.SessionID AS sid, $-.`Query` AS eid, $-.DurationInUSec AS dur WHERE $-.DurationInUSec > 10000000 | ORDER BY $-.dur | KILL QUERY (session=$-.sid, plan=$-.eid) @@ -82,7 +82,7 @@ Feature: Slow Query Test Then an SemanticError should be raised at runtime: $-.eid, Session ID must be an integer but was STRING When executing query via graph 1: """ - SHOW ALL QUERIES + SHOW QUERIES | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO" | ORDER BY $-.dur diff --git a/tests/tck/slowquery/KillSlowQueryViaSameService.feature b/tests/tck/slowquery/KillSlowQueryViaSameService.feature index 448962e2646..55e70185f60 100644 --- a/tests/tck/slowquery/KillSlowQueryViaSameService.feature +++ b/tests/tck/slowquery/KillSlowQueryViaSameService.feature @@ -16,26 +16,26 @@ Feature: Slow Query Test Scenario: Show all queries and kill all slow queries When executing query: """ - SHOW QUERIES + SHOW LOCAL QUERIES """ Then the execution 
     Then the execution should be successful
     When executing query:
       """
-      SHOW ALL QUERIES
+      SHOW QUERIES
       """
     Then the execution should be successful
     # In case that rebuild indexes cost too much time.
     And wait 10 seconds
     When executing query:
       """
-      SHOW ALL QUERIES
+      SHOW QUERIES
       """
     Then the result should be, in order:
      | SessionID | ExecutionPlanID | User | Host | StartTime | DurationInUSec | Status | Query |
      | /\d+/ | /\d+/ | "root" | /.*/ | /.*/ | /\d+/ | "RUNNING" | "GO 100000 STEPS FROM \"Tim Duncan\" OVER like YIELD like._dst" |
     When executing query:
       """
-      SHOW ALL QUERIES
+      SHOW QUERIES
       | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
       WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100000 STEPS";
       """
@@ -74,7 +74,7 @@ Feature: Slow Query Test
     Then an SemanticError should be raised at runtime: `$-.eid', not exist prop `eid'
     When executing query:
       """
-      SHOW ALL QUERIES
+      SHOW QUERIES
       | YIELD $-.SessionID AS sid, $-.`Query` AS eid, $-.DurationInUSec AS dur
       WHERE $-.DurationInUSec > 10000000
       | ORDER BY $-.dur
       | KILL QUERY (session=$-.sid, plan=$-.eid)
       """
@@ -82,7 +82,7 @@ Feature: Slow Query Test
     Then an SemanticError should be raised at runtime: $-.eid, Session ID must be an integer but was STRING
     When executing query:
       """
-      SHOW ALL QUERIES
+      SHOW QUERIES
       | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
       WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO"
       | ORDER BY $-.dur