diff --git a/src/clients/meta/MetaClient.cpp b/src/clients/meta/MetaClient.cpp
index 352989f6369..13a10550fdd 100644
--- a/src/clients/meta/MetaClient.cpp
+++ b/src/clients/meta/MetaClient.cpp
@@ -933,8 +933,8 @@ Status MetaClient::handleResponse(const RESP& resp) {
       return Status::Error("Task report is out of date!");
     case nebula::cpp2::ErrorCode::E_BACKUP_FAILED:
       return Status::Error("Backup failure!");
-    case nebula::cpp2::ErrorCode::E_BACKUP_BUILDING_INDEX:
-      return Status::Error("Backup building indexes!");
+    case nebula::cpp2::ErrorCode::E_BACKUP_RUNNING_JOBS:
+      return Status::Error("Backup encounter running or queued jobs!");
     case nebula::cpp2::ErrorCode::E_BACKUP_SPACE_NOT_FOUND:
       return Status::Error("The space is not found when backup!");
     case nebula::cpp2::ErrorCode::E_RESTORE_FAILURE:
@@ -2567,7 +2567,8 @@ folly::Future<StatusOr<bool>> MetaClient::heartbeat() {
       if (FileBasedClusterIdMan::persistInFile(resp.get_cluster_id(), FLAGS_cluster_id_path)) {
         options_.clusterId_.store(resp.get_cluster_id());
       } else {
-        LOG(FATAL) << "Can't persist the clusterId in file " << FLAGS_cluster_id_path;
+        LOG(DFATAL) << "Can't persist the clusterId in file " << FLAGS_cluster_id_path;
+        return false;
       }
     }
     heartbeatTime_ = time::WallClock::fastNowInMilliSec();
diff --git a/src/codec/RowReaderV1.cpp b/src/codec/RowReaderV1.cpp
index b56b831a228..6a73458539c 100644
--- a/src/codec/RowReaderV1.cpp
+++ b/src/codec/RowReaderV1.cpp
@@ -143,7 +143,8 @@ int64_t RowReaderV1::skipToNext(int64_t index, int64_t offset) const noexcept {
     }
     default: {
       // TODO
-      LOG(FATAL) << "Unimplemented";
+      LOG(DFATAL) << "Unimplemented";
+      return -1;
     }
   }
diff --git a/src/codec/RowReaderV2.cpp b/src/codec/RowReaderV2.cpp
index 91db3933fec..fe23bd2e9a1 100644
--- a/src/codec/RowReaderV2.cpp
+++ b/src/codec/RowReaderV2.cpp
@@ -208,7 +208,9 @@ Value RowReaderV2::getValueByIndex(const int64_t index) const noexcept {
     case PropertyType::UNKNOWN:
       break;
   }
-  LOG(FATAL) << "Should not reach here";
+  LOG(DFATAL) << "Should not reach here, illegal property type: "
+              << static_cast<int>(field->type());
+  return Value::kNullBadType;
 }
 
 int64_t RowReaderV2::getTimestamp() const noexcept {
diff --git a/src/codec/RowReaderWrapper.cpp b/src/codec/RowReaderWrapper.cpp
index 87b8beeb22a..77abcf81323 100644
--- a/src/codec/RowReaderWrapper.cpp
+++ b/src/codec/RowReaderWrapper.cpp
@@ -89,7 +89,9 @@ RowReaderWrapper::RowReaderWrapper(const meta::SchemaProviderIf* schema,
     readerV2_.resetImpl(schema, row);
     currReader_ = &readerV2_;
   } else {
-    LOG(FATAL) << "Should not reach here";
+    LOG(DFATAL) << "Should not reach here";
+    readerV2_.resetImpl(schema, row);
+    currReader_ = &readerV2_;
   }
 }
diff --git a/src/codec/RowWriterV2.cpp b/src/codec/RowWriterV2.cpp
index 1368661ae53..c6466191f55 100644
--- a/src/codec/RowWriterV2.cpp
+++ b/src/codec/RowWriterV2.cpp
@@ -63,7 +63,9 @@ RowWriterV2::RowWriterV2(const meta::SchemaProviderIf* schema)
     header = 0x0F;  // 0x08 | 0x07, seven bytes for the schema version
     headerLen_ = 8;
   } else {
-    LOG(FATAL) << "Schema version too big";
+    LOG(DFATAL) << "Schema version too big";
+    header = 0x0F;  // 0x08 | 0x07, seven bytes for the schema version
+    headerLen_ = 8;
   }
   buf_.append(&header, 1);
   buf_.append(reinterpret_cast<char*>(&ver), buf_[0] & 0x07);
@@ -137,7 +139,9 @@ RowWriterV2::RowWriterV2(RowReader& reader) : RowWriterV2(reader.getSchema()) {
         set(i, v.moveDuration());
         break;
       default:
-        LOG(FATAL) << "Invalid data: " << v << ", type: " << v.typeName();
+        LOG(DFATAL) << "Invalid data: " << v << ", type: " << v.typeName();
+        isSet_[i] = false;
+        continue;
     }
     isSet_[i] = true;
   }
@@ -852,9 +856,10 @@ WriteResult RowWriterV2::checkUnsetFields() noexcept {
           r = write(i, defVal.getDuration());
           break;
         default:
-          LOG(FATAL) << "Unsupported default value type: " << defVal.typeName()
-                     << ", default value: " << defVal
-                     << ", default value expr: " << field->defaultValue();
+          LOG(DFATAL) << "Unsupported default value type: " << defVal.typeName()
+                      << ", default value: " << defVal
+                      << ", default value expr: " << field->defaultValue();
+          return WriteResult::TYPE_MISMATCH;
       }
     } else {
       // Set NULL
diff --git a/src/common/base/Arena.cpp b/src/common/base/Arena.cpp
index 0775438bdfe..ff2f49e6806 100644
--- a/src/common/base/Arena.cpp
+++ b/src/common/base/Arena.cpp
@@ -16,7 +16,7 @@ void* Arena::allocateAligned(const std::size_t alloc) {
       kAlignment - (reinterpret_cast<uintptr_t>(currentPtr_) & (kAlignment - 1));
   const std::size_t consumption = alloc + pad;
   if (UNLIKELY(consumption > kMaxChunkSize)) {
-    DLOG(FATAL) << "Arena can't allocate so large memory.";
+    LOG(DFATAL) << "Arena can't allocate so large memory.";
     return nullptr;
   }
   if (LIKELY(consumption <= availableSize_)) {
diff --git a/src/common/base/Status.cpp b/src/common/base/Status.cpp
index d80c31cf370..3954b113e94 100644
--- a/src/common/base/Status.cpp
+++ b/src/common/base/Status.cpp
@@ -100,7 +100,7 @@ const char *Status::toString(Code code) {
     case kSessionNotFound:
       return "SessionNotFound";
   }
-  DLOG(FATAL) << "Invalid status code: " << static_cast<uint16_t>(code);
+  LOG(DFATAL) << "Invalid status code: " << static_cast<uint16_t>(code);
   return "";
 }
diff --git a/src/common/datatypes/Value.cpp b/src/common/datatypes/Value.cpp
index 324756777f1..c8530d9a783 100644
--- a/src/common/datatypes/Value.cpp
+++ b/src/common/datatypes/Value.cpp
@@ -1505,7 +1505,8 @@ folly::dynamic Value::toJson() const {
       // no default so the compiler will warning when lack
   }
 
-  LOG(FATAL) << "Unknown value type " << static_cast<int>(type_);
+  LOG(DFATAL) << "Unknown value type " << static_cast<int>(type_);
+  return folly::dynamic(nullptr);
 }
 
 folly::dynamic Value::getMetaData() const {
@@ -1550,7 +1551,8 @@
       break;
   }
 
-  LOG(FATAL) << "Unknown value type " << static_cast<int>(type_);
+  LOG(DFATAL) << "Unknown value type " << static_cast<int>(type_);
+  return folly::dynamic(nullptr);
 }
 
 std::string Value::toString() const {
@@ -1575,7 +1577,8 @@
         case NullType::OUT_OF_RANGE:
           return "__NULL_OUT_OF_RANGE__";
       }
-      LOG(FATAL) << "Unknown Null type " << static_cast<int>(getNull());
+      LOG(DFATAL) << "Unknown Null type " << static_cast<int>(getNull());
+      return "__NULL_BAD_TYPE__";
     }
     case Value::Type::BOOL: {
       return getBool() ? "true" : "false";
     }
@@ -1628,7 +1631,8 @@
       // no default so the compiler will warning when lack
   }
 
-  LOG(FATAL) << "Unknown value type " << static_cast<int>(type_);
+  LOG(DFATAL) << "Unknown value type " << static_cast<int>(type_);
+  return "__NULL_BAD_TYPE__";
 }
 
 Value Value::toBool() const {
@@ -1850,7 +1854,7 @@ Value Value::lessThan(const Value& v) const {
       return kNullBadType;
     }
   }
-  DLOG(FATAL) << "Unknown type " << static_cast<int>(v.type());
+  LOG(DFATAL) << "Unknown type " << static_cast<int>(v.type());
   return Value::kNullBadType;
 }
 
@@ -1949,7 +1953,7 @@ Value Value::equal(const Value& v) const {
       return false;
     }
   }
-  DLOG(FATAL) << "Unknown type " << static_cast<int>(v.type());
+  LOG(DFATAL) << "Unknown type " << static_cast<int>(v.type());
   return Value::kNullBadType;
 }
 
@@ -1971,7 +1975,8 @@ bool Value::implicitBool() const {
     case Type::LIST:
       return !getList().empty();
     default:
-      LOG(FATAL) << "Impossible to reach here!";
+      LOG(DFATAL) << "Impossible to reach here!";
+      return false;
   }
 }
 
@@ -2253,7 +2258,8 @@ Value operator+(const Value& lhs, const Value& rhs) {
           return Value::kNullValue;
         }
       }
-      LOG(FATAL) << "Unknown type: " << rhs.type();
+      LOG(DFATAL) << "Unknown type: " << rhs.type();
+      return Value::kNullBadType;
     }
     case Value::Type::VERTEX: {
       switch (rhs.type()) {
@@ -2686,7 +2692,7 @@ bool operator<(const Value& lhs, const Value& rhs) {
       return lhs.getGeography() < rhs.getGeography();
     }
     case Value::Type::DURATION: {
-      DLOG(FATAL) << "Duration is not comparable.";
+      LOG(DFATAL) << "Duration is not comparable.";
       return false;
     }
     case Value::Type::NULLVALUE:
@@ -2694,7 +2700,7 @@ bool operator<(const Value& lhs, const Value& rhs) {
       return false;
     }
   }
-  DLOG(FATAL) << "Unknown type " << static_cast<int>(lType);
+  LOG(DFATAL) << "Unknown type " << static_cast<int>(lType);
   return false;
 }
 
@@ -2790,7 +2796,7 @@ bool Value::equals(const Value& rhs) const {
       return false;
     }
   }
-  DLOG(FATAL) << "Unknown type " << static_cast<int>(type());
+  LOG(DFATAL) << "Unknown type " << static_cast<int>(type());
   return false;
 }
 
@@ -2848,12 +2854,13 @@ std::size_t Value::hash() const {
       return std::hash<Duration>()(getDuration());
     }
     case Type::DATASET: {
-      LOG(FATAL) << "Hash for DATASET has not been implemented";
+      LOG(DFATAL) << "Hash for DATASET has not been implemented";
    }
     default: {
-      LOG(FATAL) << "Unknown type";
+      LOG(DFATAL) << "Unknown type";
    }
  }
+  return ~0UL;
 }
 
 bool operator!=(const Value& lhs, const Value& rhs) {
diff --git a/src/common/expression/ArithmeticExpression.cpp b/src/common/expression/ArithmeticExpression.cpp
index 58f5991933c..492d63cf3a8 100644
--- a/src/common/expression/ArithmeticExpression.cpp
+++ b/src/common/expression/ArithmeticExpression.cpp
@@ -30,7 +30,8 @@ const Value& ArithmeticExpression::eval(ExpressionContext& ctx) {
       result_ = lhs % rhs;
       break;
     default:
-      LOG(FATAL) << "Unknown type: " << kind_;
+      LOG(DFATAL) << "Unknown type: " << kind_;
+      result_ = Value::kNullBadType;
   }
   return result_;
 }
@@ -54,6 +55,7 @@ std::string ArithmeticExpression::toString() const {
       op = "%";
       break;
     default:
+      LOG(DFATAL) << "Illegal kind for arithmetic expression: " << static_cast<uint8_t>(kind());
       op = "illegal symbol ";
   }
   std::stringstream out;
diff --git a/src/common/expression/Expression.cpp b/src/common/expression/Expression.cpp
index 9e10e13a1f6..2a6ef5dbfb1 100644
--- a/src/common/expression/Expression.cpp
+++ b/src/common/expression/Expression.cpp
@@ -395,7 +395,7 @@ Expression* Expression::decode(ObjectPool* pool, Expression::Decoder& decoder) {
       return exp;
     }
     case Expression::Kind::kInputProperty: {
-      LOG(FATAL) << "Should not decode input property expression";
+      LOG(DFATAL) << "Should not decode input property expression";
       return exp;
     }
     case Expression::Kind::kVarProperty: {
@@ -459,7 +459,7 @@ Expression* Expression::decode(ObjectPool* pool, Expression::Decoder& decoder) {
       return exp;
     }
     case Expression::Kind::kVersionedVar: {
-      LOG(FATAL) << "Should not decode version variable expression";
+      LOG(DFATAL) << "Should not decode version variable expression";
       return exp;
     }
     case Expression::Kind::kUUID: {
@@ -516,17 +516,18 @@ Expression* Expression::decode(ObjectPool* pool, Expression::Decoder& decoder) {
     case Expression::Kind::kTSWildcard:
     case Expression::Kind::kTSRegexp:
     case Expression::Kind::kTSFuzzy: {
-      LOG(FATAL) << "Should not decode text search expression";
+      LOG(DFATAL) << "Should not decode text search expression";
       return exp;
     }
     case Expression::Kind::kMatchPathPattern: {
-      LOG(FATAL) << "Should not decode match path pattern expression.";
+      LOG(DFATAL) << "Should not decode match path pattern expression.";
       return exp;
     }
       // no default so the compiler will warning when lack
   }
-  LOG(FATAL) << "Unknown expression: " << decoder.getHexStr();
+  LOG(DFATAL) << "Unknown expression: " << decoder.getHexStr();
+  return exp;
 }
 
 std::ostream& operator<<(std::ostream& os, Expression::Kind kind) {
diff --git a/src/common/expression/LabelAttributeExpression.h b/src/common/expression/LabelAttributeExpression.h
index 40cee0d7fb0..43105f0c04f 100644
--- a/src/common/expression/LabelAttributeExpression.h
+++ b/src/common/expression/LabelAttributeExpression.h
@@ -34,7 +34,7 @@ class LabelAttributeExpression final : public Expression {
   }
 
   const Value& eval(ExpressionContext&) override {
-    DLOG(FATAL) << "LabelAttributeExpression has to be rewritten";
+    DLOG(DFATAL) << "LabelAttributeExpression has to be rewritten";
     return Value::kNullBadData;
   }
 
@@ -76,11 +76,11 @@ class LabelAttributeExpression final : public Expression {
   }
 
   void writeTo(Encoder&) const override {
-    LOG(FATAL) << "LabelAttributeExpression not supported to encode.";
+    LOG(DFATAL) << "LabelAttributeExpression not supported to encode.";
   }
 
   void resetFrom(Decoder&) override {
-    LOG(FATAL) << "LabelAttributeExpression not supported to decode.";
+    LOG(DFATAL) << "LabelAttributeExpression not supported to decode.";
   }
 
  private:
diff --git a/src/common/expression/LogicalExpression.cpp b/src/common/expression/LogicalExpression.cpp
index add1ac192c4..a157f040347 100644
--- a/src/common/expression/LogicalExpression.cpp
+++ b/src/common/expression/LogicalExpression.cpp
@@ -19,7 +19,8 @@ const Value &LogicalExpression::eval(ExpressionContext &ctx) {
     case Kind::kLogicalXor:
       return evalXor(ctx);
     default:
-      LOG(FATAL) << "Illegal kind for logical expression: " << static_cast<uint8_t>(kind());
+      LOG(DFATAL) << "Illegal kind for logical expression: " << static_cast<uint8_t>(kind());
+      return Value::kNullBadType;
   }
 }
 
@@ -115,7 +116,8 @@ std::string LogicalExpression::toString() const {
       op = " XOR ";
       break;
     default:
-      LOG(FATAL) << "Illegal kind for logical expression: " << static_cast<uint8_t>(kind());
+      LOG(DFATAL) << "Illegal kind for logical expression: " << static_cast<uint8_t>(kind());
+      op = " illegal symbol ";
   }
   std::string buf;
   buf.reserve(256);
diff --git a/src/common/expression/LogicalExpression.h b/src/common/expression/LogicalExpression.h
index 91685c5bce1..c1aa889b8e4 100644
--- a/src/common/expression/LogicalExpression.h
+++ b/src/common/expression/LogicalExpression.h
@@ -99,7 +99,7 @@ class LogicalExpression final : public Expression {
     } else if (kind_ == Kind::kLogicalOr) {
       kind_ = Kind::kLogicalAnd;
     } else {
-      LOG(FATAL) << "Should not reverse logical expression except and/or kind.";
+      LOG(DFATAL) << "Should not reverse logical expression except and/or kind.";
     }
   }
diff --git a/src/common/expression/MatchPathPatternExpression.h b/src/common/expression/MatchPathPatternExpression.h
index 1438ddcc293..c4547a51d76 100644
--- a/src/common/expression/MatchPathPatternExpression.h
+++ b/src/common/expression/MatchPathPatternExpression.h
@@ -60,12 +60,12 @@ class MatchPathPatternExpression final : public Expression {
   // This expression contains variable implicitly, so we don't support persist or transform it.
   void writeTo(Encoder&) const override {
-    LOG(FATAL) << "Not implemented";
+    LOG(DFATAL) << "Not implemented";
   }
 
   // This expression contains variable implicitly, so we don't support persist or transform it.
   void resetFrom(Decoder&) override {
-    LOG(FATAL) << "Not implemented";
+    LOG(DFATAL) << "Not implemented";
   }
 
  private:
diff --git a/src/common/expression/PropertyExpression.cpp b/src/common/expression/PropertyExpression.cpp
index de1d0b7cc1f..81afd944433 100644
--- a/src/common/expression/PropertyExpression.cpp
+++ b/src/common/expression/PropertyExpression.cpp
@@ -46,7 +46,8 @@ void PropertyExpression::resetFrom(Decoder& decoder) {
 const Value& PropertyExpression::eval(ExpressionContext& ctx) {
   // TODO maybe cypher need it.
   UNUSED(ctx);
-  LOG(FATAL) << "Unimplemented";
+  LOG(DFATAL) << "Unimplemented";
+  return Value::kNullBadType;
 }
 
 const Value& EdgePropertyExpression::eval(ExpressionContext& ctx) {
diff --git a/src/common/expression/RelationalExpression.cpp b/src/common/expression/RelationalExpression.cpp
index 2baedf5ca25..0c328f72d86 100644
--- a/src/common/expression/RelationalExpression.cpp
+++ b/src/common/expression/RelationalExpression.cpp
@@ -195,7 +195,8 @@ const Value& RelationalExpression::eval(ExpressionContext& ctx) {
       break;
     }
     default:
-      LOG(FATAL) << "Unknown type: " << kind_;
+      LOG(DFATAL) << "Unknown type: " << kind_;
+      result_ = Value::kNullBadType;
   }
   return result_;
 }
@@ -249,6 +250,7 @@ std::string RelationalExpression::toString() const {
       op = " NOT ENDS WITH ";
       break;
     default:
+      LOG(DFATAL) << "Illegal kind for relational expression: " << static_cast<uint8_t>(kind());
       op = " illegal symbol ";
   }
   std::stringstream out;
diff --git a/src/common/expression/TextSearchExpression.cpp b/src/common/expression/TextSearchExpression.cpp
index 69d339d9264..f10557cc4b2 100644
--- a/src/common/expression/TextSearchExpression.cpp
+++ b/src/common/expression/TextSearchExpression.cpp
@@ -66,7 +66,8 @@ std::string TextSearchExpression::toString() const {
       break;
     }
     default: {
-      LOG(FATAL) << "Unimplemented";
+      LOG(DFATAL) << "Illegal kind for text search expression: " << static_cast<uint8_t>(kind());
+      buf = "illegal symbol(";
     }
   }
   buf += arg_ ? arg_->toString() : "";
diff --git a/src/common/expression/TextSearchExpression.h b/src/common/expression/TextSearchExpression.h
index 0bd3b78477c..281c8a15bb6 100644
--- a/src/common/expression/TextSearchExpression.h
+++ b/src/common/expression/TextSearchExpression.h
@@ -114,12 +114,12 @@ class TextSearchExpression : public Expression {
   bool operator==(const Expression& rhs) const override;
 
   const Value& eval(ExpressionContext&) override {
-    LOG(FATAL) << "TextSearchExpression has to be rewritten";
+    LOG(DFATAL) << "TextSearchExpression has to be rewritten";
     return Value::kNullBadData;
   }
 
   void accept(ExprVisitor*) override {
-    LOG(FATAL) << "TextSearchExpression has to be rewritten";
+    LOG(DFATAL) << "TextSearchExpression has to be rewritten";
   }
 
   std::string toString() const override;
@@ -149,11 +149,11 @@ class TextSearchExpression : public Expression {
   }
 
   void writeTo(Encoder&) const override {
-    LOG(FATAL) << "TextSearchExpression has to be rewritten";
+    LOG(DFATAL) << "TextSearchExpression has to be rewritten";
   }
 
   void resetFrom(Decoder&) override {
-    LOG(FATAL) << "TextSearchExpression has to be reset";
+    LOG(DFATAL) << "TextSearchExpression has to be reset";
   }
 
  private:
diff --git a/src/common/expression/UnaryExpression.cpp b/src/common/expression/UnaryExpression.cpp
index 935afc81204..75223036d05 100644
--- a/src/common/expression/UnaryExpression.cpp
+++ b/src/common/expression/UnaryExpression.cpp
@@ -86,8 +86,10 @@ const Value& UnaryExpression::eval(ExpressionContext& ctx) {
       result_ = (operand_->eval(ctx)).empty() ? false : true;
       break;
     }
-    default:
-      LOG(FATAL) << "Unknown type: " << kind_;
+    default: {
+      LOG(DFATAL) << "Unknown type: " << kind_;
+      result_ = Value::kNullBadType;
+    }
   }
   return result_;
 }
@@ -119,6 +121,7 @@ std::string UnaryExpression::toString() const {
     case Kind::kIsNotEmpty:
       return (operand_ ? operand_->toString() : "") + " IS NOT EMPTY";
     default:
+      LOG(DFATAL) << "Illegal kind for unary expression: " << static_cast<uint8_t>(kind());
       op = "illegal symbol ";
   }
   std::stringstream out;
diff --git a/src/common/expression/VariableExpression.h b/src/common/expression/VariableExpression.h
index 52e6a3907f0..5c73000d15c 100644
--- a/src/common/expression/VariableExpression.h
+++ b/src/common/expression/VariableExpression.h
@@ -107,11 +107,11 @@ class VersionedVariableExpression final : public Expression {
       : Expression(pool, Kind::kVersionedVar), var_(var), version_(version) {}
 
   void writeTo(Encoder&) const override {
-    LOG(FATAL) << "VersionedVariableExpression not support to encode.";
+    LOG(DFATAL) << "VersionedVariableExpression not support to encode.";
   }
 
   void resetFrom(Decoder&) override {
-    LOG(FATAL) << "VersionedVariableExpression not support to decode.";
+    LOG(DFATAL) << "VersionedVariableExpression not support to decode.";
   }
 
  private:
diff --git a/src/common/function/FunctionManager.cpp b/src/common/function/FunctionManager.cpp
index 048ba23769d..6746885bb2e 100644
--- a/src/common/function/FunctionManager.cpp
+++ b/src/common/function/FunctionManager.cpp
@@ -1692,7 +1692,8 @@ FunctionManager::FunctionManager() {
         }
       }
       default:
-        LOG(FATAL) << "Unexpected arguments count " << args.size();
+        LOG(DFATAL) << "Unexpected arguments count " << args.size();
+        return Value::kNullBadType;
     }
   };
 }
@@ -1732,7 +1733,8 @@ FunctionManager::FunctionManager() {
         }
       }
       default:
-        LOG(FATAL) << "Unexpected arguments count " << args.size();
+        LOG(DFATAL) << "Unexpected arguments count " << args.size();
+        return Value::kNullBadType;
     }
   };
 }
@@ -1773,7 +1775,8 @@ FunctionManager::FunctionManager() {
        }
      }
      default:
-        LOG(FATAL) << "Unexpected arguments count " << args.size();
+        LOG(DFATAL) << "Unexpected arguments count " << args.size();
+        return Value::kNullBadType;
    }
  };
 }
@@ -2789,8 +2792,8 @@ FunctionManager::FunctionManager() {
       const auto &p = args[0].get().getPath();
       const std::size_t nodeIndex = args[1].get().getInt();
       if (nodeIndex < 0 || nodeIndex >= (1 + p.steps.size())) {
-        DLOG(FATAL) << "Out of range node index.";
-        return Value::kNullBadData;
+        LOG(DFATAL) << "Out of range node index.";
+        return Value::kNullOutOfRange;
       }
       if (nodeIndex == 0) {
         return p.src.vid;
diff --git a/src/common/geo/GeoIndex.cpp b/src/common/geo/GeoIndex.cpp
index d09d355cd22..67080c169fd 100644
--- a/src/common/geo/GeoIndex.cpp
+++ b/src/common/geo/GeoIndex.cpp
@@ -118,7 +118,7 @@ std::vector<ScanRange> GeoIndex::dWithin(const Geography& g, double distance) co
       return intersects(gBuffer);
     }
     default:
-      LOG(FATAL)
+      LOG(DFATAL)
           << "Geography shapes other than Point/LineString/Polygon are not currently supported";
       return {};
   }
diff --git a/src/common/geo/GeoUtils.h b/src/common/geo/GeoUtils.h
index f0299778e47..f140fe10138 100644
--- a/src/common/geo/GeoUtils.h
+++ b/src/common/geo/GeoUtils.h
@@ -47,7 +47,7 @@ class GeoUtils final {
       return std::make_unique<S2Polygon>(std::move(s2Loops), S2Debug::DISABLE);
     }
     default:
-      LOG(FATAL)
+      LOG(DFATAL)
           << "Geography shapes other than Point/LineString/Polygon are not currently supported";
       return nullptr;
   }
diff --git a/src/common/geo/io/wkb/WKBReader.cpp b/src/common/geo/io/wkb/WKBReader.cpp
index d650c5143e2..434c0d13c0e 100644
--- a/src/common/geo/io/wkb/WKBReader.cpp
+++ b/src/common/geo/io/wkb/WKBReader.cpp
@@ -31,7 +31,7 @@ StatusOr<Geography> WKBReader::read(const std::string &wkb) {
       return readPolygon();
     }
     default:
-      LOG(FATAL)
+      LOG(DFATAL)
           << "Geography shapes other than Point/LineString/Polygon are not currently supported";
       return Status::Error(
           "Geography shapes other than Point/LineString/Polygon are not currently supported");
diff --git a/src/common/geo/io/wkb/WKBWriter.cpp b/src/common/geo/io/wkb/WKBWriter.cpp
index 499d3c27411..cac1d44ec2e 100644
--- a/src/common/geo/io/wkb/WKBWriter.cpp
+++ b/src/common/geo/io/wkb/WKBWriter.cpp
@@ -32,7 +32,7 @@ std::string WKBWriter::write(const Geography& geog, ByteOrder byteOrder) {
       return os_.str();
     }
     default:
-      LOG(FATAL)
+      LOG(DFATAL)
          << "Geometry shapes other than Point/LineString/Polygon are not currently supported";
      return "";
   }
diff --git a/src/common/graph/Response.h b/src/common/graph/Response.h
index 80f7254dcd7..37fbd6a8628 100644
--- a/src/common/graph/Response.h
+++ b/src/common/graph/Response.h
@@ -113,7 +113,7 @@
   X(E_INVALID_JOB, -2065)                        \
                                                  \
   /* Backup Failure */                           \
-  X(E_BACKUP_BUILDING_INDEX, -2066)              \
+  X(E_BACKUP_RUNNING_JOBS, -2066)                \
   X(E_BACKUP_SPACE_NOT_FOUND, -2067)             \
                                                  \
   /* RESTORE Failure */                          \
diff --git a/src/common/id/Snowflake.cpp b/src/common/id/Snowflake.cpp
index 021330009f9..4aacedb5b43 100644
--- a/src/common/id/Snowflake.cpp
+++ b/src/common/id/Snowflake.cpp
@@ -6,14 +6,16 @@
 #include "common/id/Snowflake.h"
 
 namespace nebula {
-void Snowflake::initWorkerId(meta::MetaClient* client) {
+bool Snowflake::initWorkerId(meta::MetaClient* client) {
   const std::string& ip = client->getLocalIp();
   auto result = client->getWorkerId(ip).get();
   if (!result.ok()) {
-    LOG(FATAL) << "Failed to get workerId from meta server";
+    LOG(DFATAL) << "Failed to get workerId from meta server";
+    return false;
   }
   workerId_ = result.value();
   LOG(INFO) << "WorkerId init success: " << workerId_;
+  return true;
 }
 
 int64_t Snowflake::getId() {
diff --git a/src/common/id/Snowflake.h b/src/common/id/Snowflake.h
index ae45f2e5a93..01edf13bdfd 100644
--- a/src/common/id/Snowflake.h
+++ b/src/common/id/Snowflake.h
@@ -20,7 +20,7 @@ class Snowflake {
  public:
   Snowflake() = default;
 
-  static void initWorkerId(meta::MetaClient* client);
+  static bool initWorkerId(meta::MetaClient* client);
 
   int64_t getId();
diff --git a/src/common/meta/GflagsManager.cpp b/src/common/meta/GflagsManager.cpp
index 24c63104ceb..a25d2708fef 100644
--- a/src/common/meta/GflagsManager.cpp
+++ b/src/common/meta/GflagsManager.cpp
@@ -209,7 +209,8 @@ std::string GflagsManager::ValueToGflagString(const Value& val) {
       return os.str();
     }
     default: {
-      LOG(FATAL) << "Unsupported type for gflags";
+      LOG(DFATAL) << "Unsupported type for gflags";
+      return "";
     }
   }
 }
diff --git a/src/common/meta/NebulaSchemaProvider.cpp b/src/common/meta/NebulaSchemaProvider.cpp
index a6499db99e8..5cb9a891fdf 100644
--- a/src/common/meta/NebulaSchemaProvider.cpp
+++ b/src/common/meta/NebulaSchemaProvider.cpp
@@ -176,7 +176,8 @@ std::size_t NebulaSchemaProvider::fieldSize(PropertyType type, std::size_t fixed
     case PropertyType::UNKNOWN:
       break;
   }
-  LOG(FATAL) << "Incorrect field type " << static_cast<int>(type);
+  LOG(DFATAL) << "Incorrect field type " << static_cast<int>(type);
+  return 0;
 }
 
 void NebulaSchemaProvider::setProp(cpp2::SchemaProp schemaProp) {
diff --git a/src/common/stats/StatsManager-inl.h b/src/common/stats/StatsManager-inl.h
index 05ac7d404d0..0415735e2f4 100644
--- a/src/common/stats/StatsManager-inl.h
+++ b/src/common/stats/StatsManager-inl.h
@@ -26,7 +26,8 @@ StatsManager::VT StatsManager::readValue(StatsHolder& stats,
     return stats.template rate<VT>(level);
   }
 
-  LOG(FATAL) << "Unknown statistic method";
+  LOG(DFATAL) << "Unknown statistic method";
+  return StatsManager::VT(0);
 }
 
 }  // namespace stats
diff --git a/src/common/stats/StatsManager.h b/src/common/stats/StatsManager.h
index 0bb4e2f3d06..b6bd4f038a9 100644
--- a/src/common/stats/StatsManager.h
+++ b/src/common/stats/StatsManager.h
@@ -33,7 +33,7 @@ class CounterId final {
     }
     if (valid()) {
       // Already assigned
-      LOG(FATAL) << "CounterId cannot be assigned twice";
+      LOG(DFATAL) << "CounterId cannot be assigned twice";
     }
     index_ = right.index_;
     isHisto_ = right.isHisto_;
@@ -41,14 +41,7 @@ class CounterId final {
   }
 
   CounterId& operator=(const std::string& right) {
-    if (right == "") {
-      LOG(FATAL) << "Invalid counter id";
-    }
-    if (valid()) {
-      // Already assigned
-      LOG(FATAL) << "CounterId cannot be assigned twice";
-    }
-    index_ = right;
+    this->operator=(CounterId(right));
     return *this;
   }
diff --git a/src/common/utils/DefaultValueContext.h b/src/common/utils/DefaultValueContext.h
index fb5a5b19782..89a5aad4d9b 100644
--- a/src/common/utils/DefaultValueContext.h
+++ b/src/common/utils/DefaultValueContext.h
@@ -14,66 +14,66 @@ namespace nebula {
 class DefaultValueContext final : public ExpressionContext {
  public:
   const Value& getVar(const std::string&) const override {
-    LOG(FATAL) << "Not allowed to call";
+    LOG(DFATAL) << "Not allowed to call";
     return Value::kEmpty;
   }
 
   void setInnerVar(const std::string&, Value) override {
-    LOG(FATAL) << "Not allowed to call";
+    LOG(DFATAL) << "Not allowed to call";
   }
 
   const Value& getInnerVar(const std::string&) const override {
-    LOG(FATAL) << "Not allowed to call";
+    LOG(DFATAL) << "Not allowed to call";
     return Value::kEmpty;
   }
 
   const Value& getVersionedVar(const std::string&, int64_t) const override {
-    LOG(FATAL) << "Not allowed to call";
+    LOG(DFATAL) << "Not allowed to call";
     return Value::kEmpty;
   }
 
   const Value& getVarProp(const std::string&, const std::string&) const override {
-    LOG(FATAL) << "Not allowed to call";
+    LOG(DFATAL) << "Not allowed to call";
     return Value::kEmpty;
   }
 
   StatusOr<std::size_t> getVarPropIndex(const std::string&, const std::string&) const override {
-    LOG(FATAL) << "Not allowed to call";
+    LOG(DFATAL) << "Not allowed to call";
     return Status::Error("Unimplemented");
   }
 
   Value getEdgeProp(const std::string&, const std::string&) const override {
-    LOG(FATAL) << "Not allowed to call";
+    LOG(DFATAL) << "Not allowed to call";
     return Value::kEmpty;
   }
 
   Value getTagProp(const std::string&, const std::string&) const override {
-    LOG(FATAL) << "Not allowed to call";
+    LOG(DFATAL) << "Not allowed to call";
     return Value::kEmpty;
   }
 
   Value getSrcProp(const std::string&, const std::string&) const override {
-    LOG(FATAL) << "Not allowed to call";
+    LOG(DFATAL) << "Not allowed to call";
     return Value::kEmpty;
   }
 
   const Value& getDstProp(const std::string&, const std::string&) const override {
-    LOG(FATAL) << "Not allowed to call";
+    LOG(DFATAL) << "Not allowed to call";
     return Value::kEmpty;
   }
 
   const Value& getInputProp(const std::string&) const override {
-    LOG(FATAL) << "Not allowed to call";
+    LOG(DFATAL) << "Not allowed to call";
     return Value::kEmpty;
   }
 
   StatusOr<std::size_t> getInputPropIndex(const std::string&) const override {
-    LOG(FATAL) << "Not allowed to call";
+    LOG(DFATAL) << "Not allowed to call";
     return Status::Error("Unimplemented");
   }
 
   const Value& getColumn(int32_t) const override {
-    LOG(FATAL) << "Not allowed to call";
+    LOG(DFATAL) << "Not allowed to call";
     return Value::kEmpty;
   }
 
@@ -81,12 +81,12 @@ class DefaultValueContext final : public ExpressionContext {
   Value getVertex(const std::string& name = "") const override {
     UNUSED(name);
-    LOG(FATAL) << "Not allowed to call";
+    LOG(DFATAL) << "Not allowed to call";
     return Value::kEmpty;
   }
 
   Value getEdge() const override {
-    LOG(FATAL) << "Not allowed to call";
+    LOG(DFATAL) << "Not allowed to call";
     return Value::kEmpty;
   }
 };
diff --git a/src/common/utils/IndexKeyUtils.cpp b/src/common/utils/IndexKeyUtils.cpp
index 5e984ddb54e..728f4b52374 100644
--- a/src/common/utils/IndexKeyUtils.cpp
+++ b/src/common/utils/IndexKeyUtils.cpp
@@ -255,7 +255,8 @@ Status IndexKeyUtils::checkValue(const Value& v, bool isNullable) {
       return Status::Error("Out of range");
     }
   }
-  LOG(FATAL) << "Unknown Null type " << static_cast<int>(v.getNull());
+  LOG(DFATAL) << "Unknown Null type " << static_cast<int>(v.getNull());
+  return Status::Error("Unknown Null type");
 }
 
 }  // namespace nebula
diff --git a/src/common/utils/IndexKeyUtils.h b/src/common/utils/IndexKeyUtils.h
index 981d4f70581..977800a20fc 100644
--- a/src/common/utils/IndexKeyUtils.h
+++ b/src/common/utils/IndexKeyUtils.h
@@ -142,11 +142,11 @@ class IndexKeyUtils final {
       return encodeDateTime(v.getDateTime());
     }
     case Value::Type::GEOGRAPHY: {
-      LOG(FATAL) << "Should call encodeGeography separately";
+      LOG(DFATAL) << "Should call encodeGeography separately";
       return "";
     }
     default:
-      LOG(FATAL) << "Unsupported default value type";
+      LOG(DFATAL) << "Unsupported default value type";
   }
   return "";
 }
diff --git a/src/daemons/MetaDaemon.cpp b/src/daemons/MetaDaemon.cpp
index 0fa5f7ebafc..3ef8e087afe 100644
--- a/src/daemons/MetaDaemon.cpp
+++ b/src/daemons/MetaDaemon.cpp
@@ -159,14 +159,6 @@ int main(int argc, char* argv[]) {
     return EXIT_FAILURE;
   }
 
-  {
-    nebula::meta::JobManager* jobMgr = nebula::meta::JobManager::getInstance();
-    if (!jobMgr->init(gKVStore.get())) {
-      LOG(ERROR) << "Init job manager failed";
-      return EXIT_FAILURE;
-    }
-  }
-
   auto godInit = initGodUser(gKVStore.get(), localhost);
   if (godInit != nebula::cpp2::ErrorCode::SUCCEEDED) {
     LOG(ERROR) << "Init god user failed";
@@ -199,6 +191,13 @@ int main(int argc, char* argv[]) {
   auto handler = std::make_shared<nebula::meta::MetaServiceHandler>(gKVStore.get(), metaClusterId());
   LOG(INFO) << "The meta daemon start on " << localhost;
 
+  {
+    nebula::meta::JobManager* jobMgr = nebula::meta::JobManager::getInstance();
+    if (!jobMgr->init(gKVStore.get(), handler->getAdminClient())) {
+      LOG(ERROR) << "Init job manager failed";
+      return EXIT_FAILURE;
+    }
+  }
 
   try {
     metaServer->setPort(FLAGS_port);
     metaServer->setIdleTimeout(std::chrono::seconds(0));  // No idle timeout on client connection
diff --git a/src/graph/context/Iterator.cpp b/src/graph/context/Iterator.cpp
index 4079f190144..7b7c2e97d10 100644
--- a/src/graph/context/Iterator.cpp
+++ b/src/graph/context/Iterator.cpp
@@ -758,7 +758,7 @@ PropIter::PropIter(std::shared_ptr<Value> value, bool checkMemory)
   auto& ds = value->getDataSet();
   auto status = makeDataSetIndex(ds);
   if (UNLIKELY(!status.ok())) {
-    DLOG(FATAL) << status;
+    LOG(DFATAL) << status;
     clear();
     return;
   }
diff --git a/src/graph/context/Iterator.h b/src/graph/context/Iterator.h
index 1b4119f0a32..77441e289e2 100644
--- a/src/graph/context/Iterator.h
+++ b/src/graph/context/Iterator.h
@@ -136,12 +136,12 @@ class Iterator {
   }
 
   virtual const Value& getTagProp(const std::string&, const std::string&) const {
-    DLOG(FATAL) << "Shouldn't call the unimplemented method";
+    LOG(DFATAL) << "Shouldn't call the unimplemented method";
     return Value::kEmpty;
   }
 
   virtual const Value& getEdgeProp(const std::string&, const std::string&) const {
-    DLOG(FATAL) << "Shouldn't call the unimplemented method";
+    LOG(DFATAL) << "Shouldn't call the unimplemented method";
     return Value::kEmpty;
   }
 
@@ -203,11 +203,11 @@ class DefaultIter
final : public Iterator {
   }
 
   void select(std::size_t, std::size_t) override {
-    DLOG(FATAL) << "Unimplemented method for default iterator.";
+    LOG(DFATAL) << "Unimplemented method for default iterator.";
   }
 
   void sample(int64_t) override {
-    DLOG(FATAL) << "Unimplemented default iterator.";
+    LOG(DFATAL) << "Unimplemented default iterator.";
   }
 
   void clear() override {
@@ -219,27 +219,27 @@ class DefaultIter final : public Iterator {
   }
 
   const Value& getColumn(const std::string& /* col */) const override {
-    DLOG(FATAL) << "This method should not be invoked";
+    LOG(DFATAL) << "This method should not be invoked";
     return Value::kEmpty;
   }
 
   const Value& getColumn(int32_t) const override {
-    DLOG(FATAL) << "This method should not be invoked";
+    LOG(DFATAL) << "This method should not be invoked";
     return Value::kEmpty;
   }
 
   StatusOr<std::size_t> getColumnIndex(const std::string&) const override {
-    DLOG(FATAL) << "This method should not be invoked";
+    LOG(DFATAL) << "This method should not be invoked";
     return Status::Error("Unimplemented method");
   }
 
   const Row* row() const override {
-    DLOG(FATAL) << "This method should not be invoked";
+    LOG(DFATAL) << "This method should not be invoked";
     return nullptr;
   }
 
   Row moveRow() override {
-    DLOG(FATAL) << "This method should not be invoked";
+    LOG(DFATAL) << "This method should not be invoked";
     return Row{};
   }
diff --git a/src/graph/executor/Executor.cpp b/src/graph/executor/Executor.cpp
index 4079bf1cec4..3f7720e1e6a 100644
--- a/src/graph/executor/Executor.cpp
+++ b/src/graph/executor/Executor.cpp
@@ -563,7 +563,7 @@ Executor *Executor::makeExecutor(QueryContext *qctx, const PlanNode *node) {
       return pool->makeAndAdd(node, qctx);
     }
     case PlanNode::Kind::kUnknown: {
-      LOG(FATAL) << "Unknown plan node kind " << static_cast<int32_t>(node->kind());
+      LOG(DFATAL) << "Unknown plan node kind " << static_cast<int32_t>(node->kind());
       break;
     }
   }
diff --git a/src/graph/executor/admin/SubmitJobExecutor.cpp b/src/graph/executor/admin/SubmitJobExecutor.cpp
index 2af4b6cc741..6d41c9913f1 100644
--- a/src/graph/executor/admin/SubmitJobExecutor.cpp
+++ b/src/graph/executor/admin/SubmitJobExecutor.cpp
@@ -101,7 +101,7 @@ StatusOr<DataSet> SubmitJobExecutor::buildResult(meta::cpp2::JobOp jobOp,
     }
       // no default so the compiler will warning when lack
   }
-  DLOG(FATAL) << "Unknown job operation " << static_cast<int>(jobOp);
+  LOG(DFATAL) << "Unknown job operation " << static_cast<int>(jobOp);
   return Status::Error("Unknown job job operation %d.", static_cast<int>(jobOp));
 }
diff --git a/src/graph/executor/query/DataCollectExecutor.cpp b/src/graph/executor/query/DataCollectExecutor.cpp
index c4440741860..9281e05d33b 100644
--- a/src/graph/executor/query/DataCollectExecutor.cpp
+++ b/src/graph/executor/query/DataCollectExecutor.cpp
@@ -50,7 +50,8 @@ folly::Future<Status> DataCollectExecutor::doCollect() {
       break;
     }
     default:
-      LOG(FATAL) << "Unknown data collect type: " << static_cast<int32_t>(dc->kind());
+      LOG(DFATAL) << "Unknown data collect type: " << static_cast<int32_t>(dc->kind());
+      return Status::Error("Unknown data collect type: %d.", static_cast<int32_t>(dc->kind()));
   }
   ResultBuilder builder;
   builder.value(Value(std::move(result_))).iter(Iterator::Kind::kSequential);
diff --git a/src/graph/executor/query/FilterExecutor.cpp b/src/graph/executor/query/FilterExecutor.cpp
index 65d4d8382cc..6a14cace334 100644
--- a/src/graph/executor/query/FilterExecutor.cpp
+++ b/src/graph/executor/query/FilterExecutor.cpp
@@ -65,7 +65,10 @@ StatusOr<DataSet> FilterExecutor::handleJob(size_t begin, size_t end, Iterator *
   for (; iter->valid() && begin++ < end; iter->next()) {
     auto val = condition->eval(ctx(iter));
     if (val.isBadNull() || (!val.empty() && !val.isImplicitBool() && !val.isNull())) {
-      return Status::Error("Wrong type result, the type should be NULL, EMPTY, BOOL");
+      return Status::Error("Failed to evaluate condition: %s. %s%s",
+                           condition->toString().c_str(),
+                           "For boolean conditions, please write in their full forms like",
+                           " <v> == <v0> or <v> IS [NOT] NULL.");
     }
     if (!(val.empty() || val.isNull() || (val.isImplicitBool() && !val.implicitBool()))) {
       // TODO: Maybe we can move.
@@ -96,7 +99,10 @@ Status FilterExecutor::handleSingleJobFilter() {
   while (iter->valid()) {
     auto val = condition->eval(ctx(iter));
     if (val.isBadNull() || (!val.empty() && !val.isImplicitBool() && !val.isNull())) {
-      return Status::Error("Wrong type result, the type should be NULL, EMPTY, BOOL");
+      return Status::Error("Failed to evaluate condition: %s. %s%s",
+                           condition->toString().c_str(),
+                           "For boolean conditions, please write in their full forms like",
+                           " <v> == <v0> or <v> IS [NOT] NULL.");
     }
     if (val.empty() || val.isNull() || (val.isImplicitBool() && !val.implicitBool())) {
       if (UNLIKELY(filter->needStableFilter())) {
@@ -119,7 +125,10 @@ Status FilterExecutor::handleSingleJobFilter() {
   for (; iter->valid(); iter->next()) {
     auto val = condition->eval(ctx(iter));
     if (val.isBadNull() || (!val.empty() && !val.isImplicitBool() && !val.isNull())) {
-      return Status::Error("Wrong type result, the type should be NULL, EMPTY, BOOL");
+      return Status::Error("Failed to evaluate condition: %s. %s%s",
+                           condition->toString().c_str(),
+                           "For boolean conditions, please write in their full forms like",
+                           " <v> == <v0> or <v> IS [NOT] NULL.");
     }
     if (val.isImplicitBool() && val.implicitBool()) {
       Row row;
diff --git a/src/graph/optimizer/rule/PushEFilterDownRule.cpp b/src/graph/optimizer/rule/PushEFilterDownRule.cpp
index 0af06f4a4b3..f18ae8798eb 100644
--- a/src/graph/optimizer/rule/PushEFilterDownRule.cpp
+++ b/src/graph/optimizer/rule/PushEFilterDownRule.cpp
@@ -174,7 +174,7 @@ std::string PushEFilterDownRule::toString() const {
         ret = EdgePropertyExpression::make(pool, std::move(edgeNameResult).value(), exp->prop());
         break;
       default:
-        LOG(FATAL) << "Unexpected expr: " << exp->kind();
+        LOG(DFATAL) << "Unexpected expr: " << exp->kind();
     }
     return ret;
   }
diff --git a/src/graph/optimizer/rule/PushFilterDownNodeRule.cpp b/src/graph/optimizer/rule/PushFilterDownNodeRule.cpp
index 9c89cd16c63..20fcfb4a7d2 100644
--- a/src/graph/optimizer/rule/PushFilterDownNodeRule.cpp
+++ b/src/graph/optimizer/rule/PushFilterDownNodeRule.cpp
@@ -55,7 +55,7 @@ StatusOr<OptRule::TransformResult> PushFilterDownNodeRule::transform(
     auto *append = static_cast<const AppendVertices *>(node);
     vFilter = append->vFilter()->clone();
   } else {
-    DLOG(FATAL) << "Unsupported node kind: " << node->kind();
+    LOG(DFATAL) << "Unsupported node kind: " << node->kind();
     return TransformResult::noTransform();
   }
   auto visitor = graph::ExtractFilterExprVisitor::makePushGetVertices(pool);
@@ -83,7 +83,7 @@ StatusOr<OptRule::TransformResult> PushFilterDownNodeRule::transform(
     append->setVertexFilter(remainedExpr);
     append->setFilter(vFilter);
   } else {
-    DLOG(FATAL) << "Unsupported node kind: " << newExplore->kind();
+    LOG(DFATAL) << "Unsupported node kind: " << newExplore->kind();
     return TransformResult::noTransform();
   }
 
@@ -111,7 +111,7 @@ bool PushFilterDownNodeRule::match(OptContext *octx, const MatchedResult &matche
       return false;
     }
   } else {
-    DLOG(FATAL) << "Unexpected node kind: " << node->kind();
+    LOG(DFATAL) << "Unexpected node kind: " << node->kind();
     return false;
   }
   return true;
diff --git a/src/graph/optimizer/rule/UnionAllIndexScanBaseRule.cpp
b/src/graph/optimizer/rule/UnionAllIndexScanBaseRule.cpp
index 1cd1e216a83..93928c8d68b 100644
--- a/src/graph/optimizer/rule/UnionAllIndexScanBaseRule.cpp
+++ b/src/graph/optimizer/rule/UnionAllIndexScanBaseRule.cpp
@@ -167,8 +167,8 @@ StatusOr<OptRule::TransformResult> UnionAllIndexScanBaseRule::transform(OptContext* ctx,
       break;
     }
     default:
-      LOG(FATAL) << "Invalid expression kind: " << static_cast<uint8_t>(conditionType);
-      break;
+      LOG(DFATAL) << "Invalid expression kind: " << static_cast<uint8_t>(conditionType);
+      return TransformResult::noTransform();
   }
 
   DCHECK(transformedExpr->kind() == ExprKind::kLogicalOr);
diff --git a/src/graph/planner/match/SegmentsConnector.cpp b/src/graph/planner/match/SegmentsConnector.cpp
index 63dcbcebe8b..ca9872a1a8d 100644
--- a/src/graph/planner/match/SegmentsConnector.cpp
+++ b/src/graph/planner/match/SegmentsConnector.cpp
@@ -140,7 +140,7 @@ SubPlan SegmentsConnector::addInput(const SubPlan& left, const SubPlan& right, b
     siLeft->setLeftDep(const_cast<PlanNode*>(right.root));
     siLeft->setLeftVar(right.root->outputVar());
   } else {
-    DLOG(FATAL) << "Unsupported plan node: " << left.tail->kind();
+    LOG(DFATAL) << "Unsupported plan node: " << left.tail->kind();
     return newPlan;
   }
   newPlan.tail = right.tail;
diff --git a/src/graph/planner/plan/PlanNode.cpp b/src/graph/planner/plan/PlanNode.cpp
index 0a29aa8b27c..18a2ba22e1e 100644
--- a/src/graph/planner/plan/PlanNode.cpp
+++ b/src/graph/planner/plan/PlanNode.cpp
@@ -307,7 +307,8 @@ const char* PlanNode::toString(PlanNode::Kind kind) {
       return "GetDstBySrc";
       // no default so the compiler will warning when lack
   }
-  LOG(FATAL) << "Impossible kind plan node " << static_cast<int>(kind);
+  LOG(DFATAL) << "Impossible kind plan node " << static_cast<int>(kind);
+  return "Unknown";
 }
 
 std::string PlanNode::toString() const {
diff --git a/src/graph/planner/plan/PlanNode.h b/src/graph/planner/plan/PlanNode.h
index 0d2853e032b..5e0e4f6dc3a 100644
--- a/src/graph/planner/plan/PlanNode.h
+++ b/src/graph/planner/plan/PlanNode.h
@@ -350,7 +350,7 @@ class SingleDependencyNode : public PlanNode {
   }
 
   PlanNode* clone() const override {
-    LOG(FATAL) << "Shouldn't call the unimplemented method";
+    LOG(DFATAL) << "Shouldn't call the unimplemented method";
     return nullptr;
   }
 
@@ -372,7 +372,7 @@ class SingleInputNode : public SingleDependencyNode {
   std::unique_ptr<PlanNodeDescription> explain() const override;
 
   PlanNode* clone() const override {
-    LOG(FATAL) << "Shouldn't call the unimplemented method";
+    LOG(DFATAL) << "Shouldn't call the unimplemented method";
     return nullptr;
   }
 
@@ -422,7 +422,7 @@ class BinaryInputNode : public PlanNode {
   }
 
   PlanNode* clone() const override {
-    LOG(FATAL) << "Shouldn't call the unimplemented method for " << kind_;
+    LOG(DFATAL) << "Shouldn't call the unimplemented method for " << kind_;
     return nullptr;
   }
 
@@ -444,7 +444,7 @@ class VariableDependencyNode : public PlanNode {
   std::unique_ptr<PlanNodeDescription> explain() const override;
 
   PlanNode* clone() const override {
-    LOG(FATAL) << "Shouldn't call the unimplemented method";
+    LOG(DFATAL) << "Shouldn't call the unimplemented method";
     return nullptr;
   }
diff --git a/src/graph/service/GraphServer.cpp b/src/graph/service/GraphServer.cpp
index 897133a905e..5073fdb39b8 100644
--- a/src/graph/service/GraphServer.cpp
+++ b/src/graph/service/GraphServer.cpp
@@ -41,7 +41,10 @@ bool GraphServer::start() {
   }
 
   // Init worker id for snowflake generating unique id
-  nebula::Snowflake::initWorkerId(interface->metaClient_.get());
+  if (!nebula::Snowflake::initWorkerId(interface->metaClient_.get())) {
+    LOG(ERROR) << "WorkerId init failed";
+    return false;
+  }
   graphThread_ = std::make_unique<std::thread>([&] {
     thriftServer_->setPort(localHost_.port);
diff --git a/src/graph/service/PermissionCheck.cpp b/src/graph/service/PermissionCheck.cpp
index da0dd3789dd..fe1ab00da7c 100644
--- a/src/graph/service/PermissionCheck.cpp
+++ b/src/graph/service/PermissionCheck.cpp
@@ -11,7 +11,7 @@ namespace graph {
 /**
  * Read space : kUse, kDescribeSpace
  * Write space : kCreateSpace, kDropSpace, kClearSpace, kCreateSnapshot,
- * kDropSnapshot, kBalance, kAdmin, kConfig
+ * kDropSnapshot, kAdminJob(data balance), kConfig
  * Read schema : kDescribeTag, kDescribeEdge,
  * kDescribeTagIndex, kDescribeEdgeIndex
  * Write schema : kCreateTag, kAlterTag, kCreateEdge,
@@ -24,7 +24,7 @@ namespace graph {
  * kFetchEdges, kFindPath, kLimit, KGroupBy, kReturn
  * Write data: kBuildTagIndex, kBuildEdgeIndex,
  * kInsertVertex, kUpdateVertex, kInsertEdge,
- * kUpdateEdge, kDeleteVertex, kDeleteEdges
+ * kUpdateEdge, kDeleteVertex, kDeleteEdges, kAdminJob(other)
  * Special operation : kShow, kChangePassword
  */
 
@@ -109,8 +109,21 @@ namespace graph {
     case Sentence::Kind::kUpdateEdge:
     case Sentence::Kind::kDeleteVertices:
     case Sentence::Kind::kDeleteTags:
-    case Sentence::Kind::kDeleteEdges:
+    case Sentence::Kind::kDeleteEdges: {
+      return PermissionManager::canWriteData(session, vctx);
+    }
     case Sentence::Kind::kAdminJob: {
+      auto *adminJobSentence = dynamic_cast<AdminJobSentence *>(sentence);
+      if (adminJobSentence == nullptr) {
+        // should not happen.
+        LOG(WARNING) << "sentence is not AdminJobSentence";
+        return Status::PermissionError("Invalid adminjob sentence.");
+      }
+      // admin job like data balance need permission to write space
+      // here to restore default permission check before balance is refactored into job
+      if (adminJobSentence->needWriteSpace()) {
+        return PermissionManager::canWriteSpace(session);
+      }
       return PermissionManager::canWriteData(session, vctx);
     }
     case Sentence::Kind::kDescribeTag:
diff --git a/src/graph/util/ExpressionUtils.cpp b/src/graph/util/ExpressionUtils.cpp
index cf8990ededd..54b06b3d959 100644
--- a/src/graph/util/ExpressionUtils.cpp
+++ b/src/graph/util/ExpressionUtils.cpp
@@ -536,7 +536,65 @@ StatusOr<Expression *> ExpressionUtils::foldConstantExpr(const Expression *expr)
     }
     return foldedExpr;
   }
-  return newExpr;
+
+  auto matcher = [](const Expression *e) {
+    return e->kind() == Expression::Kind::kLogicalAnd || e->kind() == Expression::Kind::kLogicalOr;
+  };
+  auto rewriter = [](const Expression *e) {
+    auto logicalExpr = static_cast<const LogicalExpression *>(e);
+    return simplifyLogicalExpr(logicalExpr);
+  };
+  return RewriteVisitor::transform(newExpr, matcher, rewriter);
+}
+
+Expression *ExpressionUtils::simplifyLogicalExpr(const LogicalExpression *logicalExpr) {
+  auto *expr = static_cast<LogicalExpression *>(logicalExpr->clone());
+  if (expr->kind() == Expression::Kind::kLogicalXor) return expr;
+
+  ObjectPool *objPool = logicalExpr->getObjPool();
+
+  // Simplify logical and/or
+  for (auto iter = expr->operands().begin(); iter != expr->operands().end();) {
+    auto *operand = *iter;
+    if (operand->kind() != Expression::Kind::kConstant) {
+      ++iter;
+      continue;
+    }
+    auto &val = static_cast<ConstantExpression *>(operand)->value();
+    if (!val.isBool()) {
+      ++iter;
+      continue;
+    }
+    if (expr->kind() == Expression::Kind::kLogicalAnd) {
+      if (val.getBool()) {
+        // Remove the true operand
+        iter = expr->operands().erase(iter);
+        continue;
+      }
+      // The whole expression is false
+      return ConstantExpression::make(objPool, false);
+    }
+    // expr->kind() == Expression::Kind::kLogicalOr
+    if (val.getBool()) {
+      // The whole expression is true
+      return ConstantExpression::make(objPool, true);
+    }
+    // Remove the false operand
+    iter = expr->operands().erase(iter);
+  }
+
+  if (expr->operands().empty()) {
+    // true and true and true => true
+    if (expr->kind() == Expression::Kind::kLogicalAnd) {
+      return ConstantExpression::make(objPool, true);
+    }
+    // false or false or false => false
+    return ConstantExpression::make(objPool, false);
+  } else if (expr->operands().size() == 1) {
+    return expr->operands()[0];
+  } else {
+    return expr;
+  }
 }
 
 Expression *ExpressionUtils::reduceUnaryNotExpr(const Expression *expr) {
diff --git a/src/graph/util/ExpressionUtils.h b/src/graph/util/ExpressionUtils.h
index 48cae74ffc7..ac10f1570d7 100644
--- a/src/graph/util/ExpressionUtils.h
+++ b/src/graph/util/ExpressionUtils.h
@@ -117,6 +117,13 @@ class ExpressionUtils {
   //   v.age > 40 + 1  =>  v.age > 41
   static StatusOr<Expression*> foldConstantExpr(const Expression* expr);
 
+  // Simplify logical and/or expr.
+  // e.g. A and true   => A
+  //      A or false   => A
+  //      A and false  => false
+  //      A or true    => true
+  static Expression* simplifyLogicalExpr(const LogicalExpression* logicalExpr);
+
   // Clones and reduces unaryNot expression
   // Examples:
   // !!(A and B)  =>  (A and B)
diff --git a/src/graph/util/ParserUtil.cpp b/src/graph/util/ParserUtil.cpp
index c90ea0e339c..1ab8c3b07ce 100644
--- a/src/graph/util/ParserUtil.cpp
+++ b/src/graph/util/ParserUtil.cpp
@@ -62,7 +62,7 @@ void ParserUtil::rewriteLC(QueryContext *qctx,
         return static_cast<Expression *>(mpp);
       } break;
       default:
-        LOG(FATAL) << "Unexpected expression kind: " << expr->kind();
+        LOG(DFATAL) << "Unexpected expression kind: " << expr->kind();
     }
     return ret;
   };
diff --git a/src/graph/util/ToJson.cpp b/src/graph/util/ToJson.cpp
index 5c6b574690a..5cc8cbea458 100644
--- a/src/graph/util/ToJson.cpp
+++ b/src/graph/util/ToJson.cpp
@@ -82,7 +82,7 @@ folly::dynamic toJson(const Value &value) {
       // TODO store to object or array
       return value.toString();
   }
-  DLOG(FATAL) << "Impossible reach.";
+  LOG(DFATAL) << "Impossible reach.";
   return folly::dynamic::object();
 }
diff --git a/src/graph/util/test/ExpressionUtilsTest.cpp b/src/graph/util/test/ExpressionUtilsTest.cpp
index 8b349c9c3b3..e376c48e9de 100644
--- a/src/graph/util/test/ExpressionUtilsTest.cpp
+++ b/src/graph/util/test/ExpressionUtilsTest.cpp
@@ -764,5 +764,68 @@ TEST_F(ExpressionUtilsTest, expandExpression) {
     ASSERT_EQ(expected, target->toString());
   }
 }
+
+TEST_F(ExpressionUtilsTest, simplifyLogicalExpr) {
+  {
+    auto filter = parse("A and true");
+    auto target =
+        ExpressionUtils::simplifyLogicalExpr(static_cast<LogicalExpression *>(filter));
+    auto expected = "A";
+    ASSERT_EQ(expected, target->toString());
+  }
+  {
+    auto filter = parse("A and false");
+    auto target =
+        ExpressionUtils::simplifyLogicalExpr(static_cast<LogicalExpression *>(filter));
+    auto expected = "false";
+    ASSERT_EQ(expected, target->toString());
+  }
+  {
+    auto filter = parse("A or true");
+    auto target =
+        ExpressionUtils::simplifyLogicalExpr(static_cast<LogicalExpression *>(filter));
+    auto expected = "true";
+    ASSERT_EQ(expected, target->toString());
+  }
+  {
+    auto filter = parse("A or false");
+    auto target =
+        ExpressionUtils::simplifyLogicalExpr(static_cast<LogicalExpression *>(filter));
+    auto expected = "A";
+    ASSERT_EQ(expected, target->toString());
+  }
+  {
+    auto filter = parse("A or 2 > 1");
+    auto target =
+        ExpressionUtils::simplifyLogicalExpr(static_cast<LogicalExpression *>(filter));
+    auto expected = "(A OR (2>1))";
+    ASSERT_EQ(expected, target->toString());
+  }
+  {
+    auto filter = parse("true and true and true");
+    auto newFilter = ExpressionUtils::flattenInnerLogicalAndExpr(filter);
+    auto target =
+        ExpressionUtils::simplifyLogicalExpr(static_cast<LogicalExpression *>(newFilter));
+    auto expected = "true";
+    ASSERT_EQ(expected, target->toString());
+  }
+  {
+    auto filter = parse("false or false or false");
+    auto newFilter = ExpressionUtils::flattenInnerLogicalOrExpr(filter);
+    auto target =
+        ExpressionUtils::simplifyLogicalExpr(static_cast<LogicalExpression *>(newFilter));
+    auto expected = "false";
+    ASSERT_EQ(expected, target->toString());
+  }
+  {
+    auto filter = parse("(A and B) or (2 > 1) or (C and true) or (D or true)");
+    auto newFilter = ExpressionUtils::flattenInnerLogicalOrExpr(filter);
+    auto target =
+        ExpressionUtils::simplifyLogicalExpr(static_cast<LogicalExpression *>(newFilter));
+    auto expected = "true";
+    ASSERT_EQ(expected, target->toString());
+  }
+}
+
 }  // namespace graph
 }  // namespace nebula
diff --git a/src/graph/validator/LookupValidator.cpp b/src/graph/validator/LookupValidator.cpp
index 3d2c07ded17..40694e1348b 100644
--- a/src/graph/validator/LookupValidator.cpp
+++ b/src/graph/validator/LookupValidator.cpp
@@ -534,8 +534,8 @@ Expression* LookupValidator::reverseRelKind(RelationalExpression* expr) {
       reversedKind = ExprKind::kRelLE;
       break;
     default:
-      LOG(FATAL) << "Invalid relational expression kind: " << static_cast<uint8_t>(kind);
-      break;
+      LOG(DFATAL) << "Invalid relational expression kind: " << static_cast<uint8_t>(kind);
+      return expr;
   }
 
   auto left = expr->left();
diff --git a/src/graph/validator/MaintainValidator.cpp b/src/graph/validator/MaintainValidator.cpp
index a7d8d0d9242..95bde20293c 100644
--- a/src/graph/validator/MaintainValidator.cpp
+++ b/src/graph/validator/MaintainValidator.cpp
@@ -28,7 +28,8 @@ namespace graph {
 // Validate columns of schema.
 // Check validity of columns and fill to thrift structure.
 static Status validateColumns(const std::vector<ColumnSpecification *> &columnSpecs,
-                              meta::cpp2::Schema &schema) {
+                              meta::cpp2::Schema &schema,
+                              bool isAlter = false) {
   for (auto &spec : columnSpecs) {
     meta::cpp2::ColumnDef column;
     auto type = spec->type();
@@ -59,6 +60,12 @@
     if (!column.nullable_ref().has_value()) {
       column.nullable_ref() = true;
     }
+    // Should report an error when altering the column
+    // which doesn't have default value to not nullable
+    if (isAlter && !column.nullable_ref().value() && !column.default_value_ref().has_value()) {
+      return Status::SemanticError("Column `%s' must have a default value if it's not nullable",
+                                   spec->name()->c_str());
+    }
     schema.columns_ref().value().emplace_back(std::move(column));
   }
   return Status::OK();
@@ -92,7 +99,7 @@ static StatusOr<std::vector<meta::cpp2::AlterSchemaItem>> validateSchemaOpts(
         return Status::SemanticError("Duplicate column name `%s'", spec->name()->c_str());
       }
     }
-    NG_LOG_AND_RETURN_IF_ERROR(validateColumns(specs, schema));
+    NG_LOG_AND_RETURN_IF_ERROR(validateColumns(specs, schema, true));
   }
 
   schemaItem.schema_ref() = std::move(schema);
diff --git a/src/graph/validator/MutateValidator.cpp b/src/graph/validator/MutateValidator.cpp
index 7130c5cae9d..a2d3f798618 100644
--- a/src/graph/validator/MutateValidator.cpp
+++ b/src/graph/validator/MutateValidator.cpp
@@ -4,12 +4,14 @@
  */
 
 #include "graph/validator/MutateValidator.h"
+
+#include "common/datatypes/Value.h"
+#include "common/expression/Expression.h"
 #include "common/expression/LabelAttributeExpression.h"
 #include "graph/planner/plan/Mutate.h"
 #include "graph/planner/plan/Query.h"
 #include "graph/util/ExpressionUtils.h"
 #include "graph/util/SchemaUtil.h"
-#include "graph/visitor/RewriteSymExprVisitor.h"
+#include "graph/visitor/RewriteVisitor.h"
 
 namespace nebula {
 namespace graph {
@@ -750,10 +752,70 @@ Expression *UpdateValidator::rewriteSymExpr(Expression *expr,
                                             const std::string &sym,
                                             bool &hasWrongType,
                                             bool isEdge) {
-  RewriteSymExprVisitor visitor(qctx_->objPool(), sym, isEdge);
-  expr->accept(&visitor);
-  hasWrongType = visitor.hasWrongType();
-  return std::move(visitor).expr();
+  std::unordered_set<Expression::Kind> invalidExprs{
+      Expression::Kind::kVersionedVar,
+      Expression::Kind::kVarProperty,
+      Expression::Kind::kInputProperty,
+      Expression::Kind::kVar,
+      // Expression::Kind::kLabelAttribute, valid only for update edge
+      Expression::Kind::kAttribute,
+      Expression::Kind::kSubscript,
+      Expression::Kind::kUUID,
+      Expression::Kind::kTagProperty,
+      Expression::Kind::kLabelTagProperty,
+      Expression::Kind::kDstProperty,
+      Expression::Kind::kEdgeSrc,
+      Expression::Kind::kEdgeType,
+      Expression::Kind::kEdgeRank,
+      Expression::Kind::kEdgeDst,
+  };
+  if (isEdge) {
+    invalidExprs.emplace(Expression::Kind::kSrcProperty);
+  } else {
+    invalidExprs.emplace(Expression::Kind::kLabelAttribute);
+    invalidExprs.emplace(Expression::Kind::kEdgeProperty);
+  }
+  auto *r = ExpressionUtils::findAny(expr, invalidExprs);
+  if (r != nullptr) {
+    hasWrongType = true;
+    return nullptr;
+  }
+
+  auto *pool = qctx_->objPool();
+  RewriteVisitor::Matcher matcher = [](const Expression *e) -> bool {
+    switch (e->kind()) {
+      case Expression::Kind::kLabel:
+      case Expression::Kind::kLabelAttribute:
+        return true;
+      default:
+        return false;
+    }
+  };
+  RewriteVisitor::Rewriter rewriter = [pool, sym, isEdge](const Expression *e) -> Expression * {
+    switch (e->kind()) {
+      case Expression::Kind::kLabel: {
+        auto laExpr = static_cast<const LabelExpression *>(e);
+        if (isEdge) {
+          return EdgePropertyExpression::make(pool, sym, laExpr->name());
+        } else {
+          return SourcePropertyExpression::make(pool, sym, laExpr->name());
+        }
+      }
+      case Expression::Kind::kLabelAttribute: {
+        auto laExpr = static_cast<const LabelAttributeExpression *>(e);
+        if (isEdge) {
+          return EdgePropertyExpression::make(
+              pool, laExpr->left()->name(), laExpr->right()->value().getStr());
+        } else {
+          return nullptr;
+        }
+      }
+      default:
+        return nullptr;
+    }
+  };
+  auto *newExpr = RewriteVisitor::transform(expr, matcher, rewriter);
+  return newExpr;
 }
 
 Status UpdateVertexValidator::validateImpl() {
diff --git a/src/graph/validator/Validator.cpp b/src/graph/validator/Validator.cpp
index a98dde58ad6..634573230f9 100644
--- a/src/graph/validator/Validator.cpp
+++ b/src/graph/validator/Validator.cpp
@@ -258,10 +258,10 @@ std::unique_ptr<Validator> Validator::makeValidator(Sentence* sentence, QueryCon
     case Sentence::Kind::kUnknown:
     case Sentence::Kind::kReturn: {
       // nothing
-      DLOG(FATAL) << "Unimplemented sentence " << kind;
+      LOG(DFATAL) << "Unimplemented sentence " << kind;
     }
   }
-  DLOG(FATAL) << "Unknown sentence " << static_cast<int>(kind);
+  LOG(DFATAL) << "Unknown sentence " << static_cast<int>(kind);
   return std::make_unique<ReportError>(sentence, context);
 }
diff --git a/src/graph/visitor/CMakeLists.txt b/src/graph/visitor/CMakeLists.txt
index 01ccba3cd1a..23e6295f592 100644
--- a/src/graph/visitor/CMakeLists.txt
+++ b/src/graph/visitor/CMakeLists.txt
@@ -11,7 +11,6 @@ nebula_add_library(
     ExtractPropExprVisitor.cpp
     ExtractFilterExprVisitor.cpp
     FoldConstantExprVisitor.cpp
-    RewriteSymExprVisitor.cpp
     RewriteVisitor.cpp
     FindVisitor.cpp
     VidExtractVisitor.cpp
diff --git a/src/graph/visitor/DeduceTypeVisitor.cpp b/src/graph/visitor/DeduceTypeVisitor.cpp
index bf8ab94b3ca..d8689337d51 100644
--- a/src/graph/visitor/DeduceTypeVisitor.cpp
+++ b/src/graph/visitor/DeduceTypeVisitor.cpp
@@ -219,8 +219,12 @@ void DeduceTypeVisitor::visit(UnaryExpression *expr) {
       break;
     }
default: { - LOG(FATAL) << "Invalid unary expression kind: " << static_cast(expr->kind()); - break; + LOG(DFATAL) << "Invalid unary expression kind: " << static_cast(expr->kind()); + std::stringstream ss; + ss << "`" << expr->toString() << "' is an invalid unary expression, kind is " + << static_cast(expr->kind()) << "."; + status_ = Status::SemanticError(ss.str()); + return; } } } @@ -280,8 +284,12 @@ void DeduceTypeVisitor::visit(ArithmeticExpression *expr) { break; } default: { - LOG(FATAL) << "Invalid arithmetic expression kind: " << static_cast(expr->kind()); - break; + LOG(DFATAL) << "Invalid arithmetic expression kind: " << static_cast(expr->kind()); + std::stringstream ss; + ss << "`" << expr->toString() << "' is an invalid arithmetic expression, kind is " + << static_cast(expr->kind()) << "."; + status_ = Status::SemanticError(ss.str()); + return; } } } @@ -422,8 +430,12 @@ void DeduceTypeVisitor::visit(LogicalExpression *expr) { break; } default: { - LOG(FATAL) << "Invalid logical expression kind: " << static_cast(expr->kind()); - break; + LOG(DFATAL) << "Invalid logical expression kind: " << static_cast(expr->kind()); + std::stringstream ss; + ss << "`" << expr->toString() << "' is an invalid logical expression, kind is " + << static_cast(expr->kind()) << "."; + status_ = Status::SemanticError(ss.str()); + return; } } } diff --git a/src/graph/visitor/ExtractPropExprVisitor.cpp b/src/graph/visitor/ExtractPropExprVisitor.cpp index 5b2cfab849e..44531ddd02f 100644 --- a/src/graph/visitor/ExtractPropExprVisitor.cpp +++ b/src/graph/visitor/ExtractPropExprVisitor.cpp @@ -93,7 +93,8 @@ void ExtractPropExprVisitor::visit(UnaryExpression* expr) { break; } default: { - LOG(FATAL) << "Invalid Kind " << expr->kind(); + LOG(DFATAL) << "Invalid Kind " << expr->kind(); + reportError(expr); } } } @@ -110,7 +111,9 @@ void ExtractPropExprVisitor::visitPropertyExpr(PropertyExpression* expr) { break; } default: { - LOG(FATAL) << "Invalid Kind " << expr->kind(); + LOG(DFATAL) << "Invalid Kind " << expr->kind(); + reportError(expr); + return; } } auto found = propExprColMap_.find(propExpr->toString()); @@ -162,7 +165,9 @@ void ExtractPropExprVisitor::visitVertexEdgePropExpr(PropertyExpression* expr) { break; } default: { - LOG(FATAL) << "Invalid Kind " << expr->kind(); + LOG(DFATAL) << "Invalid Kind " << expr->kind(); + reportError(expr); + return; } } auto found = propExprColMap_.find(propExpr->toString()); diff --git a/src/graph/visitor/PropertyTrackerVisitor.cpp b/src/graph/visitor/PropertyTrackerVisitor.cpp index fe66c7983f8..fb2483a63f0 100644 --- a/src/graph/visitor/PropertyTrackerVisitor.cpp +++ b/src/graph/visitor/PropertyTrackerVisitor.cpp @@ -317,7 +317,8 @@ void PropertyTrackerVisitor::visit(AggregateExpression *expr) { std::transform(funName.begin(), funName.end(), funName.begin(), ::tolower); if (funName == "count") { auto kind = expr->arg()->kind(); - if (kind == Expression::Kind::kConstant) { + if (kind == Expression::Kind::kVarProperty || kind == Expression::Kind::kConstant || + kind == Expression::Kind::kInputProperty) { return; } } diff --git a/src/graph/visitor/PrunePropertiesVisitor.cpp b/src/graph/visitor/PrunePropertiesVisitor.cpp index 3fe1f349f5d..0b687748ebf 100644 --- a/src/graph/visitor/PrunePropertiesVisitor.cpp +++ b/src/graph/visitor/PrunePropertiesVisitor.cpp @@ -125,11 +125,6 @@ void PrunePropertiesVisitor::visitCurrent(Aggregate *node) { } } for (auto *groupItem : node->groupItems()) { - if (groupItem->kind() == Expression::Kind::kVarProperty || - groupItem->kind() ==
Expression::Kind::kInputProperty || - groupItem->kind() == Expression::Kind::kConstant) { - continue; - } status_ = extractPropsFromExpr(groupItem); if (!status_.ok()) { return; @@ -446,7 +441,11 @@ void PrunePropertiesVisitor::visit(Unwind *node) { void PrunePropertiesVisitor::visitCurrent(Unwind *node) { const auto &alias = node->alias(); - if (propsUsed_.hasAlias(alias)) { + auto expr = node->unwindExpr(); + auto kind = expr->kind(); + // unwind e.start_year as a + if (propsUsed_.hasAlias(alias) || + (kind != Expression::Kind::kVarProperty && kind != Expression::Kind::kInputProperty)) { status_ = extractPropsFromExpr(node->unwindExpr()); if (!status_.ok()) { return; diff --git a/src/graph/visitor/RewriteSymExprVisitor.cpp b/src/graph/visitor/RewriteSymExprVisitor.cpp deleted file mode 100644 index 5558bcc2999..00000000000 --- a/src/graph/visitor/RewriteSymExprVisitor.cpp +++ /dev/null @@ -1,350 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#include "graph/visitor/RewriteSymExprVisitor.h" - -namespace nebula { -namespace graph { - -RewriteSymExprVisitor::RewriteSymExprVisitor(ObjectPool *objPool, - const std::string &sym, - bool isEdge) - : pool_(objPool), sym_(sym), isEdge_(isEdge) {} - -void RewriteSymExprVisitor::visit(ConstantExpression *expr) { - UNUSED(expr); - expr_ = nullptr; -} - -void RewriteSymExprVisitor::visit(UnaryExpression *expr) { - expr->operand()->accept(this); - if (expr_) { - expr->setOperand(expr_); - } -} - -void RewriteSymExprVisitor::visit(TypeCastingExpression *expr) { - expr->operand()->accept(this); - if (expr_) { - expr->setOperand(expr_); - } -} - -void RewriteSymExprVisitor::visit(LabelExpression *expr) { - if (isEdge_) { - expr_ = EdgePropertyExpression::make(pool_, sym_, expr->name()); - } else { - expr_ = SourcePropertyExpression::make(pool_, sym_, expr->name()); - } -} - -void RewriteSymExprVisitor::visit(LabelAttributeExpression *expr) { - if (isEdge_) { - expr_ = - EdgePropertyExpression::make(pool_, expr->left()->name(), expr->right()->value().getStr()); - hasWrongType_ = false; - } else { - hasWrongType_ = true; - expr_ = nullptr; - } -} - -void RewriteSymExprVisitor::visit(ArithmeticExpression *expr) { - visitBinaryExpr(expr); -} - -void RewriteSymExprVisitor::visit(RelationalExpression *expr) { - visitBinaryExpr(expr); -} - -void RewriteSymExprVisitor::visit(SubscriptExpression *expr) { - UNUSED(expr); - hasWrongType_ = true; -} - -void RewriteSymExprVisitor::visit(AttributeExpression *expr) { - UNUSED(expr); - hasWrongType_ = true; -} - -void RewriteSymExprVisitor::visit(LogicalExpression *expr) { - auto &operands = expr->operands(); - for (auto i = 0u; i < operands.size(); i++) { - operands[i]->accept(this); - if (expr_) { - expr->setOperand(i, expr_); - } - } -} - -// function call -void RewriteSymExprVisitor::visit(FunctionCallExpression *expr) { - const auto &args = expr->args()->args(); - for (size_t i = 0; i < args.size(); ++i) { - auto &arg = args[i]; - arg->accept(this); - if (expr_) { - expr->args()->setArg(i, std::move(expr_)); - } - } -} - -void RewriteSymExprVisitor::visit(AggregateExpression *expr) { - auto *arg = expr->arg(); - arg->accept(this); - if (expr_) { - expr->setArg(std::move(expr_)); - } -} - -void RewriteSymExprVisitor::visit(UUIDExpression *expr) { - UNUSED(expr); - hasWrongType_ = true; -} - -// variable expression -void RewriteSymExprVisitor::visit(VariableExpression *expr) { - UNUSED(expr); - hasWrongType_ = true; -} - -void 
RewriteSymExprVisitor::visit(VersionedVariableExpression *expr) { - UNUSED(expr); - hasWrongType_ = true; -} - -// container expression -void RewriteSymExprVisitor::visit(ListExpression *expr) { - const auto &items = expr->items(); - for (size_t i = 0; i < items.size(); ++i) { - items[i]->accept(this); - if (expr_) { - expr->setItem(i, std::move(expr_)); - } - } -} - -void RewriteSymExprVisitor::visit(SetExpression *expr) { - const auto &items = expr->items(); - for (size_t i = 0; i < items.size(); ++i) { - items[i]->accept(this); - if (expr_) { - expr->setItem(i, std::move(expr_)); - } - } -} - -void RewriteSymExprVisitor::visit(MapExpression *expr) { - const auto &items = expr->items(); - for (size_t i = 0; i < items.size(); ++i) { - items[i].second->accept(this); - if (expr_) { - expr->setItem(i, {items[i].first, std::move(expr_)}); - } - } -} - -// property Expression -void RewriteSymExprVisitor::visit(TagPropertyExpression *expr) { - UNUSED(expr); - hasWrongType_ = true; -} - -void RewriteSymExprVisitor::visit(LabelTagPropertyExpression *expr) { - UNUSED(expr); - hasWrongType_ = true; -} - -void RewriteSymExprVisitor::visit(EdgePropertyExpression *expr) { - UNUSED(expr); - if (!isEdge_) { - hasWrongType_ = true; - } -} - -void RewriteSymExprVisitor::visit(InputPropertyExpression *expr) { - UNUSED(expr); - hasWrongType_ = true; -} - -void RewriteSymExprVisitor::visit(VariablePropertyExpression *expr) { - UNUSED(expr); - hasWrongType_ = true; -} - -void RewriteSymExprVisitor::visit(DestPropertyExpression *expr) { - UNUSED(expr); - hasWrongType_ = true; -} - -void RewriteSymExprVisitor::visit(SourcePropertyExpression *expr) { - UNUSED(expr); - if (isEdge_) { - hasWrongType_ = true; - } -} - -void RewriteSymExprVisitor::visit(EdgeSrcIdExpression *expr) { - UNUSED(expr); - hasWrongType_ = true; -} - -void RewriteSymExprVisitor::visit(EdgeTypeExpression *expr) { - UNUSED(expr); - hasWrongType_ = true; -} - -void RewriteSymExprVisitor::visit(EdgeRankExpression *expr) { - UNUSED(expr); - hasWrongType_ = true; -} - -void RewriteSymExprVisitor::visit(EdgeDstIdExpression *expr) { - UNUSED(expr); - hasWrongType_ = true; -} - -void RewriteSymExprVisitor::visit(VertexExpression *expr) { - UNUSED(expr); - expr_ = nullptr; -} - -void RewriteSymExprVisitor::visit(EdgeExpression *expr) { - UNUSED(expr); - expr_ = nullptr; -} - -void RewriteSymExprVisitor::visit(ColumnExpression *expr) { - UNUSED(expr); - expr_ = nullptr; -} - -void RewriteSymExprVisitor::visit(CaseExpression *expr) { - if (expr->hasCondition()) { - expr->condition()->accept(this); - if (expr_) { - expr->setCondition(expr_); - } - } - if (expr->hasDefault()) { - expr->defaultResult()->accept(this); - if (expr_) { - expr->setDefault(expr_); - } - } - auto &cases = expr->cases(); - for (size_t i = 0; i < cases.size(); ++i) { - auto when = cases[i].when; - auto then = cases[i].then; - when->accept(this); - if (expr_) { - expr->setWhen(i, expr_); - } - then->accept(this); - if (expr_) { - expr->setThen(i, expr_); - } - } -} - -void RewriteSymExprVisitor::visitBinaryExpr(BinaryExpression *expr) { - expr->left()->accept(this); - if (expr_) { - expr->setLeft(expr_); - } - expr->right()->accept(this); - if (expr_) { - expr->setRight(expr_); - } -} - -void RewriteSymExprVisitor::visit(PathBuildExpression *expr) { - const auto &items = expr->items(); - for (size_t i = 0; i < items.size(); ++i) { - items[i]->accept(this); - if (expr_) { - expr->setItem(i, std::move(expr_)); - } - } -} - -void 
RewriteSymExprVisitor::visit(ListComprehensionExpression *expr) { - expr->collection()->accept(this); - if (expr_) { - expr->setCollection(expr_); - } - if (expr->hasFilter()) { - expr->filter()->accept(this); - if (expr_) { - expr->setFilter(expr_); - } - } - if (expr->hasMapping()) { - expr->mapping()->accept(this); - if (expr_) { - expr->setMapping(expr_); - } - } -} - -void RewriteSymExprVisitor::visit(PredicateExpression *expr) { - expr->collection()->accept(this); - if (expr_) { - expr->setCollection(expr_); - } - if (expr->hasFilter()) { - expr->filter()->accept(this); - if (expr_) { - expr->setFilter(expr_); - } - } -} - -void RewriteSymExprVisitor::visit(ReduceExpression *expr) { - expr->initial()->accept(this); - if (expr_) { - expr->setInitial(expr_); - } - expr->collection()->accept(this); - if (expr_) { - expr->setCollection(expr_); - } - expr->mapping()->accept(this); - if (expr_) { - expr->setMapping(expr_); - } -} - -void RewriteSymExprVisitor::visit(SubscriptRangeExpression *expr) { - expr->list()->accept(this); - if (expr_) { - expr->setList(expr_); - } - if (expr->lo() != nullptr) { - expr->lo()->accept(this); - if (expr_) { - expr->setLo(expr_); - } - } - if (expr->hi() != nullptr) { - expr->hi()->accept(this); - if (expr_) { - expr->setHi(expr_); - } - } -} - -void RewriteSymExprVisitor::visit(MatchPathPatternExpression *expr) { - if (expr->genList() != nullptr) { - expr->genList()->accept(this); - if (expr_) { - expr->setGenList(expr_); - expr_ = nullptr; - } - } -} - -} // namespace graph -} // namespace nebula diff --git a/src/graph/visitor/RewriteSymExprVisitor.h b/src/graph/visitor/RewriteSymExprVisitor.h deleted file mode 100644 index 4a45a2f11a9..00000000000 --- a/src/graph/visitor/RewriteSymExprVisitor.h +++ /dev/null @@ -1,98 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#ifndef GRAPH_VISITOR_REWRITESYMEXPRVISITOR_H_ -#define GRAPH_VISITOR_REWRITESYMEXPRVISITOR_H_ - -#include - -#include "common/expression/ExprVisitor.h" - -namespace nebula { - -class Expression; - -namespace graph { - -class RewriteSymExprVisitor final : public ExprVisitor { - public: - RewriteSymExprVisitor(ObjectPool *objPool, const std::string &sym, bool isEdge); - - bool hasWrongType() const { - return hasWrongType_; - } - - Expression *expr() { - return expr_; - } - - void visit(ConstantExpression *expr) override; - void visit(UnaryExpression *expr) override; - void visit(TypeCastingExpression *expr) override; - void visit(LabelExpression *expr) override; - void visit(LabelAttributeExpression *expr) override; - // binary expression - void visit(ArithmeticExpression *expr) override; - void visit(RelationalExpression *expr) override; - void visit(SubscriptExpression *expr) override; - void visit(AttributeExpression *expr) override; - void visit(LogicalExpression *expr) override; - // function call - void visit(FunctionCallExpression *expr) override; - void visit(AggregateExpression *expr) override; - void visit(UUIDExpression *expr) override; - // variable expression - void visit(VariableExpression *expr) override; - void visit(VersionedVariableExpression *expr) override; - // container expression - void visit(ListExpression *expr) override; - void visit(SetExpression *expr) override; - void visit(MapExpression *expr) override; - // property Expression - void visit(LabelTagPropertyExpression *expr) override; - void visit(TagPropertyExpression *expr) override; - void visit(EdgePropertyExpression *expr) override; - void visit(InputPropertyExpression *expr) override; - void visit(VariablePropertyExpression *expr) override; - void visit(DestPropertyExpression *expr) override; - void visit(SourcePropertyExpression *expr) override; - void visit(EdgeSrcIdExpression *expr) override; - void visit(EdgeTypeExpression *expr) override; - void visit(EdgeRankExpression *expr) override; - void visit(EdgeDstIdExpression *expr) override; - // vertex/edge expression - void visit(VertexExpression *expr) override; - void visit(EdgeExpression *expr) override; - // case expression - void visit(CaseExpression *expr) override; - // path build expression - void visit(PathBuildExpression *expr) override; - // column expression - void visit(ColumnExpression *expr) override; - // predicate expression - void visit(PredicateExpression *expr) override; - // list comprehension expression - void visit(ListComprehensionExpression *expr) override; - // reduce expression - void visit(ReduceExpression *expr) override; - // subscript range expression - void visit(SubscriptRangeExpression *expr) override; - // match path pattern expression - void visit(MatchPathPatternExpression *expr) override; - - private: - void visitBinaryExpr(BinaryExpression *expr); - - ObjectPool *pool_; - const std::string &sym_; - bool hasWrongType_{false}; - bool isEdge_{false}; - Expression *expr_{nullptr}; -}; - -} // namespace graph -} // namespace nebula - -#endif // GRAPH_VISITOR_REWRITESYMEXPRVISITOR_H_ diff --git a/src/graph/visitor/VidExtractVisitor.cpp b/src/graph/visitor/VidExtractVisitor.cpp index 07686462b5c..8f5befe66fd 100644 --- a/src/graph/visitor/VidExtractVisitor.cpp +++ b/src/graph/visitor/VidExtractVisitor.cpp @@ -309,28 +309,30 @@ void VidExtractVisitor::visit(LogicalExpression *expr) { operandsResult.reserve(expr->operands().size()); for (const auto &operand : expr->operands()) { operand->accept(this); + if 
(vidPattern_.spec != VidPattern::Special::kInUsed) { + vidPattern_ = VidPattern{}; + return; + } operandsResult.emplace_back(moveVidPattern()); } VidPattern inResult{VidPattern::Special::kInUsed, {}}; for (auto &result : operandsResult) { - if (result.spec == VidPattern::Special::kInUsed) { - for (auto &node : result.nodes) { - // Can't deduce with outher source (e.g. PropertiesIndex) - switch (node.second.kind) { - case VidPattern::Vids::Kind::kOtherSource: - vidPattern_ = VidPattern{}; - return; - case VidPattern::Vids::Kind::kIn: { - inResult.nodes[node.first].kind = VidPattern::Vids::Kind::kIn; - inResult.nodes[node.first].vids.values.insert( - inResult.nodes[node.first].vids.values.end(), - std::make_move_iterator(node.second.vids.values.begin()), - std::make_move_iterator(node.second.vids.values.end())); - } - case VidPattern::Vids::Kind::kNotIn: - // nothing - break; + for (auto &node : result.nodes) { + // Can't deduce with other source (e.g. PropertiesIndex) + switch (node.second.kind) { + case VidPattern::Vids::Kind::kOtherSource: + vidPattern_ = VidPattern{}; + return; + case VidPattern::Vids::Kind::kIn: { + inResult.nodes[node.first].kind = VidPattern::Vids::Kind::kIn; + inResult.nodes[node.first].vids.values.insert( + inResult.nodes[node.first].vids.values.end(), + std::make_move_iterator(node.second.vids.values.begin()), + std::make_move_iterator(node.second.vids.values.end())); } + case VidPattern::Vids::Kind::kNotIn: + // nothing + break; } } } diff --git a/src/interface/common.thrift b/src/interface/common.thrift index 47524736c24..150a52f459d 100644 --- a/src/interface/common.thrift +++ b/src/interface/common.thrift @@ -405,7 +405,7 @@ enum ErrorCode { E_INVALID_JOB = -2065, // Invalid task // Backup Failure - E_BACKUP_BUILDING_INDEX = -2066, // Backup terminated (index being created) + E_BACKUP_RUNNING_JOBS = -2066, // Backup terminated (some data modification jobs running) E_BACKUP_SPACE_NOT_FOUND = -2067, // Graph space does not exist at the time of backup // RESTORE Failure diff --git a/src/meta/ActiveHostsMan.cpp b/src/meta/ActiveHostsMan.cpp index 2b7230848d0..fdbc3ed9c88 100644 --- a/src/meta/ActiveHostsMan.cpp +++ b/src/meta/ActiveHostsMan.cpp @@ -13,6 +13,7 @@ DECLARE_int32(heartbeat_interval_secs); DEFINE_int32(agent_heartbeat_interval_secs, 60, "Agent heartbeat interval in seconds"); DECLARE_uint32(expired_time_factor); +DEFINE_bool(check_term_for_leader_info, false, "whether to check the term when updating leader info"); namespace nebula { namespace meta { @@ -45,14 +46,16 @@ nebula::cpp2::ErrorCode ActiveHostsMan::updateHostInfo(kvstore::KVStore* kv, TermID term = -1; nebula::cpp2::ErrorCode code; for (auto i = 0U; i != leaderKeys.size(); ++i) { - if (statusVec[i].ok()) { - std::tie(std::ignore, term, code) = MetaKeyUtils::parseLeaderValV3(vals[i]); - if (code != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(INFO) << apache::thrift::util::enumNameSafe(code); - continue; - } - if (terms[i] <= term) { - continue; + if (FLAGS_check_term_for_leader_info) { + if (statusVec[i].ok()) { + std::tie(std::ignore, term, code) = MetaKeyUtils::parseLeaderValV3(vals[i]); + if (code != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(INFO) << apache::thrift::util::enumNameSafe(code); + continue; + } + if (terms[i] <= term) { + continue; + } } } // write directly if not exist, or update if has greater term diff --git a/src/meta/MetaServiceHandler.cpp b/src/meta/MetaServiceHandler.cpp index 5daaa3df3a0..19c840597a7 100644 --- a/src/meta/MetaServiceHandler.cpp +++ b/src/meta/MetaServiceHandler.cpp
@@ -79,6 +79,10 @@ namespace nebula { namespace meta { +AdminClient* MetaServiceHandler::getAdminClient() { + return adminClient_.get(); +} + folly::Future MetaServiceHandler::future_createSpace( const cpp2::CreateSpaceReq& req) { auto* processor = CreateSpaceProcessor::instance(kvstore_); diff --git a/src/meta/MetaServiceHandler.h b/src/meta/MetaServiceHandler.h index e79d543d4a6..668c8c250c1 100644 --- a/src/meta/MetaServiceHandler.h +++ b/src/meta/MetaServiceHandler.h @@ -29,6 +29,8 @@ class MetaServiceHandler final : public cpp2::MetaServiceSvIf { kAgentHBCounters.init(); } + AdminClient* getAdminClient(); + /** * Parts distribution related operations. * */ diff --git a/src/meta/processors/admin/CreateBackupProcessor.cpp b/src/meta/processors/admin/CreateBackupProcessor.cpp index 80e65f9ee66..9d2e10b346a 100644 --- a/src/meta/processors/admin/CreateBackupProcessor.cpp +++ b/src/meta/processors/admin/CreateBackupProcessor.cpp @@ -84,17 +84,22 @@ void CreateBackupProcessor::process(const cpp2::CreateBackupReq& req) { // make sure there is no index job std::unordered_set jobTypes{cpp2::JobType::REBUILD_TAG_INDEX, - cpp2::JobType::REBUILD_EDGE_INDEX}; + cpp2::JobType::REBUILD_EDGE_INDEX, + cpp2::JobType::COMPACT, + cpp2::JobType::INGEST, + cpp2::JobType::DATA_BALANCE, + cpp2::JobType::LEADER_BALANCE}; auto result = jobMgr->checkTypeJobRunning(jobTypes); if (!nebula::ok(result)) { - LOG(INFO) << "Get Index status failed, not allowed to create backup."; + LOG(INFO) << "Get running job status failed, not allowed to create backup."; handleErrorCode(nebula::error(result)); onFinished(); return; } if (nebula::value(result)) { - LOG(INFO) << "Index is rebuilding, not allowed to create backup."; - handleErrorCode(nebula::cpp2::ErrorCode::E_BACKUP_BUILDING_INDEX); + LOG(INFO) << "There are running or queued jobs mutating the data, not allowed to " + "create backup now."; + handleErrorCode(nebula::cpp2::ErrorCode::E_BACKUP_RUNNING_JOBS); onFinished(); return; } diff --git a/src/meta/processors/job/AdminJobProcessor.cpp b/src/meta/processors/job/AdminJobProcessor.cpp index 53065a998a5..7a7deaaa63d 100644 --- a/src/meta/processors/job/AdminJobProcessor.cpp +++ b/src/meta/processors/job/AdminJobProcessor.cpp @@ -96,7 +96,7 @@ void AdminJobProcessor::process(const cpp2::AdminJobReq& req) { for (size_t i = 0; i < paras.size(); i++) { jobIds.push_back(std::stoi(paras[i])); } - auto ret = jobMgr_->recoverJob(spaceId_, adminClient_, jobIds); + auto ret = jobMgr_->recoverJob(spaceId_, jobIds); if (nebula::ok(ret)) { result.recovered_job_num_ref() = nebula::value(ret); } else { @@ -139,13 +139,14 @@ nebula::cpp2::ErrorCode AdminJobProcessor::addJobProcess(const cpp2::AdminJobReq } folly::SharedMutex::WriteHolder holder(LockUtils::lock()); + folly::SharedMutex::ReadHolder snapHolder(LockUtils::snapshotLock()); auto jobId = autoIncrementId(); if (!nebula::ok(jobId)) { return nebula::error(jobId); } JobDescription jobDesc(spaceId_, nebula::value(jobId), type, paras); - auto errorCode = jobMgr_->addJob(std::move(jobDesc), adminClient_); + auto errorCode = jobMgr_->addJob(std::move(jobDesc)); if (errorCode == nebula::cpp2::ErrorCode::SUCCEEDED) { result.job_id_ref() = nebula::value(jobId); } diff --git a/src/meta/processors/job/JobManager.cpp b/src/meta/processors/job/JobManager.cpp index 21bd9276a0b..a2119e0d39d 100644 --- a/src/meta/processors/job/JobManager.cpp +++ b/src/meta/processors/job/JobManager.cpp @@ -42,7 +42,8 @@ JobManager* JobManager::getInstance() { return &inst; } -bool
JobManager::init(nebula::kvstore::KVStore* store) { +bool JobManager::init(nebula::kvstore::KVStore* store, AdminClient* adminClient) { + adminClient_ = adminClient; if (store == nullptr) { return false; } @@ -542,7 +543,7 @@ ErrorOr> JobManager::getAllT return taskDescriptions; } -nebula::cpp2::ErrorCode JobManager::addJob(JobDescription jobDesc, AdminClient* client) { +nebula::cpp2::ErrorCode JobManager::addJob(JobDescription jobDesc) { auto mutexIter = muJobFinished_.find(jobDesc.getSpace()); if (mutexIter == muJobFinished_.end()) { mutexIter = @@ -569,7 +570,6 @@ nebula::cpp2::ErrorCode JobManager::addJob(JobDescription jobDesc, AdminClient* } return rc; } - adminClient_ = client; return nebula::cpp2::ErrorCode::SUCCEEDED; } @@ -815,7 +815,7 @@ nebula::cpp2::ErrorCode JobManager::stopJob(GraphSpaceID spaceId, JobID jobId) { } ErrorOr JobManager::recoverJob( - GraphSpaceID spaceId, AdminClient* client, const std::vector& jobIds) { + GraphSpaceID spaceId, const std::vector& jobIds) { auto muIter = muJobFinished_.find(spaceId); if (muIter == muJobFinished_.end()) { muIter = muJobFinished_.emplace(spaceId, std::make_unique()).first; @@ -823,7 +823,6 @@ ErrorOr JobManager::recoverJob( std::lock_guard lk(*(muIter->second)); std::set jobIdSet(jobIds.begin(), jobIds.end()); std::map allJobs; - adminClient_ = client; std::unique_ptr iter; auto jobPre = MetaKeyUtils::jobPrefix(spaceId); auto retCode = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, jobPre, &iter); @@ -1012,6 +1011,10 @@ ErrorOr JobManager::checkTypeJobRunning( auto status = jobDesc.getStatus(); if (status == cpp2::JobStatus::QUEUE || status == cpp2::JobStatus::RUNNING) { + LOG(INFO) << folly::sformat("The {} job is {} in space {}", + apache::thrift::util::enumNameSafe(jType), + apache::thrift::util::enumNameSafe(status), + spaceId); return true; } } diff --git a/src/meta/processors/job/JobManager.h b/src/meta/processors/job/JobManager.h index be85d9cecc9..25773406360 100644 --- a/src/meta/processors/job/JobManager.h +++ b/src/meta/processors/job/JobManager.h @@ -29,6 +29,7 @@ extern stats::CounterId kNumRunningJobs; class JobManager : public boost::noncopyable, public nebula::cpp::NonMovable { friend class JobManagerTest; friend class GetStatsTest; + friend class CreateBackupProcessorTest; FRIEND_TEST(JobManagerTest, AddJob); FRIEND_TEST(JobManagerTest, StatsJob); FRIEND_TEST(JobManagerTest, JobPriority); @@ -74,11 +75,11 @@ class JobManager : public boost::noncopyable, public nebula::cpp::NonMovable { /** * @brief Init task queue, kvStore and schedule thread - * * @param store + * @param adminClient * @return true if the init is successful */ - bool init(nebula::kvstore::KVStore* store); + bool init(nebula::kvstore::KVStore* store, AdminClient* adminClient); /** * @brief Called when receive a system signal @@ -89,10 +90,9 @@ class JobManager : public boost::noncopyable, public nebula::cpp::NonMovable { * @brief Load job description from kvstore * * @param jobDesc - * @param client * @return nebula::cpp2::ErrorCode */ - nebula::cpp2::ErrorCode addJob(JobDescription jobDesc, AdminClient* client); + nebula::cpp2::ErrorCode addJob(JobDescription jobDesc); /** * @brief The same job in inFlightJobs_. 
@@ -157,7 +157,6 @@ class JobManager : public boost::noncopyable, public nebula::cpp::NonMovable { * @return Return error/recovered job num */ ErrorOr recoverJob(GraphSpaceID spaceId, - AdminClient* client, const std::vector& jobIds = {}); /** diff --git a/src/meta/processors/schema/SchemaUtil.cpp b/src/meta/processors/schema/SchemaUtil.cpp index 526efc1fe34..c482b1a5fd8 100644 --- a/src/meta/processors/schema/SchemaUtil.cpp +++ b/src/meta/processors/schema/SchemaUtil.cpp @@ -187,7 +187,7 @@ bool SchemaUtil::checkType(std::vector& columns) { return false; // no default so compiler will warning when lack } // switch - DLOG(FATAL) << "Unknown property type " << static_cast(column.get_type().get_type()); + LOG(DFATAL) << "Unknown property type " << static_cast(column.get_type().get_type()); return false; } diff --git a/src/meta/test/CreateBackupProcessorTest.cpp b/src/meta/test/CreateBackupProcessorTest.cpp index a090647fa26..4291f19c834 100644 --- a/src/meta/test/CreateBackupProcessorTest.cpp +++ b/src/meta/test/CreateBackupProcessorTest.cpp @@ -9,9 +9,10 @@ #include "common/fs/TempDir.h" #include "common/utils/Utils.h" #include "meta/processors/admin/CreateBackupProcessor.h" -#include "meta/processors/job/JobManager.h" #include "meta/test/TestUtils.h" +class JobManager; + namespace nebula { namespace meta { @@ -44,17 +45,18 @@ class TestStorageService : public storage::cpp2::StorageAdminServiceSvIf { folly::Promise pro; auto f = pro.getFuture(); storage::cpp2::CreateCPResp resp; - storage::cpp2::ResponseCommon result; - std::unordered_map info; - nebula::cpp2::LogInfo logInfo; - logInfo.log_id_ref() = logId; - logInfo.term_id_ref() = termId; - info.emplace(1, std::move(logInfo)); - nebula::cpp2::CheckpointInfo cpInfo; - cpInfo.data_path_ref() = "snapshot_path"; - cpInfo.parts_ref() = std::move(info); - cpInfo.space_id_ref() = req.get_space_ids()[0]; - resp.info_ref() = {cpInfo}; + for (auto spaceId : req.get_space_ids()) { + std::unordered_map info; + nebula::cpp2::LogInfo logInfo; + logInfo.log_id_ref() = logId; + logInfo.term_id_ref() = termId; + info.emplace(1, std::move(logInfo)); + nebula::cpp2::CheckpointInfo cpInfo; + cpInfo.data_path_ref() = "snapshot_path"; + cpInfo.parts_ref() = std::move(info); + cpInfo.space_id_ref() = spaceId; + resp.info_ref()->emplace_back(std::move(cpInfo)); + } resp.code_ref() = nebula::cpp2::ErrorCode::SUCCEEDED; pro.setValue(std::move(resp)); return f; @@ -83,106 +85,110 @@ class TestStorageService : public storage::cpp2::StorageAdminServiceSvIf { } }; -TEST(ProcessorTest, CreateBackupTest) { - auto rpcServer = std::make_unique(); - auto handler = std::make_shared(); - rpcServer->start("storage-admin", 0, handler); - LOG(INFO) << "Start storage server on " << rpcServer->port_; - - LOG(INFO) << "Now test interfaces with retry to leader!"; - fs::TempDir rootPath("/tmp/create_backup_test.XXXXXX"); - std::unique_ptr kv(MockCluster::initMetaKV(rootPath.path())); - - // register machines - std::vector machines; - std::string localIp("127.0.0.1"); - machines.emplace_back(nebula::MetaKeyUtils::machineKey(localIp, rpcServer->port_), ""); - folly::Baton b; - kv->asyncMultiPut(kDefaultSpaceId, kDefaultPartId, std::move(machines), [&](auto) { b.post(); }); - b.wait(); - - // register active hosts, same with heartbeat - auto now = time::WallClock::fastNowInMilliSec(); - HostAddr host(localIp, rpcServer->port_); - std::vector time; - ActiveHostsMan::updateHostInfo( - kv.get(), host, HostInfo(now, meta::cpp2::HostRole::STORAGE, ""), time); - TestUtils::doPut(kv.get(), 
time); - - // mock space 1: test_space - bool ret = false; - cpp2::SpaceDesc properties; - GraphSpaceID id = 1; - properties.space_name_ref() = "test_space"; - properties.partition_num_ref() = 1; - properties.replica_factor_ref() = 1; - auto spaceVal = MetaKeyUtils::spaceVal(properties); - std::vector data; - data.emplace_back(MetaKeyUtils::indexSpaceKey("test_space"), - std::string(reinterpret_cast(&id), sizeof(GraphSpaceID))); - data.emplace_back(MetaKeyUtils::spaceKey(id), MetaKeyUtils::spaceVal(properties)); - - // mock space 2: test_space2 - cpp2::SpaceDesc properties2; - GraphSpaceID id2 = 2; - properties2.space_name_ref() = "test_space2"; - properties2.partition_num_ref() = 1; - properties2.replica_factor_ref() = 1; - spaceVal = MetaKeyUtils::spaceVal(properties2); - data.emplace_back(MetaKeyUtils::indexSpaceKey("test_space2"), - std::string(reinterpret_cast(&id2), sizeof(GraphSpaceID))); - data.emplace_back(MetaKeyUtils::spaceKey(id2), MetaKeyUtils::spaceVal(properties2)); - - // mock index data - std::string indexName = "test_space_index"; - int32_t tagIndex = 2; - cpp2::IndexItem item; - item.index_id_ref() = tagIndex; - item.index_name_ref() = indexName; - nebula::cpp2::SchemaID schemaID; - TagID tagID = 3; - std::string tagName = "test_space_tag1"; - schemaID.tag_id_ref() = tagID; - item.schema_id_ref() = schemaID; - item.schema_name_ref() = tagName; - data.emplace_back(MetaKeyUtils::indexIndexKey(id, indexName), - std::string(reinterpret_cast(&tagIndex), sizeof(IndexID))); - data.emplace_back(MetaKeyUtils::indexKey(id, tagIndex), MetaKeyUtils::indexVal(item)); - - // mock partition data - std::vector allHosts; - HostAddr storageHost = Utils::getStoreAddrFromAdminAddr(host); - allHosts.emplace_back(storageHost); - for (auto partId = 1; partId <= 1; partId++) { - std::vector hosts2; - size_t idx = partId; - for (int32_t i = 0; i < 1; i++, idx++) { - hosts2.emplace_back(allHosts[idx % 1]); +class CreateBackupProcessorTest : public ::testing::Test { + protected: + static void initStorage() { + storaged_ = std::make_unique(); + auto adminHandler = std::make_shared(); + storaged_->start("storage-admin", 0, adminHandler); + LOG(INFO) << "Start storage server on " << storaged_->port_; + } + + static void registerStorage() { + std::vector machines; + machines.emplace_back(nebula::MetaKeyUtils::machineKey(localIp_, storaged_->port_), ""); + TestUtils::doPut(metaKv_.get(), machines); + } + + static void activeStorage() { + HostAddr host(localIp_, storaged_->port_); + std::vector time; + auto now = time::WallClock::fastNowInMilliSec(); + ActiveHostsMan::updateHostInfo( + metaKv_.get(), host, HostInfo(now, meta::cpp2::HostRole::STORAGE, ""), time); + TestUtils::doPut(metaKv_.get(), time); + } + + static void mockSpace(GraphSpaceID id, const std::string& name) { + cpp2::SpaceDesc properties; + properties.space_name_ref() = name; + properties.partition_num_ref() = 1; + properties.replica_factor_ref() = 1; + auto spaceVal = MetaKeyUtils::spaceVal(properties); + std::vector data; + data.emplace_back(MetaKeyUtils::indexSpaceKey(name), + std::string(reinterpret_cast(&id), sizeof(GraphSpaceID))); + data.emplace_back(MetaKeyUtils::spaceKey(id), MetaKeyUtils::spaceVal(properties)); + TestUtils::doPut(metaKv_.get(), data); + } + + static void mockPartition(std::vector graphIds) { + HostAddr host(localIp_, storaged_->port_); + HostAddr storageHost = Utils::getStoreAddrFromAdminAddr(host); + std::vector allHosts{storageHost}; + std::vector data; + for (auto partId = 1; partId <= 1; partId++) { + 
std::vector hosts; + size_t idx = partId; + for (int32_t i = 0; i < 1; i++, idx++) { + hosts.emplace_back(allHosts[idx % 1]); + } + + for (auto graphId : graphIds) { + data.emplace_back(MetaKeyUtils::partKey(graphId, partId), MetaKeyUtils::partVal(hosts)); + } } - data.emplace_back(MetaKeyUtils::partKey(id, partId), MetaKeyUtils::partVal(hosts2)); - data.emplace_back(MetaKeyUtils::partKey(id2, partId), MetaKeyUtils::partVal(hosts2)); + TestUtils::doPut(metaKv_.get(), data); } - folly::Baton baton; - kv->asyncMultiPut( - kDefaultSpaceId, kDefaultPartId, std::move(data), [&](nebula::cpp2::ErrorCode code) { - ret = (code == nebula::cpp2::ErrorCode::SUCCEEDED); - baton.post(); - }); - baton.wait(); + static void mockIndex(GraphSpaceID spaceId, + TagID tagId, + IndexID indexId, + const std::string& tagName, + const std::string& indexName) { + cpp2::IndexItem item; + item.index_id_ref() = indexId; + item.index_name_ref() = indexName; + nebula::cpp2::SchemaID schemaId; + schemaId.tag_id_ref() = tagId; + item.schema_id_ref() = schemaId; + item.schema_name_ref() = tagName; - auto client = std::make_unique(kv.get()); - { - cpp2::CreateBackupReq req; - std::vector spaces = {"test_space"}; - req.spaces_ref() = std::move(spaces); - JobManager* jobMgr = JobManager::getInstance(); - ASSERT_TRUE(jobMgr->init(kv.get())); - auto* processor = CreateBackupProcessor::instance(kv.get(), client.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - LOG(INFO) << folly::to(resp.get_code()); + std::vector data; + data.emplace_back(MetaKeyUtils::indexIndexKey(spaceId, indexName), + std::string(reinterpret_cast(&indexId), sizeof(IndexID))); + data.emplace_back(MetaKeyUtils::indexKey(spaceId, indexId), MetaKeyUtils::indexVal(item)); + TestUtils::doPut(metaKv_.get(), data); + } + + static void initMeta() { + metaPath_ = std::make_unique("/tmp/create_backup_test.XXXXXX"); + metaKv_ = MockCluster::initMetaKV(metaPath_->path()); + client_ = std::make_unique(metaKv_.get()); + jobMgr_ = JobManager::getInstance(); + ASSERT_TRUE(jobMgr_->init(metaKv_.get(), client_.get())); + // TODO(spw): prevent the mock job from really being scheduled. Mocking a JobManager would be a better way.
+ jobMgr_->status_.store(JobManager::JbmgrStatus::STOPPED, std::memory_order_release); + + // register storageds + registerStorage(); + activeStorage(); + + // mock two spaces and their partitions + GraphSpaceID spaceId1 = 1; + GraphSpaceID spaceId2 = 2; + mockSpace(spaceId1, "test_space1"); + mockSpace(spaceId2, "test_space2"); + spacesIds_.emplace_back(spaceId1); + spacesIds_.emplace_back(spaceId2); + mockPartition(spacesIds_); + + // mock index data + mockIndex(spaceId1, 10, 11, "tag_space_tag1", "test_space_index1"); + mockIndex(spaceId2, 20, 21, "tag_space_tag2", "test_space_index2"); + } + + static void verify(meta::cpp2::CreateBackupResp& resp) { ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); auto meta = resp.get_meta(); @@ -204,7 +210,7 @@ TEST(ProcessorTest, CreateBackupTest) { }); ASSERT_EQ(it, metaFiles.cend()); - ASSERT_EQ(1, meta.get_space_backups().size()); + ASSERT_EQ(2, meta.get_space_backups().size()); for (auto s : meta.get_space_backups()) { auto spaceBackup = s.second; ASSERT_EQ(1, spaceBackup.get_host_backups().size()); @@ -213,7 +219,7 @@ auto checkInfo = spaceBackup.get_host_backups()[0].get_checkpoints()[0]; ASSERT_EQ("snapshot_path", checkInfo.get_data_path()); ASSERT_TRUE(meta.get_full()); - ASSERT_FALSE(meta.get_all_spaces()); + ASSERT_TRUE(meta.get_all_spaces()); auto parts = checkInfo.get_parts(); ASSERT_EQ(parts.size(), 1); for (auto p : parts) { @@ -223,7 +229,67 @@ ASSERT_EQ(logInfo.get_term_id(), termId); } } - jobMgr->shutDown(); + } + + protected: + static void SetUpTestCase() { + localIp_ = "127.0.0.1"; + jobMgr_ = nullptr; + initStorage(); + initMeta(); + } + + static void TearDownTestCase() { + jobMgr_->shutDown(); + jobMgr_->bgThread_.join(); + + client_.reset(nullptr); + metaKv_.reset(nullptr); + metaPath_.reset(nullptr); + storaged_.reset(nullptr); + } + + protected: + inline static std::string localIp_; + inline static std::vector spacesIds_; + + inline static std::unique_ptr storaged_; + inline static std::unique_ptr metaPath_; + inline static std::unique_ptr metaKv_; + inline static std::unique_ptr client_; + inline static JobManager* jobMgr_; +}; + +TEST_F(CreateBackupProcessorTest, Basic) { + cpp2::CreateBackupReq req; + auto processor = CreateBackupProcessor::instance(metaKv_.get(), client_.get()); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + verify(resp); +} + +TEST_F(CreateBackupProcessorTest, RunningJobs) { + std::vector jobTypes{cpp2::JobType::REBUILD_TAG_INDEX, + cpp2::JobType::REBUILD_EDGE_INDEX, + cpp2::JobType::COMPACT, + cpp2::JobType::INGEST, + cpp2::JobType::DATA_BALANCE, + cpp2::JobType::LEADER_BALANCE}; + JobID jobId = 1; + for (auto jobType : jobTypes) { + auto currJobId = jobId++; + JobDescription job(spacesIds_.front(), currJobId, jobType); + jobMgr_->addJob(job); + + cpp2::CreateBackupReq req; + auto processor = CreateBackupProcessor::instance(metaKv_.get(), client_.get()); + auto f = processor->getFuture(); + processor->process(req); // will delete the processor pointer + auto resp = std::move(f).get(); + ASSERT_EQ(resp.get_code(), nebula::cpp2::ErrorCode::E_BACKUP_RUNNING_JOBS); + + jobMgr_->stopJob(spacesIds_.front(), currJobId); } } } // namespace meta diff --git a/src/meta/test/GetStatsTest.cpp b/src/meta/test/GetStatsTest.cpp index 2474233737e..43ded1b5fac 100644 --- a/src/meta/test/GetStatsTest.cpp +++ b/src/meta/test/GetStatsTest.cpp @@ -109,7 +109,7 @@ class GetStatsTest : public
::testing::Test { jobMgr = JobManager::getInstance(); jobMgr->status_ = JobManager::JbmgrStatus::NOT_START; - jobMgr->init(kv_.get()); + jobMgr->init(kv_.get(), nullptr); } void TearDown() override { @@ -443,7 +443,7 @@ TEST_F(GetStatsTest, MockSingleMachineTest) { // add stats job1 JobID jobId1 = 1; JobDescription job1(spaceId, jobId1, cpp2::JobType::STATS); - jobMgr->addJob(job1, &adminClient); + jobMgr->addJob(job1); JobCallBack cb1(jobMgr, spaceId, jobId1, 0, 100); JobCallBack cb2(jobMgr, spaceId, 2, 0, 200); @@ -491,7 +491,7 @@ TEST_F(GetStatsTest, MockSingleMachineTest) { // add stats job2 of same space JobID jobId2 = 2; JobDescription job2(spaceId, jobId2, cpp2::JobType::STATS); - jobMgr->addJob(job2, &adminClient); + jobMgr->addJob(job2); // check job result { @@ -560,7 +560,7 @@ TEST_F(GetStatsTest, MockMultiMachineTest) { // add stats job JobID jobId = 1; JobDescription job(spaceId, jobId, cpp2::JobType::STATS); - jobMgr->addJob(job, &adminClient); + jobMgr->addJob(job); JobCallBack cb1(jobMgr, spaceId, jobId, 0, 100); JobCallBack cb2(jobMgr, spaceId, jobId, 1, 200); diff --git a/src/meta/test/JobManagerTest.cpp b/src/meta/test/JobManagerTest.cpp index 19c63e0ac26..f1ed97d86ce 100644 --- a/src/meta/test/JobManagerTest.cpp +++ b/src/meta/test/JobManagerTest.cpp @@ -80,7 +80,7 @@ class JobManagerTest : public ::testing::Test { }); jobMgr->status_ = JobManager::JbmgrStatus::NOT_START; jobMgr->kvStore_ = kv_.get(); - jobMgr->init(kv_.get()); + jobMgr->init(kv_.get(), adminClient_.get()); return jobMgr; } @@ -106,7 +106,7 @@ TEST_F(JobManagerTest, AddJob) { GraphSpaceID spaceId = 1; JobID jobId = 2; JobDescription jobDesc(spaceId, jobId, cpp2::JobType::COMPACT); - auto rc = jobMgr->addJob(jobDesc, adminClient_.get()); + auto rc = jobMgr->addJob(jobDesc); ASSERT_EQ(rc, nebula::cpp2::ErrorCode::SUCCEEDED); // If there is a failed data balance job, a new job cannot be added @@ -135,7 +135,7 @@ TEST_F(JobManagerTest, AddRebuildTagIndexJob) { GraphSpaceID spaceId = 1; JobID jobId = 11; JobDescription jobDesc(spaceId, jobId, cpp2::JobType::REBUILD_TAG_INDEX, paras); - auto rc = jobMgr->addJob(jobDesc, adminClient_.get()); + auto rc = jobMgr->addJob(jobDesc); ASSERT_EQ(rc, nebula::cpp2::ErrorCode::SUCCEEDED); auto result = jobMgr->runJobInternal(jobDesc, JobManager::JbOp::ADD).get(); ASSERT_EQ(result, nebula::cpp2::ErrorCode::SUCCEEDED); @@ -150,7 +150,7 @@ TEST_F(JobManagerTest, AddRebuildEdgeIndexJob) { GraphSpaceID spaceId = 1; JobID jobId = 11; JobDescription jobDesc(spaceId, jobId, cpp2::JobType::REBUILD_EDGE_INDEX, paras); - auto rc = jobMgr->addJob(jobDesc, adminClient_.get()); + auto rc = jobMgr->addJob(jobDesc); ASSERT_EQ(rc, nebula::cpp2::ErrorCode::SUCCEEDED); auto result = jobMgr->runJobInternal(jobDesc, JobManager::JbOp::ADD).get(); ASSERT_EQ(result, nebula::cpp2::ErrorCode::SUCCEEDED); @@ -213,7 +213,7 @@ TEST_F(JobManagerTest, StatsJob) { GraphSpaceID spaceId = 1; JobID jobId = 12; JobDescription jobDesc(spaceId, jobId, cpp2::JobType::STATS); - auto rc = jobMgr->addJob(jobDesc, adminClient_.get()); + auto rc = jobMgr->addJob(jobDesc); ASSERT_EQ(rc, nebula::cpp2::ErrorCode::SUCCEEDED); auto result = jobMgr->runJobInternal(jobDesc, JobManager::JbOp::ADD).get(); ASSERT_EQ(result, nebula::cpp2::ErrorCode::SUCCEEDED); @@ -247,18 +247,18 @@ TEST_F(JobManagerTest, JobPriority) { GraphSpaceID spaceId = 1; JobID jobId1 = 13; JobDescription jobDesc1(spaceId, jobId1, cpp2::JobType::COMPACT); - auto rc1 = jobMgr->addJob(jobDesc1, adminClient_.get()); + auto rc1 = jobMgr->addJob(jobDesc1); 
ASSERT_EQ(rc1, nebula::cpp2::ErrorCode::SUCCEEDED); JobID jobId2 = 14; JobDescription jobDesc2(spaceId, jobId2, cpp2::JobType::LEADER_BALANCE); - auto rc2 = jobMgr->addJob(jobDesc2, adminClient_.get()); + auto rc2 = jobMgr->addJob(jobDesc2); ASSERT_EQ(rc2, nebula::cpp2::ErrorCode::SUCCEEDED); GraphSpaceID spaceId2 = 2; JobID jobId3 = 15; JobDescription jobDesc3(spaceId2, jobId3, cpp2::JobType::STATS); - auto rc3 = jobMgr->addJob(jobDesc3, adminClient_.get()); + auto rc3 = jobMgr->addJob(jobDesc3); ASSERT_EQ(rc3, nebula::cpp2::ErrorCode::SUCCEEDED); ASSERT_EQ(3, jobMgr->jobSize()); @@ -307,12 +307,12 @@ TEST_F(JobManagerTest, JobDeduplication) { GraphSpaceID spaceId = 1; JobID jobId1 = 15; JobDescription jobDesc1(spaceId, jobId1, cpp2::JobType::COMPACT); - auto rc1 = jobMgr->addJob(jobDesc1, adminClient_.get()); + auto rc1 = jobMgr->addJob(jobDesc1); ASSERT_EQ(rc1, nebula::cpp2::ErrorCode::SUCCEEDED); JobID jobId2 = 16; JobDescription jobDesc2(spaceId, jobId2, cpp2::JobType::LEADER_BALANCE); - auto rc2 = jobMgr->addJob(jobDesc2, adminClient_.get()); + auto rc2 = jobMgr->addJob(jobDesc2); ASSERT_EQ(rc2, nebula::cpp2::ErrorCode::SUCCEEDED); ASSERT_EQ(2, jobMgr->jobSize()); @@ -323,7 +323,7 @@ TEST_F(JobManagerTest, JobDeduplication) { auto jobExist = jobMgr->checkOnRunningJobExist(spaceId, jobDesc3.getJobType(), jobDesc3.getParas(), jId3); if (!jobExist) { - auto rc3 = jobMgr->addJob(jobDesc3, adminClient_.get()); + auto rc3 = jobMgr->addJob(jobDesc3); ASSERT_EQ(rc3, nebula::cpp2::ErrorCode::SUCCEEDED); } @@ -333,7 +333,7 @@ TEST_F(JobManagerTest, JobDeduplication) { jobExist = jobMgr->checkOnRunningJobExist(spaceId, jobDesc4.getJobType(), jobDesc4.getParas(), jId4); if (!jobExist) { - auto rc4 = jobMgr->addJob(jobDesc4, adminClient_.get()); + auto rc4 = jobMgr->addJob(jobDesc4); ASSERT_NE(rc4, nebula::cpp2::ErrorCode::SUCCEEDED); } @@ -360,7 +360,7 @@ TEST_F(JobManagerTest, LoadJobDescription) { JobDescription jobDesc1(spaceId, jobId1, cpp2::JobType::COMPACT); jobDesc1.setStatus(cpp2::JobStatus::RUNNING); jobDesc1.setStatus(cpp2::JobStatus::FINISHED); - auto rc = jobMgr->addJob(jobDesc1, adminClient_.get()); + auto rc = jobMgr->addJob(jobDesc1); ASSERT_EQ(rc, nebula::cpp2::ErrorCode::SUCCEEDED); ASSERT_EQ(jobDesc1.getSpace(), 1); ASSERT_EQ(jobDesc1.getJobId(), 1); @@ -387,13 +387,13 @@ TEST_F(JobManagerTest, ShowJobs) { JobDescription jobDesc1(spaceId, jobId1, cpp2::JobType::COMPACT); jobDesc1.setStatus(cpp2::JobStatus::RUNNING); jobDesc1.setStatus(cpp2::JobStatus::FINISHED); - jobMgr->addJob(jobDesc1, adminClient_.get()); + jobMgr->addJob(jobDesc1); JobID jobId2 = 2; JobDescription jobDesc2(spaceId, jobId2, cpp2::JobType::FLUSH); jobDesc2.setStatus(cpp2::JobStatus::RUNNING); jobDesc2.setStatus(cpp2::JobStatus::FAILED); - jobMgr->addJob(jobDesc2, adminClient_.get()); + jobMgr->addJob(jobDesc2); auto statusOrShowResult = jobMgr->showJobs(spaceId); LOG(INFO) << "after show jobs"; @@ -424,14 +424,14 @@ TEST_F(JobManagerTest, ShowJobsFromMultiSpace) { JobDescription jd1(spaceId1, jobId1, cpp2::JobType::COMPACT); jd1.setStatus(cpp2::JobStatus::RUNNING); jd1.setStatus(cpp2::JobStatus::FINISHED); - jobMgr->addJob(jd1, adminClient_.get()); + jobMgr->addJob(jd1); GraphSpaceID spaceId2 = 2; JobID jobId2 = 2; JobDescription jd2(spaceId2, jobId2, cpp2::JobType::FLUSH); jd2.setStatus(cpp2::JobStatus::RUNNING); jd2.setStatus(cpp2::JobStatus::FAILED); - jobMgr->addJob(jd2, adminClient_.get()); + jobMgr->addJob(jd2); auto statusOrShowResult = jobMgr->showJobs(spaceId2); LOG(INFO) << "after show jobs"; @@ 
-456,7 +456,7 @@ TEST_F(JobManagerTest, ShowJob) { JobDescription jd(spaceId, jobId1, cpp2::JobType::COMPACT); jd.setStatus(cpp2::JobStatus::RUNNING); jd.setStatus(cpp2::JobStatus::FINISHED); - jobMgr->addJob(jd, adminClient_.get()); + jobMgr->addJob(jd); JobID jobId2 = jd.getJobId(); int32_t task1 = 0; @@ -522,7 +522,7 @@ TEST_F(JobManagerTest, ShowJobInOtherSpace) { JobDescription jd(spaceId1, jobId1, cpp2::JobType::COMPACT); jd.setStatus(cpp2::JobStatus::RUNNING); jd.setStatus(cpp2::JobStatus::FINISHED); - jobMgr->addJob(jd, adminClient_.get()); + jobMgr->addJob(jd); JobID jobId2 = jd.getJobId(); int32_t task1 = 0; @@ -601,7 +601,7 @@ TEST_F(JobManagerTest, RecoverJob) { jd.getErrorCode()); jobMgr->save(jobKey, jobVal); } - auto nJobRecovered = jobMgr->recoverJob(spaceId, nullptr); + auto nJobRecovered = jobMgr->recoverJob(spaceId); ASSERT_EQ(nebula::value(nJobRecovered), 3); std::tuple opJobId; while (jobMgr->jobSize() != 0) { @@ -650,14 +650,14 @@ TEST_F(JobManagerTest, RecoverJob) { jd.getErrorCode()); jobMgr->save(jobKey, jobVal); } - auto nJobRecovered = jobMgr->recoverJob(spaceId, nullptr, {base + 1}); + auto nJobRecovered = jobMgr->recoverJob(spaceId, {base + 1}); ASSERT_EQ(nebula::value(nJobRecovered), 0); - nJobRecovered = jobMgr->recoverJob(spaceId, nullptr, {base + 2}); + nJobRecovered = jobMgr->recoverJob(spaceId, {base + 2}); ASSERT_EQ(nebula::value(nJobRecovered), 0); - nJobRecovered = jobMgr->recoverJob(spaceId, nullptr, {base + 3}); + nJobRecovered = jobMgr->recoverJob(spaceId, {base + 3}); ASSERT_EQ(nebula::value(nJobRecovered), 0); - nJobRecovered = jobMgr->recoverJob(spaceId, nullptr, {base + nJob + 4}); + nJobRecovered = jobMgr->recoverJob(spaceId, {base + nJob + 4}); ASSERT_EQ(nebula::value(nJobRecovered), 1); std::tuple opJobId; @@ -724,10 +724,10 @@ TEST_F(JobManagerTest, RecoverJob) { jd.getErrorCode()); jobMgr->save(jobKey, jobVal); } - auto nJobRecovered = jobMgr->recoverJob(spaceId, nullptr, {base + 1}); + auto nJobRecovered = jobMgr->recoverJob(spaceId, {base + 1}); ASSERT_EQ(nebula::value(nJobRecovered), 0); - nJobRecovered = jobMgr->recoverJob(spaceId, nullptr); + nJobRecovered = jobMgr->recoverJob(spaceId); ASSERT_EQ(nebula::value(nJobRecovered), 1); } } @@ -771,7 +771,7 @@ TEST_F(JobManagerTest, NotStoppableJob) { } JobDescription jobDesc(spaceId, jobId, type); - auto code = jobMgr->addJob(jobDesc, adminClient_.get()); + auto code = jobMgr->addJob(jobDesc); ASSERT_EQ(code, nebula::cpp2::ErrorCode::SUCCEEDED); // sleep a while to make sure the task has begun @@ -852,7 +852,7 @@ TEST_F(JobManagerTest, StoppableJob) { } JobDescription jobDesc(spaceId, jobId, type); - auto code = jobMgr->addJob(jobDesc, adminClient_.get()); + auto code = jobMgr->addJob(jobDesc); ASSERT_EQ(code, nebula::cpp2::ErrorCode::SUCCEEDED); // sleep a while to make sure the task has begun diff --git a/src/meta/upgrade/MetaDataUpgrade.cpp b/src/meta/upgrade/MetaDataUpgrade.cpp index bbdd567771b..eccf19b83ef 100644 --- a/src/meta/upgrade/MetaDataUpgrade.cpp +++ b/src/meta/upgrade/MetaDataUpgrade.cpp @@ -143,7 +143,7 @@ nebula::meta::cpp2::GeoShape MetaDataUpgrade::convertToGeoShape( case nebula::meta::v2::cpp2::GeoShape::POLYGON: return nebula::meta::cpp2::GeoShape::POLYGON; default: - LOG(FATAL) << "Unimplemented"; + LOG(FATAL) << "Invalid geo shape: " << static_cast(shape); } } diff --git a/src/parser/AdminSentences.cpp b/src/parser/AdminSentences.cpp index b72735bb18a..a7b969c6d15 100644 --- a/src/parser/AdminSentences.cpp +++ b/src/parser/AdminSentences.cpp @@ -76,7 +76,7 @@
std::string SpaceOptItem::toString() const { case OptionType::GROUP_NAME: return ""; } - DLOG(FATAL) << "Space parameter illegal"; + LOG(DFATAL) << "Space parameter illegal"; return "Unknown"; } @@ -215,8 +215,8 @@ std::string AddListenerSentence::toString() const { buf += "ELASTICSEARCH "; break; case meta::cpp2::ListenerType::UNKNOWN: - LOG(FATAL) << "Unknown listener type."; - break; + LOG(DFATAL) << "Unknown listener type."; + return "Unknown"; } buf += listeners_->toString(); return buf; @@ -231,8 +231,8 @@ std::string RemoveListenerSentence::toString() const { buf += "ELASTICSEARCH "; break; case meta::cpp2::ListenerType::UNKNOWN: - DLOG(FATAL) << "Unknown listener type."; - break; + LOG(DFATAL) << "Unknown listener type."; + return "Unknown"; } return buf; } @@ -309,7 +309,8 @@ std::string AdminJobSentence::toString() const { return str; } } - LOG(FATAL) << "Unknown job operation " << static_cast(op_); + LOG(DFATAL) << "Unknown job operation " << static_cast(op_); + return "Unknown"; } meta::cpp2::JobOp AdminJobSentence::getOp() const { @@ -320,6 +321,13 @@ meta::cpp2::JobType AdminJobSentence::getJobType() const { return type_; } +bool AdminJobSentence::needWriteSpace() const { + if (kind_ == Kind::kAdminJob) { + return type_ == meta::cpp2::JobType::DATA_BALANCE || type_ == meta::cpp2::JobType::ZONE_BALANCE; + } + return false; +} + const std::vector &AdminJobSentence::getParas() const { return paras_; } @@ -343,7 +351,8 @@ std::string ShowServiceClientsSentence::toString() const { case meta::cpp2::ExternalServiceType::ELASTICSEARCH: return "SHOW TEXT SEARCH CLIENTS"; default: - LOG(FATAL) << "Unknown service type " << static_cast(type_); + LOG(DFATAL) << "Unknown service type " << static_cast(type_); + return "Unknown"; } } @@ -355,7 +364,8 @@ std::string SignInServiceSentence::toString() const { buf += "SIGN IN TEXT SERVICE "; break; default: - LOG(FATAL) << "Unknown service type " << static_cast(type_); + LOG(DFATAL) << "Unknown service type " << static_cast(type_); + return "Unknown"; } for (auto &client : clients_->clients()) { @@ -394,7 +404,8 @@ std::string SignOutServiceSentence::toString() const { case meta::cpp2::ExternalServiceType::ELASTICSEARCH: return "SIGN OUT TEXT SERVICE"; default: - LOG(FATAL) << "Unknown service type " << static_cast(type_); + LOG(DFATAL) << "Unknown service type " << static_cast(type_); + return "Unknown"; } } diff --git a/src/parser/AdminSentences.h b/src/parser/AdminSentences.h index 23b7a8e0230..57e0c45872a 100644 --- a/src/parser/AdminSentences.h +++ b/src/parser/AdminSentences.h @@ -667,6 +667,8 @@ class AdminJobSentence final : public Sentence { meta::cpp2::JobType getJobType() const; const std::vector& getParas() const; + bool needWriteSpace() const; + private: meta::cpp2::JobOp op_; meta::cpp2::JobType type_; diff --git a/src/parser/MaintainSentences.cpp b/src/parser/MaintainSentences.cpp index 308c3736914..ea881b09e70 100644 --- a/src/parser/MaintainSentences.cpp +++ b/src/parser/MaintainSentences.cpp @@ -25,7 +25,7 @@ std::string SchemaPropItem::toString() const { case COMMENT: return folly::stringPrintf("comment = \"%s\"", std::get(propValue_).c_str()); } - DLOG(FATAL) << "Schema property type illegal"; + LOG(DFATAL) << "Schema property type illegal"; return "Unknown"; } @@ -239,7 +239,7 @@ std::string IndexParamItem::toString() const { case S2_MAX_CELLS: return folly::stringPrintf("s2_max_cells = \"%ld\"", paramValue_.getInt()); } - DLOG(FATAL) << "Index param type illegal"; + LOG(DFATAL) << "Index param type illegal"; return 
"Unknown"; } diff --git a/src/parser/TraverseSentences.cpp b/src/parser/TraverseSentences.cpp index af1fd83ec47..a773e5e9d82 100644 --- a/src/parser/TraverseSentences.cpp +++ b/src/parser/TraverseSentences.cpp @@ -124,7 +124,8 @@ std::string OrderFactor::toString() const { case DESCEND: return folly::stringPrintf("%s DESC,", expr_->toString().c_str()); default: - LOG(FATAL) << "Unknown Order Type: " << orderType_; + LOG(DFATAL) << "Unknown Order Type: " << orderType_; + return "Unknown"; } } diff --git a/src/storage/BaseProcessor.h b/src/storage/BaseProcessor.h index 612d4235ddf..93cf0fa300b 100644 --- a/src/storage/BaseProcessor.h +++ b/src/storage/BaseProcessor.h @@ -70,6 +70,8 @@ class BaseProcessor { return nebula::cpp2::ErrorCode::E_SPACE_NOT_FOUND; } isIntId_ = (vIdType.value() == nebula::cpp2::PropertyType::INT64); + DCHECK(vIdType.value() == nebula::cpp2::PropertyType::INT64 || + vIdType.value() == nebula::cpp2::PropertyType::FIXED_STRING); return nebula::cpp2::ErrorCode::SUCCEEDED; } diff --git a/src/storage/GraphStorageServiceHandler.cpp b/src/storage/GraphStorageServiceHandler.cpp index 7d10fcf6fe1..b246c1624a3 100644 --- a/src/storage/GraphStorageServiceHandler.cpp +++ b/src/storage/GraphStorageServiceHandler.cpp @@ -158,9 +158,17 @@ folly::Future GraphStorageServiceHandler::future_scanEdge( folly::Future GraphStorageServiceHandler::future_getUUID( const cpp2::GetUUIDReq&) { - LOG(FATAL) << "Unsupported in version 2.0"; - cpp2::GetUUIDResp ret; - return ret; + LOG(DFATAL) << "Unsupported in version 2.0"; + + cpp2::GetUUIDResp resp; + cpp2::ResponseCommon result; + std::vector partRetCode; + cpp2::PartitionResult thriftRet; + thriftRet.code_ref() = nebula::cpp2::ErrorCode::E_UNSUPPORTED; + partRetCode.emplace_back(std::move(thriftRet)); + result.failed_parts_ref() = partRetCode; + resp.result_ref() = result; + return resp; } folly::Future GraphStorageServiceHandler::future_chainAddEdges( diff --git a/src/storage/StorageServer.cpp b/src/storage/StorageServer.cpp index 21bbd7e8423..d8a12e96240 100644 --- a/src/storage/StorageServer.cpp +++ b/src/storage/StorageServer.cpp @@ -85,9 +85,11 @@ std::unique_ptr StorageServer::getStoreInstance() { } return nbStore; } else if (FLAGS_store_type == "hbase") { - LOG(FATAL) << "HBase store has not been implemented"; + LOG(DFATAL) << "HBase store has not been implemented"; + return nullptr; } else { - LOG(FATAL) << "Unknown store type \"" << FLAGS_store_type << "\""; + LOG(DFATAL) << "Unknown store type \"" << FLAGS_store_type << "\""; + return nullptr; } return nullptr; } @@ -139,7 +141,8 @@ int32_t StorageServer::getAdminStoreSeqId() { newVal.append(reinterpret_cast(&curSeqId), sizeof(int32_t)); auto ret = env_->adminStore_->put(key, newVal); if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(FATAL) << "Write put in admin-storage seq id " << curSeqId << " failed."; + LOG(DFATAL) << "Write put in admin-storage seq id " << curSeqId << " failed."; + return -1; } return curSeqId; } @@ -259,6 +262,11 @@ bool StorageServer::start() { env_->edgesML_ = std::make_unique(); env_->adminStore_ = getAdminStoreInstance(); env_->adminSeqId_ = getAdminStoreSeqId(); + if (env_->adminSeqId_ < 0) { + LOG(ERROR) << "Get admin store seq id failed!"; + return false; + } + taskMgr_ = AdminTaskManager::instance(env_.get()); if (!taskMgr_->init()) { LOG(ERROR) << "Init task manager failed!"; diff --git a/src/storage/context/StorageExpressionContext.h b/src/storage/context/StorageExpressionContext.h index cb6ccb2e3d4..a51db60105c 100644 --- 
a/src/storage/context/StorageExpressionContext.h
+++ b/src/storage/context/StorageExpressionContext.h
@@ -89,7 +89,7 @@ class StorageExpressionContext final : public ExpressionContext {
   // Get index of property in variable tuple
   StatusOr<std::size_t> getVarPropIndex(const std::string&, const std::string&) const override {
-    DLOG(FATAL) << "Unimplemented";
+    LOG(DFATAL) << "Unimplemented";
     return Status::Error("Unimplemented");
   }
@@ -114,7 +114,7 @@ class StorageExpressionContext final : public ExpressionContext {
   // Get index of property in input tuple
   StatusOr<std::size_t> getInputPropIndex(const std::string&) const override {
-    DLOG(FATAL) << "Unimplemented";
+    LOG(DFATAL) << "Unimplemented";
     return Status::Error("Unimplemented");
   }
@@ -192,7 +192,7 @@
   */
   Value getVertex(const std::string& name = "") const override {
     UNUSED(name);
-    LOG(FATAL) << "Unimplemented";
+    LOG(DFATAL) << "Unimplemented";
     return Value::kNullBadData;
   }
@@ -202,7 +202,7 @@
   * @return Value
   */
   Value getEdge() const override {
-    LOG(FATAL) << "Unimplemented";
+    LOG(DFATAL) << "Unimplemented";
     return Value::kNullBadData;
   }
diff --git a/src/storage/exec/IndexExprContext.h b/src/storage/exec/IndexExprContext.h
index 6dbe7539f9f..a96b231c261 100644
--- a/src/storage/exec/IndexExprContext.h
+++ b/src/storage/exec/IndexExprContext.h
@@ -63,7 +63,7 @@ class IndexExprContext : public ExpressionContext {
   StatusOr<std::size_t> getVarPropIndex(const std::string &var, const std::string &prop) const override {
     UNUSED(var), UNUSED(prop);
-    DLOG(FATAL) << "Unimplemented";
+    LOG(DFATAL) << "Unimplemented";
     return Status::Error("Unimplemented");
   }
   Value getSrcProp(const std::string &tag, const std::string &prop) const override {
@@ -80,7 +80,7 @@
   }
   StatusOr<std::size_t> getInputPropIndex(const std::string &prop) const override {
     UNUSED(prop);
-    DLOG(FATAL) << "Unimplemented";
+    LOG(DFATAL) << "Unimplemented";
     return Status::Error("Unimplemented");
   }
   Value getVertex(const std::string &) const override {
diff --git a/src/storage/exec/IndexScanNode.cpp b/src/storage/exec/IndexScanNode.cpp
index 93e8251aa19..9e75d184cd4 100644
--- a/src/storage/exec/IndexScanNode.cpp
+++ b/src/storage/exec/IndexScanNode.cpp
@@ -389,10 +389,15 @@ ::nebula::cpp2::ErrorCode IndexScanNode::init(InitContext& ctx) {
     ctx.retColMap[ctx.returnColumns[i]] = i;
   }
   colPosMap_ = ctx.retColMap;
-  // Analyze whether the scan needs to access base data.
+  // Analyze whether the scan needs to access base data. We check whether requiredColumns is a
+  // subset of the index fields. In other words, if the scan node is required to return a property
+  // that the index does not contain, we need to access the base data.
   // TODO(hs.zhang): The performance is better to judge based on whether the string is truncated
   auto tmp = ctx.requiredColumns;
   for (auto& field : index_->get_fields()) {
+    // TODO(doodle): Both STRING and FIXED_STRING properties in tag/edge will be transformed into
+    // FIXED_STRING in the ColumnDef of IndexItem. For a FIXED_STRING tag/edge property, we don't
+    // actually need to access the base data.
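+    // e.g. an index field a(5) built on a STRING property stores "abcdef" truncated to "abcde",
+    // so a scan that must return `a` has to read the full value from the base data. The skip
+    // below is conservative: it treats every FIXED_STRING index field this way, even when the
+    // underlying property is itself a FIXED_STRING and can never be truncated.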
if (field.get_type().get_type() == ::nebula::cpp2::PropertyType::FIXED_STRING) { continue; } diff --git a/src/storage/exec/MultiTagNode.h b/src/storage/exec/MultiTagNode.h index 39a60e74aaa..50698469994 100644 --- a/src/storage/exec/MultiTagNode.h +++ b/src/storage/exec/MultiTagNode.h @@ -95,17 +95,17 @@ class MultiTagNode : public IterateNode { } folly::StringPiece key() const override { - LOG(FATAL) << "not allowed to do this"; + LOG(DFATAL) << "not allowed to do this"; return ""; } folly::StringPiece val() const override { - LOG(FATAL) << "not allowed to do this"; + LOG(DFATAL) << "not allowed to do this"; return ""; } RowReader* reader() const override { - LOG(FATAL) << "not allowed to do this"; + LOG(DFATAL) << "not allowed to do this"; return nullptr; } diff --git a/src/storage/exec/QueryUtils.h b/src/storage/exec/QueryUtils.h index 561f538a764..42481acf8d4 100644 --- a/src/storage/exec/QueryUtils.h +++ b/src/storage/exec/QueryUtils.h @@ -163,7 +163,7 @@ class QueryUtils final { } } default: - LOG(FATAL) << "Should not read here"; + LOG(DFATAL) << "Should not read here"; } return Status::Error(folly::stringPrintf("Invalid property %s", prop.name_.c_str())); } @@ -191,7 +191,7 @@ class QueryUtils final { return tag; } default: - LOG(FATAL) << "Should not read here"; + LOG(DFATAL) << "Should not read here"; } return Status::Error(folly::stringPrintf("Invalid property %s", prop.name_.c_str())); } diff --git a/src/storage/exec/StorageIterator.h b/src/storage/exec/StorageIterator.h index 8f384d1c430..e26833b27a4 100644 --- a/src/storage/exec/StorageIterator.h +++ b/src/storage/exec/StorageIterator.h @@ -173,7 +173,8 @@ class SingleEdgeKeyIterator : public SingleEdgeIterator { } RowReader* reader() const override { - LOG(FATAL) << "This iterator should not read value"; + LOG(DFATAL) << "This iterator should not read value"; + return nullptr; } }; diff --git a/src/storage/test/IndexTest.cpp b/src/storage/test/IndexTest.cpp index 6e794c8a848..823637220ab 100644 --- a/src/storage/test/IndexTest.cpp +++ b/src/storage/test/IndexTest.cpp @@ -65,10 +65,11 @@ using std::string_literals::operator""s; * | | Float | NoTruncate | with each type of Value | | * | | Bool | INCLUDE_BEGIN | | | * | | String | INCLUDE_END | | | - * | | Time | EXCLUDE_BEGIN | | | - * | | Date | EXCLUDE_END | | | - * | | DateTime | POSITIVE_INF | | | - * | | Compound | NEGATIVE_INF | | | + * | | FixString | EXCLUDE_BEGIN | | | + * | | Time | EXCLUDE_END | | | + * | | Date | POSITIVE_INF | | | + * | | DateTime | NEGATIVE_INF | | | + * | | Compound | | | | * | | Nullable | | | | * | | Geography | | | | * └────────────┴───────────┴───────────────┴─────────────────────────┴─────────┘ @@ -172,13 +173,15 @@ class IndexScanTest : public ::testing::Test { auto value = writer.moveEncodedStr(); CHECK(ret[0].insert({key, value}).second); RowReaderWrapper reader(schema.get(), folly::StringPiece(value), schemaVer); + auto ttlProperty = CommonUtils::ttlValue(schema.get(), reader.get()); + auto ttlValue = ttlProperty.ok() ? 
IndexKeyUtils::indexVal(ttlProperty.value()) : "";
     for (size_t j = 0; j < indices.size(); j++) {
       auto& index = indices[j];
       auto indexValue = IndexKeyUtils::collectIndexValues(&reader, index.get()).value();
       auto indexKeys = IndexKeyUtils::vertexIndexKeys(
           8, 0, index->get_index_id(), std::to_string(i), std::move(indexValue));
       for (auto& indexKey : indexKeys) {
-        CHECK(ret[j + 1].insert({indexKey, ""}).second);
+        CHECK(ret[j + 1].insert({indexKey, ttlValue}).second);
       }
     }
   }
@@ -344,15 +347,14 @@ class IndexScanStringType : public IndexScanTest {
     }
     result.emplace_back(std::move(res).row());
   }
-    std::vector<Row> result2(result.size());
+    std::vector<Row> actual(result.size());
   for (size_t j = 0; j < acquiredColumns.size(); j++) {
     int p = initCtx.retColMap[acquiredColumns[j]];
     for (size_t i = 0; i < result.size(); i++) {
-        result2[i].emplace_back(result[i][p]);
+        actual[i].emplace_back(result[i][p]);
     }
   }
-    result = result2;
-    EXPECT_EQ(result, expect) << "Fail at case " << case_;
+    EXPECT_EQ(actual, expect) << "Fail at case " << case_;
 }
 std::vector<Row> expect(const std::vector<Row>& rows,
@@ -1999,6 +2001,276 @@ TEST_F(IndexScanStringType, String4) {
   }
 }
+
+TEST_F(IndexScanScalarType, FixedString) {
+  auto rows = R"(
+    fixed_string | fixed_string
+    aaa          | aaa
+    aaaaa        | aaaaa
+    aaaaaaa      | aaaaaaa
+    abc          | abc
+    abcd         | abcd
+    abcde        |
+    abcdef       | abcdef
+    abcdefg      | abcdefg
+    abcdf        | abcdf
+    abd          | abd
+    zoo          | zoo
+  )"_row;
+  auto schema = R"(
+    a | fixed_string | 5 | false
+    b | fixed_string | 5 | true
+  )"_schema;
+  // Since this is an index on a fixed_string property, the index field length is the same as
+  // the length of the fixed_string property
+  auto indices = R"(
+    TAG(t,0)
+    (ia,1): a(5)
+    (ib,2): b(5)
+  )"_index(schema);
+  auto kv = encodeTag(rows, 1, schema, indices);
+  auto kvstore = std::make_unique<MockKVStore>();
+  for (auto& iter : kv) {
+    for (auto& item : iter) {
+      kvstore->put(item.first, item.second);
+    }
+  }
+  // Case 1: fixed string in [x, x]
+  std::vector<cpp2::IndexColumnHint> columnHints;
+  {
+    columnHints = {makeColumnHint("a", Value("abc"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(3), "case1.1");
+    columnHints = {makeColumnHint("b", Value("abc"))};
+    checkTag(kvstore.get(), schema, indices[1], columnHints, expect(3), "case1.2");
+    columnHints = {makeColumnHint("a", Value("abcde"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(5, 6, 7), "case1.3");
+    columnHints = {makeColumnHint("b", Value("abcde"))};
+    checkTag(kvstore.get(), schema, indices[1], columnHints, expect(6, 7), "case1.4");
+  }
+  // Case 2: fixed string in [x, INF)
+  {
+    columnHints = {makeBeginColumnHint<true>("a", Value("abc"))};
+    checkTag(
+        kvstore.get(), schema, indices[0], columnHints, expect(3, 4, 5, 6, 7, 8, 9, 10), "case2.1");
+    columnHints = {makeBeginColumnHint<true>("b", Value("abc"))};
+    checkTag(
+        kvstore.get(), schema, indices[1], columnHints, expect(3, 4, 6, 7, 8, 9, 10), "case2.2");
+    columnHints = {makeBeginColumnHint<true>("a", Value("abcde"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(5, 6, 7, 8, 9, 10), "case2.3");
+    columnHints = {makeBeginColumnHint<true>("b", Value("abcde"))};
+    checkTag(kvstore.get(), schema, indices[1], columnHints, expect(6, 7, 8, 9, 10), "case2.4");
+    columnHints = {makeBeginColumnHint<true>("a", Value("well"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(10), "case2.5");
+    columnHints = {makeBeginColumnHint<true>("b", Value("well"))};
+    checkTag(kvstore.get(), schema, indices[1], columnHints, expect(10), "case2.6");
+  }
+  // Case 3: fixed string in (x, INF)
+  {
+    columnHints = {makeBeginColumnHint<false>("a", Value("abc"))};
{makeBeginColumnHint("a", Value("abc"))}; + checkTag( + kvstore.get(), schema, indices[0], columnHints, expect(4, 5, 6, 7, 8, 9, 10), "case3.1"); + columnHints = {makeBeginColumnHint("b", Value("abc"))}; + checkTag(kvstore.get(), schema, indices[1], columnHints, expect(4, 6, 7, 8, 9, 10), "case3.2"); + columnHints = {makeBeginColumnHint("a", Value("abcde"))}; + checkTag(kvstore.get(), schema, indices[0], columnHints, expect(8, 9, 10), "case3.3"); + columnHints = {makeBeginColumnHint("b", Value("abcde"))}; + checkTag(kvstore.get(), schema, indices[1], columnHints, expect(8, 9, 10), "case3.4"); + columnHints = {makeBeginColumnHint("a", Value("abd"))}; + checkTag(kvstore.get(), schema, indices[0], columnHints, expect(10), "case3.5"); + columnHints = {makeBeginColumnHint("b", Value("abd"))}; + checkTag(kvstore.get(), schema, indices[1], columnHints, expect(10), "case3.6"); + } + // Case 4: fixed string in [x, y) + { + columnHints = {makeColumnHint("a", Value("aaaaa"), Value("abcde"))}; + checkTag(kvstore.get(), schema, indices[0], columnHints, expect(1, 2, 3, 4), "case4.1"); + columnHints = {makeColumnHint("b", Value("aaaaa"), Value("abcde"))}; + checkTag(kvstore.get(), schema, indices[1], columnHints, expect(1, 2, 3, 4), "case4.2"); + columnHints = {makeColumnHint("a", Value("abc"), Value("abd"))}; + checkTag(kvstore.get(), schema, indices[0], columnHints, expect(3, 4, 5, 6, 7, 8), "case4.3"); + columnHints = {makeColumnHint("b", Value("abc"), Value("abd"))}; + checkTag(kvstore.get(), schema, indices[1], columnHints, expect(3, 4, 6, 7, 8), "case4.4"); + columnHints = {makeColumnHint("a", Value("abcde"), Value("abd"))}; + checkTag(kvstore.get(), schema, indices[0], columnHints, expect(5, 6, 7, 8), "case4.5"); + columnHints = {makeColumnHint("b", Value("abcde"), Value("abd"))}; + checkTag(kvstore.get(), schema, indices[1], columnHints, expect(6, 7, 8), "case4.6"); + } + // Case 5: fixed string in [x, y] + { + columnHints = {makeColumnHint("a", Value("aaaaa"), Value("abcde"))}; + checkTag( + kvstore.get(), schema, indices[0], columnHints, expect(1, 2, 3, 4, 5, 6, 7), "case5.1"); + columnHints = {makeColumnHint("b", Value("aaaaa"), Value("abcde"))}; + checkTag(kvstore.get(), schema, indices[1], columnHints, expect(1, 2, 3, 4, 6, 7), "case5.2"); + columnHints = {makeColumnHint("a", Value("abc"), Value("abd"))}; + checkTag( + kvstore.get(), schema, indices[0], columnHints, expect(3, 4, 5, 6, 7, 8, 9), "case5.3"); + columnHints = {makeColumnHint("b", Value("abc"), Value("abd"))}; + checkTag(kvstore.get(), schema, indices[1], columnHints, expect(3, 4, 6, 7, 8, 9), "case5.4"); + columnHints = {makeColumnHint("a", Value("abcde"), Value("abd"))}; + checkTag(kvstore.get(), schema, indices[0], columnHints, expect(5, 6, 7, 8, 9), "case5.5"); + columnHints = {makeColumnHint("b", Value("abcde"), Value("abd"))}; + checkTag(kvstore.get(), schema, indices[1], columnHints, expect(6, 7, 8, 9), "case5.6"); + } + // Case 6: fixed string in (x, y] + { + columnHints = {makeColumnHint("a", Value("aaaaa"), Value("abcde"))}; + checkTag(kvstore.get(), schema, indices[0], columnHints, expect(3, 4, 5, 6, 7), "case6.1"); + columnHints = {makeColumnHint("b", Value("aaaaa"), Value("abcde"))}; + checkTag(kvstore.get(), schema, indices[1], columnHints, expect(3, 4, 6, 7), "case6.2"); + columnHints = {makeColumnHint("a", Value("abc"), Value("abd"))}; + checkTag(kvstore.get(), schema, indices[0], columnHints, expect(4, 5, 6, 7, 8, 9), "case6.3"); + columnHints = {makeColumnHint("b", Value("abc"), Value("abd"))}; + 
+    checkTag(kvstore.get(), schema, indices[1], columnHints, expect(4, 6, 7, 8, 9), "case6.4");
+    columnHints = {makeColumnHint<false, true>("a", Value("abcde"), Value("abd"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(8, 9), "case6.5");
+    columnHints = {makeColumnHint<false, true>("b", Value("abcde"), Value("abd"))};
+    checkTag(kvstore.get(), schema, indices[1], columnHints, expect(8, 9), "case6.6");
+  }
+  // Case 7: fixed string in (x, y)
+  {
+    columnHints = {makeColumnHint<false, false>("a", Value("aaaaa"), Value("abcde"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(3, 4), "case7.1");
+    columnHints = {makeColumnHint<false, false>("b", Value("aaaaa"), Value("abcde"))};
+    checkTag(kvstore.get(), schema, indices[1], columnHints, expect(3, 4), "case7.2");
+    columnHints = {makeColumnHint<false, false>("a", Value("abc"), Value("abd"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(4, 5, 6, 7, 8), "case7.3");
+    columnHints = {makeColumnHint<false, false>("b", Value("abc"), Value("abd"))};
+    checkTag(kvstore.get(), schema, indices[1], columnHints, expect(4, 6, 7, 8), "case7.4");
+    columnHints = {makeColumnHint<false, false>("a", Value("abcde"), Value("abd"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(8), "case7.5");
+    columnHints = {makeColumnHint<false, false>("b", Value("abcde"), Value("abd"))};
+    checkTag(kvstore.get(), schema, indices[1], columnHints, expect(8), "case7.6");
+  }
+  // Case 8: fixed string in (-INF, y]
+  {
+    columnHints = {makeEndColumnHint<true>("a", Value("aaaaa"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(0, 1, 2), "case8.1");
+    columnHints = {makeEndColumnHint<true>("b", Value("aaaaa"))};
+    checkTag(kvstore.get(), schema, indices[1], columnHints, expect(0, 1, 2), "case8.2");
+    columnHints = {makeEndColumnHint<true>("a", Value("abcde"))};
+    checkTag(
+        kvstore.get(), schema, indices[0], columnHints, expect(0, 1, 2, 3, 4, 5, 6, 7), "case8.3");
+    columnHints = {makeEndColumnHint<true>("b", Value("abcde"))};
+    checkTag(
+        kvstore.get(), schema, indices[1], columnHints, expect(0, 1, 2, 3, 4, 6, 7), "case8.4");
+    columnHints = {makeEndColumnHint<true>("a", Value("abd"))};
+    checkTag(kvstore.get(),
+             schema,
+             indices[0],
+             columnHints,
+             expect(0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
+             "case8.5");
+    columnHints = {makeEndColumnHint<true>("b", Value("abd"))};
+    checkTag(kvstore.get(),
+             schema,
+             indices[1],
+             columnHints,
+             expect(0, 1, 2, 3, 4, 6, 7, 8, 9),
+             "case8.6");
+  }
+  // Case 9: fixed string in (-INF, y)
+  {
+    columnHints = {makeEndColumnHint<false>("a", Value("aaaaa"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(0), "case9.1");
+    columnHints = {makeEndColumnHint<false>("b", Value("aaaaa"))};
+    checkTag(kvstore.get(), schema, indices[1], columnHints, expect(0), "case9.2");
+    columnHints = {makeEndColumnHint<false>("a", Value("abcde"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(0, 1, 2, 3, 4), "case9.3");
+    columnHints = {makeEndColumnHint<false>("b", Value("abcde"))};
+    checkTag(kvstore.get(), schema, indices[1], columnHints, expect(0, 1, 2, 3, 4), "case9.4");
+    columnHints = {makeEndColumnHint<false>("a", Value("abd"))};
+    checkTag(kvstore.get(),
+             schema,
+             indices[0],
+             columnHints,
+             expect(0, 1, 2, 3, 4, 5, 6, 7, 8),
+             "case9.5");
+    columnHints = {makeEndColumnHint<false>("b", Value("abd"))};
+    checkTag(
+        kvstore.get(), schema, indices[1], columnHints, expect(0, 1, 2, 3, 4, 6, 7, 8), "case9.6");
+  }
+}
+
+TEST_F(IndexScanScalarType, CompoundFixedString) {
+  auto rows = R"(
+    fixed_string | fixed_string
+    aaa          | aaa
+    abc          | aaa
+    abcde        | abcde
+    abcde        |
+    abcde        | abcdef
+    abcde        | abcdefg
+    abcde        | abcdf
+    abcde        | abd
+    abd          | abd
+  )"_row;
+  auto schema = R"(
+    a | fixed_string | 5 | false
+    b | fixed_string | 5 | true
+  )"_schema;
+  // Since this is an index on a fixed_string property, the index field length is the same as
+  // the length of the fixed_string property
+  auto indices = R"(
+    TAG(t,1)
+    (iab,2): a(5), b(5)
+  )"_index(schema);
+  auto kv = encodeTag(rows, 1, schema, indices);
+  auto kvstore = std::make_unique<MockKVStore>();
+  for (auto& iter : kv) {
+    for (auto& item : iter) {
+      kvstore->put(item.first, item.second);
+    }
+  }
+  std::vector<cpp2::IndexColumnHint> columnHints;
+  // prefix
+  {
+    columnHints = {makeColumnHint("a", Value("abcde"))};
+    // null is ordered last
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(2, 4, 5, 6, 7, 3), "case1.1");
+  }
+  // prefix + prefix
+  {
+    columnHints = {makeColumnHint("a", Value("abcde")), makeColumnHint("b", Value("abcde"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(2, 4, 5), "case2.1");
+    columnHints = {makeColumnHint("a", Value("abcde")), makeColumnHint("b", Value("abd"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(7), "case2.2");
+  }
+  // prefix + range
+  {
+    // where a = "abcde" and b < "abd"
+    columnHints = {makeColumnHint("a", Value("abcde")),
+                   makeEndColumnHint<false>("b", Value("abd"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(2, 4, 5, 6), "case3.1");
+    // where a = "abcde" and b <= "abd"
+    columnHints = {makeColumnHint("a", Value("abcde")),
+                   makeEndColumnHint<true>("b", Value("abd"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(2, 4, 5, 6, 7), "case3.2");
+    // where a = "abcde" and b > "abcde"
+    columnHints = {makeColumnHint("a", Value("abcde")),
+                   makeBeginColumnHint<false>("b", Value("abcde"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(6, 7), "case3.3");
+    // where a = "abcde" and b >= "abcde"
+    columnHints = {makeColumnHint("a", Value("abcde")),
+                   makeBeginColumnHint<true>("b", Value("abcde"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(2, 4, 5, 6, 7), "case3.4");
+    // where a = "abcde" and "abcde" < b < "abcdf"
+    columnHints = {makeColumnHint("a", Value("abcde")),
+                   makeColumnHint<false, false>("b", Value("abcde"), Value("abcdf"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(), "case3.5");
+    // where a = "abcde" and "abcde" <= b < "abcdf"
+    columnHints = {makeColumnHint("a", Value("abcde")),
+                   makeColumnHint<true, false>("b", Value("abcde"), Value("abcdf"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(2, 4, 5), "case3.6");
+    // where a = "abcde" and "abcde" < b <= "abcdf"
+    columnHints = {makeColumnHint("a", Value("abcde")),
+                   makeColumnHint<false, true>("b", Value("abcde"), Value("abcdf"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(6), "case3.7");
+    // where a = "abcde" and "abcde" <= b <= "abcdf"
+    columnHints = {makeColumnHint("a", Value("abcde")),
+                   makeColumnHint<true, true>("b", Value("abcde"), Value("abcdf"))};
+    checkTag(kvstore.get(), schema, indices[0], columnHints, expect(2, 4, 5, 6), "case3.8");
+  }
+}
+
 TEST_F(IndexScanScalarType, Nullable) {
   std::shared_ptr schema;
   auto kvstore = std::make_unique<MockKVStore>();
@@ -2137,8 +2409,42 @@ TEST_F(IndexScanScalarType, Nullable) {
   }
 }
-TEST_F(IndexScanTest, TTL) {
-  // TODO(hs.zhang): add unittest
+TEST_F(IndexScanScalarType, TTL) {
+  auto rows = R"(
+    int
+    1
+    100
+    <now>
+
+  )"_row;
+  auto schema = R"(
+    a | int | | true
+  )"_schema;
+  auto indices = R"(
+    TAG(t,1)
+    (i1,2):a
+  )"_index(schema);
+
+  int64_t ttlDuration = 1;
+  // set a TTL property on the schema
+  meta::cpp2::SchemaProp schemaProp;
+  schemaProp.ttl_duration_ref() = ttlDuration;
+  schemaProp.ttl_col_ref() = "a";
+  schema->setProp(std::move(schemaProp));
+
+  auto kv = encodeTag(rows, 1, schema, indices);
+  auto kvstore = std::make_unique<MockKVStore>();
+  for (auto& iter : kv) {
+    for (auto& item : iter) {
+      kvstore->put(item.first, item.second);
+    }
+  }
+  std::vector<cpp2::IndexColumnHint> columnHints;
+  columnHints = {makeBeginColumnHint<true>("a", Value(100))};
+  checkTag(kvstore.get(), schema, indices[0], columnHints, expect(2), "ttl_not_expired");
+  sleep(ttlDuration + 1);
+  columnHints = {makeBeginColumnHint<true>("a", Value(100))};
+  checkTag(kvstore.get(), schema, indices[0], columnHints, expect(), "ttl_expired");
 }
 TEST_F(IndexScanScalarType, Time) {
diff --git a/src/storage/test/IndexTestUtil.h b/src/storage/test/IndexTestUtil.h
index 11d900f74d7..8ad2c99a5e6 100644
--- a/src/storage/test/IndexTestUtil.h
+++ b/src/storage/test/IndexTestUtil.h
@@ -395,6 +395,8 @@ class RowParser {
        row.emplace_back(std::numeric_limits<double>::quiet_NaN());
      } else if (values[i] == "<-NaN>") {
        row.emplace_back(-std::numeric_limits<double>::quiet_NaN());
+      } else if (values[i] == "<now>") {
+        row.emplace_back(Value(time::WallClock::fastNowInSec()));
      } else {
        row.emplace_back(transformMap[typeList_[i]](values[i]));
      }
@@ -454,6 +456,7 @@ class RowParser {
       {"string", [](const std::string& str) { return Value(str); }},
       {"float", [](const std::string& str) { return Value(folly::to<double>(str)); }},
       {"bool", [](const std::string& str) { return Value(str == "true" ? true : false); }},
+      {"fixed_string", [](const std::string& str) { return Value(str); }},
       {"date", [this](const std::string& str) { return Value(stringToDate(str)); }},
       {"time", [this](const std::string& str) { return Value(stringToTime(str)); }},
       {"datetime", [this](const std::string& str) { return Value(stringToDateTime(str)); }},
@@ -514,6 +517,7 @@ class SchemaParser {
       {"int", ::nebula::cpp2::PropertyType::INT64},
       {"double", ::nebula::cpp2::PropertyType::DOUBLE},
       {"string", ::nebula::cpp2::PropertyType::STRING},
+      {"fixed_string", ::nebula::cpp2::PropertyType::FIXED_STRING},
       {"bool", ::nebula::cpp2::PropertyType::BOOL},
       {"date", ::nebula::cpp2::PropertyType::DATE},
       {"time", ::nebula::cpp2::PropertyType::TIME},
diff --git a/src/storage/transaction/ChainProcessorFactory.cpp b/src/storage/transaction/ChainProcessorFactory.cpp
index 2f25402c683..ed660d60992 100644
--- a/src/storage/transaction/ChainProcessorFactory.cpp
+++ b/src/storage/transaction/ChainProcessorFactory.cpp
@@ -61,7 +61,8 @@ ChainBaseProcessor* ChainProcessorFactory::makeProcessor(StorageEnv* env,
         break;
       }
       case ResumeType::UNKNOWN: {
-        LOG(FATAL) << "ResumeType::UNKNOWN: not supposed to run here";
+        LOG(DFATAL) << "ResumeType::UNKNOWN: not supposed to run here";
+        return nullptr;
       }
     }
     break;
@@ -79,7 +80,8 @@ ChainBaseProcessor* ChainProcessorFactory::makeProcessor(StorageEnv* env,
         break;
       }
      case ResumeType::UNKNOWN: {
-        LOG(FATAL) << "ResumeType::UNKNOWN: not supposed to run here";
+        LOG(DFATAL) << "ResumeType::UNKNOWN: not supposed to run here";
+        return nullptr;
      }
    }
    break;
@@ -97,13 +99,15 @@ ChainBaseProcessor* ChainProcessorFactory::makeProcessor(StorageEnv* env,
        break;
      }
      case ResumeType::UNKNOWN: {
-        LOG(FATAL) << "ResumeType::UNKNOWN: not supposed to run here";
+        LOG(DFATAL) << "ResumeType::UNKNOWN: not supposed to run here";
+        return nullptr;
      }
    }
    break;
  }
  case RequestType::UNKNOWN: {
-    LOG(FATAL) << "RequestType::UNKNOWN: not supposed to run here";
+    LOG(DFATAL) << "RequestType::UNKNOWN: not supposed to run here";
+    return nullptr;
  }
 }
 ret->term_ = termId;
diff --git
a/src/storage/transaction/ConsistUtil.cpp b/src/storage/transaction/ConsistUtil.cpp index a0923d84a96..d1aa3c0a3cb 100644 --- a/src/storage/transaction/ConsistUtil.cpp +++ b/src/storage/transaction/ConsistUtil.cpp @@ -72,7 +72,8 @@ RequestType ConsistUtil::parseType(folly::StringPiece val) { case 'd': return RequestType::DELETE; default: - LOG(FATAL) << "should not happen, identifier is " << identifier; + LOG(DFATAL) << "should not happen, identifier is " << identifier; + return RequestType::UNKNOWN; } } diff --git a/tests/Makefile b/tests/Makefile index acca9cdcaa2..5200dc86219 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -118,6 +118,7 @@ test: sess $(test_j) --dist=loadfile -k "not tck" $(TEST_DIR) slow-query: currdir + $(test_j) tck/steps/test_kill_slow_query_base_test.py && \ $(test_j) tck/steps/test_kill_slow_query_via_same_service.py && \ $(test_j) tck/steps/test_kill_slow_query_via_different_service.py && \ $(test_j) tck/steps/test_kill_permission_via_same_service.py && \ diff --git a/tests/common/plan_differ.py b/tests/common/plan_differ.py index 19cae12dc81..061f8a2d31e 100644 --- a/tests/common/plan_differ.py +++ b/tests/common/plan_differ.py @@ -189,15 +189,22 @@ def _is_subdict_nested(self, expect, resp): def _try_convert_json(j): try: - return json.loads(j) + res = json.loads(j) + if isinstance(res, list): + for m in res: + if isinstance(m, dict): + if 'tagId' in m: + m.pop('tagId') + if 'type' in m: + m.pop('type') + return res except: return j extracted_resp_dict = {} if len(key_list) == 1: - - for k in resp: - extracted_resp_dict[k] = _try_convert_json(resp[k]) + for k in resp: + extracted_resp_dict[k] = _try_convert_json(resp[k]) else: extracted_resp_dict = self._convert_jsonStr_to_dict(resp, key_list) diff --git a/tests/tck/conftest.py b/tests/tck/conftest.py index 294aa7f7d8a..4d06bbf6c0b 100644 --- a/tests/tck/conftest.py +++ b/tests/tck/conftest.py @@ -473,7 +473,6 @@ def executing_query( exec_query(request, ngql, exec_ctx, sess) sess.release() - @when(parse("profiling query:\n{query}")) def profiling_query(query, exec_ctx, request): ngql = "PROFILE {" + combine_query(query) + "}" @@ -826,7 +825,6 @@ def drop_used_space(exec_ctx): session = exec_ctx.get('current_session') response(session, stmt) - @then(parse("the execution plan should be:\n{plan}")) def check_plan(request, plan, exec_ctx): ngql = exec_ctx["ngql"] diff --git a/tests/tck/features/bugfix/CompareDate.feature b/tests/tck/features/bugfix/CompareDate.feature new file mode 100644 index 00000000000..32f04db9ee2 --- /dev/null +++ b/tests/tck/features/bugfix/CompareDate.feature @@ -0,0 +1,30 @@ +# Copyright (c) 2022 vesoft inc. All rights reserved. +# +# This source code is licensed under Apache 2.0 License. 
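+# Regression coverage for #5046: a date() value inserted into a date property must compare
+# equal to date() in a later UPDATE ... WHEN filter.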
+Feature: Compare date value + + # #5046 + Scenario: Compare date value + Given an empty graph + And create a space with following options: + | partition_num | 1 | + | replica_factor | 1 | + | vid_type | FIXED_STRING(30) | + | charset | utf8 | + | collate | utf8_bin | + When executing query: + """ + create tag date_comp(i1 int, d1 date); + """ + Then the execution should be successful + When try to execute query: + """ + INSERT VERTEX date_comp(i1, d1) VALUES 'xxx':(1, date()); + """ + Then the execution should be successful + When try to execute query: + """ + UPDATE VERTEX ON date_comp 'xxx' SET i1=3 WHEN d1 == date() + YIELD i1; + """ + Then the execution should be successful diff --git a/tests/tck/features/fetch/FetchEdges.intVid.feature b/tests/tck/features/fetch/FetchEdges.intVid.feature index 469bd5ebf30..e9d3e17a036 100644 --- a/tests/tck/features/fetch/FetchEdges.intVid.feature +++ b/tests/tck/features/fetch/FetchEdges.intVid.feature @@ -198,35 +198,41 @@ Feature: Fetch Int Vid Edges | serve.start_year | serve.end_year | Scenario: Fetch prop Error + # fetch on a not existing edgetype + When executing query: + """ + FETCH PROP ON not_exist_edge hash("Boris Diaw")->hash("Spurs") YIELD edge as e + """ + Then a ExecutionError should be raised at runtime: EdgeNotFound: EdgeName `not_exist_edge` When executing query: """ FETCH PROP ON serve hash("Boris Diaw")->hash("Spurs") YIELD $^.serve.start_year """ - Then a ExecutionError should be raised at runtime: + Then a ExecutionError should be raised at runtime: TagNotFound: TagName `serve` When executing query: """ FETCH PROP ON serve hash("Boris Diaw")->hash("Spurs") YIELD $$.serve.start_year """ - Then a ExecutionError should be raised at runtime: + Then a ExecutionError should be raised at runtime: TagNotFound: TagName `serve` # yield not existing edgetype When executing query: """ FETCH PROP ON serve hash("Boris Diaw")->hash("Spurs") YIELD abc.start_year """ - Then a ExecutionError should be raised at runtime: + Then a ExecutionError should be raised at runtime: EdgeNotFound: EdgeName `abc` # Fetch prop returns not existing property When executing query: """ FETCH PROP ON serve hash('Boris Diaw')->hash('Hawks') YIELD serve.start_year1 """ - Then a SemanticError should be raised at runtime: + Then a SemanticError should be raised at runtime: `serve.start_year1', not found the property `start_year1'. 
# Fetch prop on illegal input When executing query: """ GO FROM hash('Boris Diaw') OVER serve YIELD serve._src AS src, serve._dst AS src | FETCH PROP ON serve $-.src->$-.dst YIELD serve.start_year, serve.end_year """ - Then a SemanticError should be raised at runtime: + Then a SemanticError should be raised at runtime: `$-.dst', not exist prop `dst' Scenario: Fetch prop on a edge and return duplicate columns When executing query: diff --git a/tests/tck/features/fetch/FetchEdges.strVid.feature b/tests/tck/features/fetch/FetchEdges.strVid.feature index 8a4d4b2c44d..225d7e90db2 100644 --- a/tests/tck/features/fetch/FetchEdges.strVid.feature +++ b/tests/tck/features/fetch/FetchEdges.strVid.feature @@ -231,30 +231,30 @@ Feature: Fetch String Vid Edges """ FETCH PROP ON serve "Boris Diaw"->"Spurs" YIELD $^.serve.start_year """ - Then a ExecutionError should be raised at runtime: + Then a ExecutionError should be raised at runtime: TagNotFound: TagName `serve` When executing query: """ FETCH PROP ON serve "Boris Diaw"->"Spurs" YIELD $$.serve.start_year """ - Then a ExecutionError should be raised at runtime: + Then a ExecutionError should be raised at runtime: TagNotFound: TagName `serve` When executing query: """ FETCH PROP ON serve "Boris Diaw"->"Spurs" YIELD abc.start_year """ - Then a ExecutionError should be raised at runtime: + Then a ExecutionError should be raised at runtime: EdgeNotFound: EdgeName `abc` # Fetch prop on illegal input When executing query: """ GO FROM 'Boris Diaw' OVER serve YIELD serve._src AS src, serve._dst AS src | FETCH PROP ON serve $-.src->$-.dst YIELD serve.start_year, serve.end_year """ - Then a SemanticError should be raised at runtime: + Then a SemanticError should be raised at runtime: `$-.dst', not exist prop `dst' # Fetch prop returns not existing property When executing query: """ FETCH PROP ON serve 'Boris Diaw'->'Hawks' YIELD serve.not_exist_prop """ - Then a SemanticError should be raised at runtime: + Then a SemanticError should be raised at runtime: `serve.not_exist_prop', not found the property `not_exist_prop'. Scenario: yield edge When executing query: diff --git a/tests/tck/features/fetch/FetchVertices.intVid.feature b/tests/tck/features/fetch/FetchVertices.intVid.feature index f1470ff4cde..966d395ff8a 100644 --- a/tests/tck/features/fetch/FetchVertices.intVid.feature +++ b/tests/tck/features/fetch/FetchVertices.intVid.feature @@ -311,44 +311,44 @@ Feature: Fetch Int Vid Vertices """ FETCH PROP ON player hash('Boris Diaw') YIELD not_exist_tag.name, player.age """ - Then a ExecutionError should be raised at runtime: + Then a ExecutionError should be raised at runtime: TagNotFound: TagName `not_exist_tag` # Fetch prop no not existing tag When executing query: """ FETCH PROP ON not_exist_tag hash('Boris Diaw') """ - Then a ExecutionError should be raised at runtime: + Then a ExecutionError should be raised at runtime: TagNotFound: TagName `not_exist_tag` # yield not existing property When executing query: """ FETCH PROP ON player hash('Boris Diaw') YIELD player.not_existing_prop """ - Then a SemanticError should be raised at runtime: + Then a SemanticError should be raised at runtime: `player.not_existing_prop', not found the property `not_existing_prop'. 
# duplicate input When executing query: """ GO FROM hash('Boris Diaw') over like YIELD like._dst as id, like._dst as id | FETCH PROP ON player $-.id YIELD player.name, player.age """ - Then a SemanticError should be raised at runtime: + Then a SemanticError should be raised at runtime: Duplicate Column Name : `id' # only constant list or single column of data is allowed in piped FETCH clause When executing query: """ GO FROM 'Boris Diaw' over like YIELD like._src as src, like._dst as dst | FETCH PROP ON player $-.src, $-.dst YIELD vertex as node; """ - Then a SyntaxError should be raised at runtime: + Then a SyntaxError should be raised at runtime: syntax error near `, $-.dst' Scenario: Different from v1.x When executing query: """ GO FROM hash('Boris Diaw') over like YIELD like._dst as id | FETCH PROP ON player $-.id YIELD player.name, player.age, $-.* """ - Then a SemanticError should be raised at runtime: + Then a SemanticError should be raised at runtime: `$-.*', not exist prop `*' # $- is not supported When executing query: """ GO FROM hash('NON EXIST VERTEX ID') OVER serve YIELD dst(edge) as id | FETCH PROP ON team $- YIELD vertex as node """ - Then a SyntaxError should be raised at runtime: + Then a SyntaxError should be raised at runtime: syntax error near `YIELD' Scenario: format yield When executing query: diff --git a/tests/tck/features/fetch/FetchVertices.strVid.feature b/tests/tck/features/fetch/FetchVertices.strVid.feature index 1710050fa56..59d9516430e 100644 --- a/tests/tck/features/fetch/FetchVertices.strVid.feature +++ b/tests/tck/features/fetch/FetchVertices.strVid.feature @@ -396,70 +396,70 @@ Feature: Fetch String Vertices """ FETCH PROP ON player 'Boris Diaw' YIELD $^.player.name, player.age """ - Then a SemanticError should be raised at runtime: + Then a SemanticError should be raised at runtime: unsupported src/dst property expression in yield. # Fetch Vertices not support get dst property When executing query: """ FETCH PROP ON player 'Boris Diaw' YIELD $$.player.name, player.age """ - Then a SemanticError should be raised at runtime: + Then a SemanticError should be raised at runtime: unsupported src/dst property expression in yield. # Fetch vertex yields not existing tag When executing query: """ FETCH PROP ON player 'Boris Diaw' YIELD not_exist_tag.name, player.age """ - Then a ExecutionError should be raised at runtime: + Then a ExecutionError should be raised at runtime: TagNotFound: TagName `not_exist_tag` When executing query: """ FETCH PROP ON * "Tim Duncan", "Boris Diaw" YIELD not_exist_tag.name """ - Then a ExecutionError should be raised at runtime: + Then a ExecutionError should be raised at runtime: TagNotFound: TagName `not_exist_tag` # Fetch prop no not existing tag When executing query: """ FETCH PROP ON not_exist_tag 'Boris Diaw' """ - Then a ExecutionError should be raised at runtime: + Then a ExecutionError should be raised at runtime: TagNotFound: TagName `not_exist_tag` When executing query: """ GO FROM 'Boris Diaw' over like YIELD like._dst as id, like._dst as id | FETCH PROP ON player $-.id YIELD player.name, player.age """ - Then a SemanticError should be raised at runtime: + Then a SemanticError should be raised at runtime: Duplicate Column Name : `id' When executing query: """ GO FROM "11" over like YIELD like._dst as id | FETCH PROP ON player "11" YIELD $-.id """ - Then a SemanticError should be raised at runtime: + Then a SemanticError should be raised at runtime: unsupported input/variable property expression in yield. 
# Fetch on existing vertex, and yield not existing property When executing query: """ FETCH PROP ON player 'Boris Diaw' YIELD player.not_exist_prop """ - Then a SemanticError should be raised at runtime: + Then a SemanticError should be raised at runtime: `player.not_exist_prop', not found the property `not_exist_prop'. When executing query: """ FETCH PROP ON * "Tim Duncan", "Boris Diaw" YIELD player.not_exist_prop """ - Then a SemanticError should be raised at runtime: + Then a SemanticError should be raised at runtime: `player.not_exist_prop', not found the property `not_exist_prop'. # only constant list or single column of data is allowed in piped FETCH clause When executing query: """ GO FROM 'Boris Diaw' over like YIELD like._src as src, like._dst as dst | FETCH PROP ON player $-.src, $-.dst; """ - Then a SyntaxError should be raised at runtime: + Then a SyntaxError should be raised at runtime: syntax error near `, $-.dst' Scenario: Different from v1.x When executing query: """ GO FROM 'Boris Diaw' over like YIELD like._dst as id | FETCH PROP ON player $-.id YIELD player.name, player.age, $-.* """ - Then a SemanticError should be raised at runtime: + Then a SemanticError should be raised at runtime: `$-.*', not exist prop `*' # Different from 1.x $- is not supported When executing query: """ GO FROM 'NON EXIST VERTEX ID' OVER serve | FETCH PROP ON team $- """ - Then a SyntaxError should be raised at runtime: + Then a SyntaxError should be raised at runtime: syntax error near `$-' Scenario: format yield When executing query: diff --git a/tests/tck/features/index/Index.feature b/tests/tck/features/index/Index.feature index cc05630a200..c135ac4a797 100644 --- a/tests/tck/features/index/Index.feature +++ b/tests/tck/features/index/Index.feature @@ -2145,3 +2145,1445 @@ Feature: IndexTest_Vid_String | "8" | "8" | | "9" | "9" | Then drop the used space + + Scenario: IndexTest FailureTest + Given an empty graph + And create a space with following options: + | partition_num | 1 | + | replica_factor | 1 | + | vid_type | FIXED_STRING(30) | + | charset | utf8 | + | collate | utf8_bin | + And having executed: + """ + CREATE TAG t1(col1 int, col2 double, col3 bool, col4 string, + col5 time, col6 date, col7 datetime, col8 fixed_string(10), + col9 geography, col10 int, col11 int, col12 int, + col13 int, col14 int, col15 int, col16 int, + col17 int, col18 int, col19 int, col20 int); + CREATE EDGE e1(col1 int, col2 double, col3 bool, col4 string, + col5 time, col6 date, col7 datetime, col8 fixed_string(10), + col9 geography, col10 int, col11 int, col12 int, + col13 int, col14 int, col15 int, col16 int, + col17 int, col18 int, col19 int, col20 int); + """ + When executing query: + """ + CREATE TAG INDEX string_index_with_no_length ON t1(col4); + """ + Then a ExecutionError should be raised at runtime: + When executing query: + """ + CREATE EDGE INDEX string_index_with_no_length ON e1(col4); + """ + Then a ExecutionError should be raised at runtime: + When executing query: + """ + CREATE TAG INDEX string_index_too_long ON t1(col4(257)); + """ + Then a ExecutionError should be raised at runtime: + When executing query: + """ + CREATE EDGE INDEX string_index_too_long ON e1(col4(257)); + """ + Then a ExecutionError should be raised at runtime: + When executing query: + """ + CREATE TAG INDEX index_with_too_many_properties ON t1(col1, col2, col3, col4(10), col5, col6, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18); + """ + Then a ExecutionError should be raised at runtime: 
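+      # the same limit on the number of index fields applies to edge indexes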
+ When executing query: + """ + CREATE EDGE INDEX index_with_too_many_properties ON e1(col1, col2, col3, col4(10), col5, col6, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18); + """ + Then a ExecutionError should be raised at runtime: + When executing query: + """ + CREATE TAG INDEX index_with_duplicate_properties ON t1(col1, col2, col3, col1); + """ + Then a ExecutionError should be raised at runtime: + When executing query: + """ + CREATE EDGE INDEX index_with_duplicate_properties ON e1(col1, col2, col3, col1); + """ + Then a ExecutionError should be raised at runtime: + + Scenario: IndexTest CompoundIndexTest1 + Given an empty graph + And create a space with following options: + | partition_num | 1 | + | replica_factor | 1 | + | vid_type | FIXED_STRING(30) | + | charset | utf8 | + | collate | utf8_bin | + And having executed: + """ + CREATE TAG t1(col1 int, col2 double, col3 bool, col4 string, col5 time, col6 date, col7 datetime, col8 fixed_string(10)); + """ + When executing query: + """ + CREATE TAG INDEX ti1 ON t1(col1); + CREATE TAG INDEX ti12 ON t1(col1, col2); + CREATE TAG INDEX ti13 ON t1(col1, col3); + CREATE TAG INDEX ti14 ON t1(col1, col4(10)); + CREATE TAG INDEX ti15 ON t1(col1, col5); + CREATE TAG INDEX ti16 ON t1(col1, col6); + CREATE TAG INDEX ti17 ON t1(col1, col7); + CREATE TAG INDEX ti18 ON t1(col1, col8); + """ + Then the execution should be successful + And wait 5 seconds + When executing query: + """ + INSERT VERTEX t1(col1, col2, col3, col4, col5, col6, col7, col8) + VALUES + "1": (1, 1.0, false, "apple", time("11:11:11"), date("2022-01-01"), datetime("2022-01-01T11:11:11"), "apple"), + "2": (2, 2.0, true, "banana", time("22:22:22"), date("2022-12-31"), datetime("2022-12-31T22:22:22"), "banana"); + """ + Then the execution should be successful + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 == 1 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 > 1 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 >= 1 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 < 2 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 <= 2 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 == 1 AND t1.col2 == 1.0 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 >= 1 AND t1.col2 > 1.0 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 >= 1 AND t1.col2 >= 1.0 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 <= 2 AND t1.col2 < 2.0 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 <= 2 AND t1.col2 <= 2.0 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON 
t1 WHERE t1.col1 == 1 AND t1.col3 == false YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 >= 1 AND t1.col3 > false YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 >= 1 AND t1.col3 >= false YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 <= 2 AND t1.col3 < true YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 <= 2 AND t1.col3 <= true YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 == 1 AND t1.col4 == "apple" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 >= 1 AND t1.col4 > "apple" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 >= 1 AND t1.col4 >= "apple" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 <= 2 AND t1.col4 < "banana" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 <= 2 AND t1.col4 <= "banana" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 == 1 AND t1.col5 == time("11:11:11") YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 >= 1 AND t1.col5 > time("11:11:11") YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 >= 1 AND t1.col5 >= time("11:11:11") YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 <= 2 AND t1.col5 < time("22:22:22") YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 <= 2 AND t1.col5 <= time("22:22:22") YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 == 1 AND t1.col6 == date("2022-01-01") YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 >= 1 AND t1.col6 > date("2022-01-01") YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 >= 1 AND t1.col6 >= date("2022-01-01") YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 <= 2 AND t1.col6 < date("2022-12-31") YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 <= 2 AND 
t1.col6 <= date("2022-12-31") YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 == 1 AND t1.col7 == datetime("2022-01-01T11:11:11") YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 >= 1 AND t1.col7 > datetime("2022-01-01T11:11:11") YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 >= 1 AND t1.col7 >= datetime("2022-01-01T11:11:11") YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 <= 2 AND t1.col7 < datetime("2022-12-31T22:22:22") YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 <= 2 AND t1.col7 <= datetime("2022-12-31T22:22:22") YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 == 1 AND t1.col8 == "apple" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 >= 1 AND t1.col8 > "apple" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 >= 1 AND t1.col8 >= "apple" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 <= 2 AND t1.col8 < "banana" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col1 <= 2 AND t1.col8 <= "banana" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + Then drop the used space + + Scenario: IndexTest CompoundIndexTest2 + Given an empty graph + And create a space with following options: + | partition_num | 1 | + | replica_factor | 1 | + | vid_type | FIXED_STRING(30) | + | charset | utf8 | + | collate | utf8_bin | + And having executed: + """ + CREATE TAG t1(col1 int, col2 double, col3 bool, col4 string); + """ + When executing query: + """ + CREATE TAG INDEX ti21 ON t1(col2, col1); + CREATE TAG INDEX ti23 ON t1(col2, col3); + CREATE TAG INDEX ti24 ON t1(col2, col4(10)); + CREATE TAG INDEX ti31 ON t1(col3, col1); + CREATE TAG INDEX ti32 ON t1(col3, col2); + CREATE TAG INDEX ti34 ON t1(col3, col4(10)); + CREATE TAG INDEX ti41 ON t1(col4(10), col1); + CREATE TAG INDEX ti42 ON t1(col4(10), col2); + CREATE TAG INDEX ti43 ON t1(col4(10), col3); + """ + Then the execution should be successful + And wait 5 seconds + When executing query: + """ + INSERT VERTEX t1(col1, col2, col3, col4) + VALUES + "1": (1, 1.0, false, "apple"), + "2": (2, 1.0, true, "banana"), + "3": (3, 2.0, false, "carrot"), + "4": (4, 2.0, true, "durian"); + """ + Then the execution should be successful + When executing query: + """ + LOOKUP ON t1 WHERE t1.col2 == 1.0 AND t1.col1 == 1 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col2 == 1.0 AND t1.col1 > 1 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "2" | + 
When executing query: + """ + LOOKUP ON t1 WHERE t1.col2 == 1.0 AND t1.col1 >= 1 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col2 == 1.0 AND t1.col1 < 2 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col2 == 1.0 AND t1.col1 <= 2 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col2 == 1.0 AND t1.col3 == false YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col2 == 1.0 AND t1.col3 > false YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col2 == 1.0 AND t1.col3 >= false YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col2 == 1.0 AND t1.col3 < true YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col2 == 1.0 AND t1.col3 <= true YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col2 == 1.0 AND t1.col4 == "apple" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col2 == 1.0 AND t1.col4 > "apple" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col2 == 1.0 AND t1.col4 >= "apple" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col2 == 1.0 AND t1.col4 < "banana" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col2 == 1.0 AND t1.col4 <= "banana" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col3 == false AND t1.col1 == 1 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col3 == false AND t1.col1 > 1 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "3" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col3 == false AND t1.col1 >= 1 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "3" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col3 == false AND t1.col1 < 3 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col3 == false AND t1.col1 <= 3 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "3" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col3 == false AND t1.col2 == 1.0 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col3 == false AND t1.col2 > 1.0 YIELD id(vertex) as id + 
""" + Then the result should be, in any order: + | id | + | "3" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col3 == false AND t1.col2 >= 1.0 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "3" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col3 == false AND t1.col2 < 2.0 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col3 == false AND t1.col2 <= 2.0 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "3" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col3 == false AND t1.col4 == "apple" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col3 == false AND t1.col4 > "apple" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "3" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col3 == false AND t1.col4 >= "apple" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "3" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col3 == false AND t1.col4 < "carrot" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col3 == false AND t1.col4 <= "carrot" YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "1" | + | "3" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col4 == "carrot" AND t1.col1 == 3 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "3" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col4 == "carrot" AND t1.col1 > 3 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col4 == "carrot" AND t1.col1 >= 3 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "3" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col4 == "carrot" AND t1.col1 < 3 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col4 == "carrot" AND t1.col1 <= 3 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "3" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col4 == "durian" AND t1.col2 == 2.0 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "4" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col4 == "durian" AND t1.col2 > 2.0 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col4 == "durian" AND t1.col2 >= 2.0 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "4" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col4 == "durian" AND t1.col2 < 2.0 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col4 == "durian" AND t1.col2 <= 2.0 YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "4" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col4 == "banana" AND t1.col3 == true YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col4 == 
"banana" AND t1.col3 > true YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col4 == "banana" AND t1.col3 >= true YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "2" | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col4 == "banana" AND t1.col3 < true YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + When executing query: + """ + LOOKUP ON t1 WHERE t1.col4 == "banana" AND t1.col3 <= true YIELD id(vertex) as id + """ + Then the result should be, in any order: + | id | + | "2" | + Then drop the used space + + Scenario: IndexTest CompoundIndexTest3 + Given an empty graph + And create a space with following options: + | partition_num | 1 | + | replica_factor | 1 | + | vid_type | FIXED_STRING(30) | + | charset | utf8 | + | collate | utf8_bin | + And having executed: + """ + CREATE EDGE e1(col1 int, col2 double, col3 bool, col4 string, col5 time, col6 date, col7 datetime, col8 fixed_string(10)); + """ + When executing query: + """ + CREATE EDGE INDEX ei1 ON e1(col1); + CREATE EDGE INDEX ei12 ON e1(col1, col2); + CREATE EDGE INDEX ei13 ON e1(col1, col3); + CREATE EDGE INDEX ei14 ON e1(col1, col4(10)); + CREATE EDGE INDEX ei15 ON e1(col1, col5); + CREATE EDGE INDEX ei16 ON e1(col1, col6); + CREATE EDGE INDEX ei17 ON e1(col1, col7); + CREATE EDGE INDEX ei18 ON e1(col1, col8); + """ + Then the execution should be successful + And wait 5 seconds + When executing query: + """ + INSERT EDGE e1(col1, col2, col3, col4, col5, col6, col7, col8) + VALUES + "1" -> "3": (1, 1.0, false, "apple", time("11:11:11"), date("2022-01-01"), datetime("2022-01-01T11:11:11"), "apple"), + "2" -> "4": (2, 2.0, true, "banana", time("22:22:22"), date("2022-12-31"), datetime("2022-12-31T22:22:22"), "banana"); + """ + Then the execution should be successful + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 == 1 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 > 1 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 >= 1 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 < 2 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 <= 2 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 == 1 AND e1.col2 == 1.0 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 >= 1 AND e1.col2 > 1.0 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 >= 1 AND e1.col2 >= 1.0 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE 
e1.col1 <= 2 AND e1.col2 < 2.0 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 <= 2 AND e1.col2 <= 2.0 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 == 1 AND e1.col3 == false YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 >= 1 AND e1.col3 > false YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 >= 1 AND e1.col3 >= false YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 <= 2 AND e1.col3 < true YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 <= 2 AND e1.col3 <= true YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 == 1 AND e1.col4 == "apple" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 >= 1 AND e1.col4 > "apple" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 >= 1 AND e1.col4 >= "apple" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 <= 2 AND e1.col4 < "banana" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 <= 2 AND e1.col4 <= "banana" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 == 1 AND e1.col5 == time("11:11:11") YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 >= 1 AND e1.col5 > time("11:11:11") YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 >= 1 AND e1.col5 >= time("11:11:11") YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 <= 2 AND e1.col5 < time("22:22:22") YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 <= 2 AND e1.col5 <= time("22:22:22") YIELD src(edge) as src, dst(edge) as dst + """ + Then the 
result should be, in any order: + | src | dst | + | "1" | "3" | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 == 1 AND e1.col6 == date("2022-01-01") YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 >= 1 AND e1.col6 > date("2022-01-01") YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 >= 1 AND e1.col6 >= date("2022-01-01") YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 <= 2 AND e1.col6 < date("2022-12-31") YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 <= 2 AND e1.col6 <= date("2022-12-31") YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 == 1 AND e1.col7 == datetime("2022-01-01T11:11:11") YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 >= 1 AND e1.col7 > datetime("2022-01-01T11:11:11") YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 >= 1 AND e1.col7 >= datetime("2022-01-01T11:11:11") YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 <= 2 AND e1.col7 < datetime("2022-12-31T22:22:22") YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 <= 2 AND e1.col7 <= datetime("2022-12-31T22:22:22") YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 == 1 AND e1.col8 == "apple" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 >= 1 AND e1.col8 > "apple" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 >= 1 AND e1.col8 >= "apple" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + | "2" | "4" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 <= 2 AND e1.col8 < "banana" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col1 <= 2 AND e1.col8 <= "banana" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "3" | + | "2" | "4" | + Then drop the used space + + Scenario: IndexTest CompoundIndexTest4 + 
Given an empty graph + And create a space with following options: + | partition_num | 1 | + | replica_factor | 1 | + | vid_type | FIXED_STRING(30) | + | charset | utf8 | + | collate | utf8_bin | + And having executed: + """ + CREATE EDGE e1(col1 int, col2 double, col3 bool, col4 string); + """ + When executing query: + """ + CREATE EDGE INDEX ei21 ON e1(col2, col1); + CREATE EDGE INDEX ei23 ON e1(col2, col3); + CREATE EDGE INDEX ei24 ON e1(col2, col4(10)); + CREATE EDGE INDEX ei31 ON e1(col3, col1); + CREATE EDGE INDEX ei32 ON e1(col3, col2); + CREATE EDGE INDEX ei34 ON e1(col3, col4(10)); + CREATE EDGE INDEX ei41 ON e1(col4(10), col1); + CREATE EDGE INDEX ei42 ON e1(col4(10), col2); + CREATE EDGE INDEX ei43 ON e1(col4(10), col3); + """ + Then the execution should be successful + And wait 5 seconds + When executing query: + """ + INSERT EDGE e1(col1, col2, col3, col4) + VALUES + "1" -> "5": (1, 1.0, false, "apple"), + "2" -> "6": (2, 1.0, true, "banana"), + "3" -> "7": (3, 2.0, false, "carrot"), + "4" -> "8": (4, 2.0, true, "durian"); + """ + Then the execution should be successful + When executing query: + """ + LOOKUP ON e1 WHERE e1.col2 == 1.0 AND e1.col1 == 1 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col2 == 1.0 AND e1.col1 > 1 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "2" | "6" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col2 == 1.0 AND e1.col1 >= 1 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + | "2" | "6" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col2 == 1.0 AND e1.col1 < 2 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col2 == 1.0 AND e1.col1 <= 2 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + | "2" | "6" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col2 == 1.0 AND e1.col3 == false YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col2 == 1.0 AND e1.col3 > false YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "2" | "6" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col2 == 1.0 AND e1.col3 >= false YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + | "2" | "6" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col2 == 1.0 AND e1.col3 < true YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col2 == 1.0 AND e1.col3 <= true YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + | "2" | "6" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col2 == 1.0 AND e1.col4 == "apple" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col2 == 1.0 AND e1.col4 > "apple" YIELD src(edge) as src, 
dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "2" | "6" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col2 == 1.0 AND e1.col4 >= "apple" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + | "2" | "6" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col2 == 1.0 AND e1.col4 < "banana" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col2 == 1.0 AND e1.col4 <= "banana" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + | "2" | "6" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col3 == false AND e1.col1 == 1 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col3 == false AND e1.col1 > 1 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "3" | "7" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col3 == false AND e1.col1 >= 1 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + | "3" | "7" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col3 == false AND e1.col1 < 3 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col3 == false AND e1.col1 <= 3 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + | "3" | "7" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col3 == false AND e1.col2 == 1.0 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col3 == false AND e1.col2 > 1.0 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "3" | "7" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col3 == false AND e1.col2 >= 1.0 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + | "3" | "7" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col3 == false AND e1.col2 < 2.0 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col3 == false AND e1.col2 <= 2.0 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + | "3" | "7" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col3 == false AND e1.col4 == "apple" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col3 == false AND e1.col4 > "apple" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "3" | "7" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col3 == false AND e1.col4 >= "apple" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + | "3" | 
"7" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col3 == false AND e1.col4 < "carrot" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col3 == false AND e1.col4 <= "carrot" YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "1" | "5" | + | "3" | "7" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col4 == "carrot" AND e1.col1 == 3 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "3" | "7" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col4 == "carrot" AND e1.col1 > 3 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col4 == "carrot" AND e1.col1 >= 3 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "3" | "7" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col4 == "carrot" AND e1.col1 < 3 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col4 == "carrot" AND e1.col1 <= 3 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "3" | "7" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col4 == "durian" AND e1.col2 == 2.0 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "4" | "8" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col4 == "durian" AND e1.col2 > 2.0 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col4 == "durian" AND e1.col2 >= 2.0 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "4" | "8" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col4 == "durian" AND e1.col2 < 2.0 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col4 == "durian" AND e1.col2 <= 2.0 YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "4" | "8" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col4 == "banana" AND e1.col3 == true YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "2" | "6" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col4 == "banana" AND e1.col3 > true YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col4 == "banana" AND e1.col3 >= true YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "2" | "6" | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col4 == "banana" AND e1.col3 < true YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + When executing query: + """ + LOOKUP ON e1 WHERE e1.col4 == "banana" AND e1.col3 <= true YIELD src(edge) as src, dst(edge) as dst + """ + Then the result should be, in any order: + | src | dst | + | "2" | "6" | + Then drop the used space diff 
--git a/tests/tck/features/match/Base.feature b/tests/tck/features/match/Base.feature index 72b1ae2f46d..5f0ef7d3906 100644 --- a/tests/tck/features/match/Base.feature +++ b/tests/tck/features/match/Base.feature @@ -208,6 +208,15 @@ Feature: Basic match | "Danny Green" | "LeBron James" | | "Danny Green" | "Marco Belinelli" | | "Danny Green" | "Tim Duncan" | + When executing query: + """ + MATCH (v1) -[:like]-> (v2:player{name: "Danny Green"}) + RETURN v1.player.name AS Name, v2.player.name AS Friend + """ + Then the result should be, in any order: + | Name | Friend | + | "Dejounte Murray" | "Danny Green" | + | "Marco Belinelli" | "Danny Green" | When executing query: """ MATCH (v1:player{name: "Danny Green"}) <-[:like]- (v2) @@ -243,7 +252,9 @@ Feature: Basic match | "Danny Green" | "Tim Duncan" | When executing query: """ - MATCH (v:player)-[e:like]-(v2) where v.player.age == 38 RETURN * + MATCH (v:player)-[e:like]-(v2) + WHERE v.player.age == 38 + RETURN * """ Then the result should be, in any order, with relax comparison: | v | e | v2 | @@ -254,7 +265,38 @@ Feature: Basic match | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | [:like "Yao Ming"->"Tracy McGrady" @0 {likeness: 90}] | ("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"}) | When executing query: """ - MATCH (v:player)-[e:like]->(v2) where id(v) == "Tim Duncan" RETURN DISTINCT properties(e) as props, e + MATCH (v:player)-[e:like]->(v2) + WHERE v2.player.age > 45 + RETURN * + """ + Then the result should be, in any order, with relax comparison: + | v | e | v2 | + | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | [:like "Yao Ming"->"Shaquille O'Neal" @0 {likeness: 90}] | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | + | ("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"}) | [:like "Tracy McGrady"->"Grant Hill" @0 {likeness: 90}] | ("Grant Hill" :player{age: 46, name: "Grant Hill"}) | + When executing query: + """ + MATCH (v:player)-[e:like]->(v2) + WHERE v.player.age == 38 and (v2.player.age < 35 or v2.player.age == 40) + RETURN * + """ + Then the result should be, in any order, with relax comparison: + | v | e | v2 | + | ("Paul Gasol" :player{age: 38, name: "Paul Gasol"}) | [:like "Paul Gasol"->"Kobe Bryant" @0 {likeness: 90}] | ("Kobe Bryant" :player{age: 40, name: "Kobe Bryant"}) | + | ("Paul Gasol" :player{age: 38, name: "Paul Gasol"}) | [:like "Paul Gasol"->"Marc Gasol" @0 {likeness: 99}] | ("Marc Gasol" :player{age: 34, name: "Marc Gasol"}) | + When executing query: + """ + MATCH (v:player)-[e:like]->(v2) + WHERE v.player.age == 38 and (v2.player.age < 35 or v2.player.age == 40) and e.likeness > 90 + RETURN * + """ + Then the result should be, in any order, with relax comparison: + | v | e | v2 | + | ("Paul Gasol" :player{age: 38, name: "Paul Gasol"}) | [:like "Paul Gasol"->"Marc Gasol" @0 {likeness: 99}] | ("Marc Gasol" :player{age: 34, name: "Marc Gasol"}) | + When executing query: + """ + MATCH (v:player)-[e:like]->(v2) + WHERE id(v) == "Tim Duncan" + RETURN DISTINCT properties(e) as props, e """ Then the result should be, in any order, with relax comparison: | props | e | @@ -262,11 +304,55 @@ Feature: Basic match | {likeness: 95} | [:like "Tim Duncan"->"Tony Parker" @0 {likeness: 95}] | When executing query: """ - MATCH (v:player)-[e:like]->(v2) where id(v) == "Tim Duncan" RETURN DISTINCT properties(e) as props + MATCH (v:player)-[e:like]->(v2) + WHERE id(v) == "Tim Duncan" + RETURN DISTINCT properties(e) as props """ Then the result should be, in any order, with relax comparison: | 
props | | {likeness: 95} | + When executing query: + """ + MATCH (v1:player)-[e]->(v2) + WHERE v2.player.age == 38 or (v2.team.name == 'Rockets' and v1.player.age == 38) + RETURN + v1.player.name AS Name, + type(e) as Type, + CASE WHEN v2.player.name IS NOT NULL THEN v2.player.name ELSE v2.team.name END AS FriendOrTeam + """ + Then the result should be, in any order, with relax comparison: + | Name | Type | FriendOrTeam | + | "Yao Ming" | "serve" | "Rockets" | + | "Marc Gasol" | "like" | "Paul Gasol" | + When executing query: + """ + MATCH (v1:player{name: "Danny Green"}) -[:like]- (v2) + WHERE v1.player.age > 1000 + RETURN v1.player.name AS Name, v2.player.name AS Friend + """ + Then the result should be, in any order: + | Name | Friend | + When executing query: + """ + MATCH (v1:player{name: "Danny Green"}) -[:like]- (v2:player{name: "Yao Ming"}) + RETURN v1.player.name AS Name, v2.player.name AS Friend + """ + Then the result should be, in any order: + | Name | Friend | + When executing query: + """ + MATCH (v1:player{name: "Danny Green"}) -[e1:like]- (v2) + WHERE e1.likeness_not_exists > 0 + RETURN v1.player.name AS Name, v2.player.name AS Friend + """ + Then the result should be, in any order: + | Name | Friend | + When try to execute query: + """ + MATCH (v1:player{name: "Danny Green"}) -[:like_not_exists]- (v2) + RETURN v1.player.name AS Name, v2.player.name AS Friend + """ + Then a SemanticError should be raised at runtime: `like_not_exists': Unknown edge type Scenario: two steps When executing query: @@ -280,6 +366,105 @@ Feature: Basic match | "Paul George" | "Russell Westbrook" | "Paul George" | | "Damian Lillard" | "LaMarcus Aldridge" | "Tim Duncan" | | "Damian Lillard" | "LaMarcus Aldridge" | "Tony Parker" | + When executing query: + """ + MATCH (v1:player) -[:like]-> (v2) -[:like]-> (v3) + WHERE v1.player.age == 28 + RETURN v1.player.name AS Player, v2.player.name AS Friend, v3.player.name AS FoF, v3.player.name_not_exists AS NotExists + """ + Then the result should be, in any order: + | Player | Friend | FoF | NotExists | + | "Damian Lillard" | "LaMarcus Aldridge" | "Tim Duncan" | __NULL__ | + | "Damian Lillard" | "LaMarcus Aldridge" | "Tony Parker" | __NULL__ | + | "Paul George" | "Russell Westbrook" | "James Harden" | __NULL__ | + | "Paul George" | "Russell Westbrook" | "Paul George" | __NULL__ | + When executing query: + """ + MATCH (v1) -[:like]-> (v2:player{age: 28}) -[:like]-> (v3) + RETURN v1.player.name AS Player, v2.player.name AS Friend, v3.player.name AS FoF + """ + Then the result should be, in any order: + | Player | Friend | FoF | + | "Russell Westbrook" | "Paul George" | "Russell Westbrook" | + When executing query: + """ + MATCH (v1) -[:like]-> (v2) -[:like]-> (v3) + WHERE v2.player.age == 28 + RETURN v1.player.name AS Player, v2.player.name AS Friend, v3.player.name AS FoF + """ + Then the result should be, in any order: + | Player | Friend | FoF | + | "Russell Westbrook" | "Paul George" | "Russell Westbrook" | + When executing query: + """ + MATCH (v1) -[:like]-> (v2) -[:like]-> (v3:player{age: 28}) + RETURN v1.player.name AS Player, v2.player.name AS Friend, v3.player.name AS FoF + """ + Then the result should be, in any order: + | Player | Friend | FoF | + | "Dejounte Murray" | "Russell Westbrook" | "Paul George" | + | "James Harden" | "Russell Westbrook" | "Paul George" | + | "Paul George" | "Russell Westbrook" | "Paul George" | + When executing query: + """ + MATCH (v1) -[:like]-> (v2) -[:like]-> (v3) + WHERE v3.player.age == 28 + RETURN v1.player.name 
AS Player, v2.player.name AS Friend, v3.player.name AS FoF + """ + Then the result should be, in any order: + | Player | Friend | FoF | + | "Dejounte Murray" | "Russell Westbrook" | "Paul George" | + | "James Harden" | "Russell Westbrook" | "Paul George" | + | "Paul George" | "Russell Westbrook" | "Paul George" | + When executing query: + """ + MATCH (v1) -[e1:like]-> (v2) -[e2:like]-> (v3) + WHERE v1.player.age > 28 and e1.likeness > 90 and v2.player.age > 40 and e2.likeness > 90 and v3.player.age > 40 + RETURN v1.player.name AS Player, v2.player.name AS Friend, v3.player.name AS FoF + """ + Then the result should be, in any order: + | Player | Friend | FoF | + | "Dejounte Murray" | "Tim Duncan" | "Manu Ginobili" | + | "Tony Parker" | "Tim Duncan" | "Manu Ginobili" | + When executing query: + """ + MATCH (v1) -[e1:like]-> (v2) -[e2]-> (v3) + WHERE v3.player.age == 38 or (v3.team.name == 'Rockets' and v1.player.age == 38) + RETURN v1.player.name AS Player, v2.player.name AS Friend, type(e2) AS TYPE, v3.player.name AS FoF, v3.team.name AS FoT + """ + Then the result should be, in any order: + | Player | Friend | TYPE | FoF | FoT | + | "Paul Gasol" | "Marc Gasol" | "like" | "Paul Gasol" | __NULL__ | + | "Yao Ming" | "Tracy McGrady" | "serve" | __NULL__ | "Rockets" | + When executing query: + """ + MATCH (v1) -[e1:like]-> (v2) -[e2]-> (v3) + WHERE v1.player.age > 1000 + RETURN v1.player.name AS Player, v2.player.name AS Friend, v3.player.name AS FoF + """ + Then the result should be, in any order: + | Player | Friend | FoF | + When executing query: + """ + MATCH (v1:player{name: "Danny Green"}) -[:like]-> (v2) -[:like]-> (v3:player{name: "Yao Ming"}) + RETURN v1.player.name AS Player, v2.player.name AS Friend, v3.player.name AS FoF + """ + Then the result should be, in any order: + | Player | Friend | FoF | + When executing query: + """ + MATCH (v1) -[e1:like]-> (v2) -[e2]-> (v3) + WHERE e1.likeness_not_exists > 0 + RETURN v1.player.name AS Player, v2.player.name AS Friend, v3.player.name AS FoF + """ + Then the result should be, in any order: + | Player | Friend | FoF | + When try to execute query: + """ + MATCH (v1) -[e1:like]-> (v2) -[e2:like_not_exists]-> (v3) + RETURN v1.player.name AS Player, v2.player.name AS Friend, v3.player.name AS FoF + """ + Then a SemanticError should be raised at runtime: `like_not_exists': Unknown edge type When executing query: """ MATCH (v1:player{name: 'Tony Parker'}) -[r1:serve]-> (v2) <-[r2:serve]- (v3) @@ -632,7 +817,7 @@ Feature: Basic match """ MATCH (v:player) where v.player.name return v """ - Then a ExecutionError should be raised at runtime: Wrong type result, the type should be NULL, EMPTY, BOOL + Then a ExecutionError should be raised at runtime: Failed to evaluate condition: v.player.name. For boolean conditions, please write in their full forms like == or IS [NOT] NULL. 
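The Base.feature hunk above swaps the old bare type error for a hint toward full boolean forms. As a minimal sketch (illustrative only, not part of the patch) of the rewrites the new message asks for, reusing the nba properties these scenarios already query:

# Rejected: a bare property reference is not a boolean condition
MATCH (v:player) WHERE v.player.name RETURN v
# Accepted full forms, as the new error text suggests
MATCH (v:player) WHERE v.player.name == "Tim Duncan" RETURN v
MATCH (v:player) WHERE v.player.name IS NOT NULL RETURN v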
Scenario: Unimplemented features
When executing query:
diff --git a/tests/tck/features/match/SeekById.feature b/tests/tck/features/match/SeekById.feature
index 618c8bbd434..03724af662a 100644
--- a/tests/tck/features/match/SeekById.feature
+++ b/tests/tck/features/match/SeekById.feature
@@ -25,6 +25,23 @@ Feature: Match seek by id
| 'Jonathon Simmons' |
| 'Klay Thompson' |
| 'Dejounte Murray' |
+ # The start vid finder doesn't support variables currently
+ When executing query:
+ """
+ WITH [1, 2, 3] AS coll
+ UNWIND coll AS vid
+ MATCH (v) WHERE id(v) == vid
+ RETURN v;
+ """
+ Then a ExecutionError should be raised at runtime: Scan vertices or edges need to specify a limit number, or limit number can not push down.
+ When executing query:
+ """
+ WITH [1, 2, 3] AS coll
+ UNWIND coll AS vid
+ MATCH (v) WHERE id(v) == "Tony Parker" OR id(v) == vid
+ RETURN v;
+ """
+ Then a ExecutionError should be raised at runtime: Scan vertices or edges need to specify a limit number, or limit number can not push down.
Scenario: basic logical not
When executing query:
@@ -128,6 +145,28 @@ Feature: Match seek by id
| 'Jonathon Simmons' |
| 'Klay Thompson' |
| 'Dejounte Murray' |
+ When executing query:
+ """
+ MATCH (v)
+ WHERE id(v) IN ['James Harden', 'Jonathon Simmons', 'Klay Thompson', 'Dejounte Murray', 'Paul Gasol']
+ OR true
+ RETURN v.player.name AS Name
+ """
+ Then a ExecutionError should be raised at runtime: Scan vertices or edges need to specify a limit number, or limit number can not push down.
+ When executing query:
+ """
+ MATCH (v)
+ WHERE id(v) IN ['James Harden', 'Jonathon Simmons', 'Klay Thompson', 'Dejounte Murray', 'Paul Gasol']
+ AND true
+ RETURN v.player.name AS Name
+ """
+ Then the result should be, in any order:
+ | Name |
+ | 'Paul Gasol' |
+ | 'James Harden' |
+ | 'Jonathon Simmons' |
+ | 'Klay Thompson' |
+ | 'Dejounte Murray' |
When executing query:
"""
MATCH (v)
@@ -262,6 +301,11 @@ Feature: Match seek by id
RETURN v.player.name AS Name
"""
Then a ExecutionError should be raised at runtime: Scan vertices or edges need to specify a limit number, or limit number can not push down.
+ When executing query:
+ """
+ MATCH (v) WHERE id(v) == "Tim Duncan" OR id(v) != "Tony Parker" RETURN COUNT(*) AS count
+ """
+ Then a ExecutionError should be raised at runtime: Scan vertices or edges need to specify a limit number, or limit number can not push down.
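Read together, the SeekById additions draw one dividing line: the start vid finder applies only when the id predicate by itself pins down a finite vid set at planning time. A hedged sketch of that line, reusing vids from the same nba space (the queries are illustrative, not new test steps):

# Seeks by id: the vid set is fixed at planning time
MATCH (v) WHERE id(v) IN ['Tim Duncan', 'Tony Parker'] RETURN v
# Still seeks by id: AND can only narrow the fixed set
MATCH (v) WHERE id(v) IN ['Tim Duncan', 'Tony Parker'] AND true RETURN v
# Full scan, rejected without a limit: OR can admit vertices outside
# the id list, and a variable vid is unknown until runtime
MATCH (v) WHERE id(v) == 'Tim Duncan' OR id(v) != 'Tony Parker' RETURN v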
Scenario: test OR logic When executing query: diff --git a/tests/tck/features/optimizer/PrunePropertiesRule.feature b/tests/tck/features/optimizer/PrunePropertiesRule.feature index 734d0383c9b..f2316b6ce13 100644 --- a/tests/tck/features/optimizer/PrunePropertiesRule.feature +++ b/tests/tck/features/optimizer/PrunePropertiesRule.feature @@ -19,12 +19,12 @@ Feature: Prune Properties rule | 33 | | 41 | And the execution plan should be: - | id | name | dependencies | operator info | - | 8 | Project | 4 | | - | 4 | AppendVertices | 3 | { "props": "[{\"props\":[\"age\"],\"tagId\": 3}]" } | - | 3 | Traverse | 7 | { "vertexProps": "", "edgeProps": "[{\"props\":[\"_dst\", \"_rank\", \"_type\"],\"type\": 6}]" } | - | 7 | IndexScan | 2 | | - | 2 | Start | | | + | id | name | dependencies | operator info | + | 8 | Project | 4 | | + | 4 | AppendVertices | 3 | { "props": "[{\"props\":[\"age\"]}]" } | + | 3 | Traverse | 7 | { "vertexProps": "", "edgeProps": "[{\"props\":[\"_dst\", \"_rank\", \"_type\"]}]" } | + | 7 | IndexScan | 2 | | + | 2 | Start | | | When profiling query: """ MATCH p = (v:player{name: "Tony Parker"})-[e:like]->(v2) @@ -36,12 +36,12 @@ Feature: Prune Properties rule | "Tony Parker" | | "Tony Parker" | And the execution plan should be: - | id | name | dependencies | operator info | - | 8 | Project | 4 | | - | 4 | AppendVertices | 3 | { "props": "[{\"tagId\": 5, \"props\": [\"_tag\"]}, {\"tagId\": 3, \"props\": [\"_tag\"]}, {\"tagId\": 4, \"props\": [\"_tag\"]}]"} | - | 3 | Traverse | 7 | { "vertexProps": "[{\"props\":[\"name\"],\"tagId\": 3}]", "edgeProps": "[{\"props\":[\"_dst\", \"_rank\", \"_type\"],\"type\": 6}]" } | - | 7 | IndexScan | 2 | | - | 2 | Start | | | + | id | name | dependencies | operator info | + | 8 | Project | 4 | | + | 4 | AppendVertices | 3 | { "props": "[{ \"props\": [\"_tag\"]}, {\"props\": [\"_tag\"]}, {\"props\": [\"_tag\"]}]"} | + | 3 | Traverse | 7 | { "vertexProps": "[{\"props\":[\"name\"]}]", "edgeProps": "[{\"props\":[\"_dst\", \"_rank\", \"_type\"]}]" } | + | 7 | IndexScan | 2 | | + | 2 | Start | | | When profiling query: """ MATCH p = (v:player{name: "Tony Parker"})-[e:like]-(v2) @@ -58,12 +58,12 @@ Feature: Prune Properties rule | ("Tony Parker" :player{age: 36, name: "Tony Parker"}) | | ("Tony Parker" :player{age: 36, name: "Tony Parker"}) | And the execution plan should be: - | id | name | dependencies | operator info | - | 8 | Project | 4 | | - | 4 | AppendVertices | 3 | { "props": "[{\"tagId\": 5, \"props\": [\"_tag\"]}, {\"tagId\": 3, \"props\": [\"_tag\"]}, {\"tagId\": 4, \"props\": [\"_tag\"]}]" } | - | 3 | Traverse | 7 | { "vertexProps": "[{\"props\": [\"name\", \"age\", \"_tag\"], \"tagId\": 3}, {\"props\": [\"name\", \"speciality\", \"_tag\"], \"tagId\": 5}, {\"tagId\": 4, \"props\": [\"name\", \"_tag\"]}]", "edgeProps": "[{\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": -6}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 6}]" } | - | 7 | IndexScan | 2 | | - | 2 | Start | | | + | id | name | dependencies | operator info | + | 8 | Project | 4 | | + | 4 | AppendVertices | 3 | { "props": "[{\"props\": [\"_tag\"]}, {\"props\": [\"_tag\"]}, {\"props\": [\"_tag\"]}]" } | + | 3 | Traverse | 7 | { "vertexProps": "[{\"props\": [\"name\", \"age\", \"_tag\"]}, {\"props\": [\"name\", \"speciality\", \"_tag\"]}, {\"props\": [\"name\", \"_tag\"]}]", "edgeProps": "[{\"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"]}]" } | + | 7 | IndexScan | 2 | | + | 2 | Start | | | When profiling query: """ MATCH p 
= (v:player{name: "Tony Parker"})-[e:like]->(v2) @@ -75,12 +75,12 @@ Feature: Prune Properties rule | ("LaMarcus Aldridge" :player{age: 33, name: "LaMarcus Aldridge"}) | | ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}) | And the execution plan should be: - | id | name | dependencies | operator info | - | 8 | Project | 4 | | - | 4 | AppendVertices | 3 | { "props": "[{\"props\":[\"name\", \"age\", \"_tag\"],\"tagId\": 3}, {\"props\":[\"name\", \"speciality\", \"_tag\"], \"tagId\": 5}, {\"props\":[\"name\", \"_tag\"],\"tagId\": 4}]" } | - | 3 | Traverse | 7 | { "vertexProps": "", "edgeProps": "[{\"props\":[\"_dst\", \"_rank\", \"_type\"],\"type\": 6}]" } | - | 7 | IndexScan | 2 | | - | 2 | Start | | | + | id | name | dependencies | operator info | + | 8 | Project | 4 | | + | 4 | AppendVertices | 3 | { "props": "[{\"props\":[\"name\", \"age\", \"_tag\"]}, {\"props\":[\"name\", \"speciality\", \"_tag\"]}, {\"props\":[\"name\", \"_tag\"]}]" } | + | 3 | Traverse | 7 | { "vertexProps": "", "edgeProps": "[{\"props\":[\"_dst\", \"_rank\", \"_type\"]}]" } | + | 7 | IndexScan | 2 | | + | 2 | Start | | | # The rule will not take affect in this case because it returns the whole path When executing query: """ @@ -108,12 +108,12 @@ Feature: Prune Properties rule | "like" | | "like" | And the execution plan should be: - | id | name | dependencies | operator info | - | 8 | Project | 4 | | - | 4 | AppendVertices | 3 | { "props": "[{\"props\":[\"_tag\"],\"tagId\": 5}, {\"props\":[\"_tag\"],\"tagId\": 3}, {\"props\":[\"_tag\"],\"tagId\": 4} ]" } | - | 3 | Traverse | 7 | { "vertexProps": "", "edgeProps": "[{\"props\":[\"_dst\", \"_rank\", \"_type\"],\"type\": 6}]" } | - | 7 | IndexScan | 2 | | - | 2 | Start | | | + | id | name | dependencies | operator info | + | 8 | Project | 4 | | + | 4 | AppendVertices | 3 | { "props": "[{\"props\":[\"_tag\"]}, {\"props\":[\"_tag\"]}, {\"props\":[\"_tag\"]} ]" } | + | 3 | Traverse | 7 | { "vertexProps": "", "edgeProps": "[{\"props\":[\"_dst\", \"_rank\", \"_type\"]}]" } | + | 7 | IndexScan | 2 | | + | 2 | Start | | | When executing query: """ MATCH (v:player{name: "Tony Parker"})-[:like]-(v2)--(v3) @@ -170,21 +170,21 @@ Feature: Prune Properties rule | "Tim Duncan" | "Boris Diaw" | "Suns" | | "Tim Duncan" | "Boris Diaw" | "Tim Duncan" | And the execution plan should be: - | id | name | dependencies | operator info | - | 16 | TopN | 12 | | - | 12 | Project | 9 | | - | 9 | HashInnerJoin | 22, 23 | | - | 22 | Project | 5 | | - | 5 | AppendVertices | 4 | { "props": "[{\"props\":[\"name\"],\"tagId\": 3}]" } | - | 4 | Traverse | 2 | { "vertexProps": "[{\"props\":[\"name\"],\"tagId\": 3}]" } | - | 2 | Dedup | 1 | | - | 1 | PassThrough | 3 | | - | 3 | Start | | | - | 23 | Project | 8 | | - | 8 | AppendVertices | 7 | { "props": "[{\"tagId\": 3,\"props\":[\"name\"]}, {\"tagId\": 4,\"props\":[\"name\"]}]" } | - | 7 | Traverse | 6 | { "vertexProps": "[{\"tagId\": 3,\"props\":[\"name\"]}]", "edgeProps": "[{\"type\": -8, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"type\": -7, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": -6}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 6}, {\"type\": -7, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 7}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 8}]" } | - | 6 | Argument | 0 | | - | 0 | Start | | | + | id | name | dependencies | operator info | + | 16 | TopN | 12 | | + | 12 | Project | 9 | | + | 9 | 
HashInnerJoin | 22, 23 | | + | 22 | Project | 5 | | + | 5 | AppendVertices | 4 | { "props": "[{\"props\":[\"name\"]}]" } | + | 4 | Traverse | 2 | { "vertexProps": "[{\"props\":[\"name\"] }]" } | + | 2 | Dedup | 1 | | + | 1 | PassThrough | 3 | | + | 3 | Start | | | + | 23 | Project | 8 | | + | 8 | AppendVertices | 7 | { "props": "[{ \"props\":[\"name\"]}, { \"props\":[\"name\"]}]" } | + | 7 | Traverse | 6 | { "vertexProps": "[{ \"props\":[\"name\"]}]", "edgeProps": "[{ \"props\": [\"_dst\", \"_rank\", \"_type\"]}, { \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"]}, { \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"]}]" } | + | 6 | Argument | 0 | | + | 0 | Start | | | When profiling query: """ MATCH (m)-[]-(n), (n)-[]-(l), (l)-[]-(h) WHERE id(m)=="Tim Duncan" @@ -204,26 +204,26 @@ Feature: Prune Properties rule | "Tim Duncan" | "Aron Baynes" | "Spurs" | "Aron Baynes" | | "Tim Duncan" | "Aron Baynes" | "Spurs" | "Boris Diaw" | And the execution plan should be: - | id | name | dependencies | operator info | - | 20 | TopN | 23 | | - | 23 | Project | 13 | | - | 13 | HashInnerJoin | 9, 30 | | - | 9 | HashInnerJoin | 28, 29 | | - | 28 | Project | 5 | | - | 5 | AppendVertices | 4 | { "props": "[{\"props\":[\"name\"],\"tagId\": 3}]" } | - | 4 | Traverse | 2 | { "vertexProps": "[{\"props\":[\"name\"],\"tagId\": 3}]", "edgeProps": "[{\"type\": -8, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"type\": 8, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": -6}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 6}, {\"type\": -7, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 7}]" } | - | 2 | Dedup | 1 | | - | 1 | PassThrough | 3 | | - | 3 | Start | | | - | 29 | Project | 8 | | - | 8 | AppendVertices | 7 | { "props": "[{\"tagId\": 4,\"props\":[\"name\"]}]" } | - | 7 | Traverse | 6 | { "vertexProps": "[{\"tagId\": 3,\"props\":[\"name\"]}]", "edgeProps": "[{\"type\": -8, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"type\": 8, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": -6}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 6}, {\"type\": -7, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 7}]" } | - | 6 | Argument | | | - | 30 | Project | 12 | | - | 12 | AppendVertices | 11 | { "props": "[{\"props\":[\"name\"],\"tagId\": 3}]" } | - | 11 | Traverse | 10 | { "vertexProps": "[{\"props\":[\"name\"],\"tagId\": 4}]", "edgeProps": "[{\"type\": -8, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"type\": 8, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": -6}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 6}, {\"type\": -7, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 7}]" } | - | 10 | Argument | 0 | | - | 0 | Start | | | + | id | name | dependencies | operator info | + | 20 | TopN | 23 | | + | 23 | Project | 13 | | + | 13 | HashInnerJoin | 9, 30 | | + | 9 | HashInnerJoin | 28, 29 | | + | 28 | Project | 5 | | + | 5 | AppendVertices | 4 | { "props": "[{\"props\":[\"name\"] }]" } | + | 4 | Traverse | 2 | { "vertexProps": "[{\"props\":[\"name\"] }]", "edgeProps": "[{ \"props\": [\"_dst\", 
\"_rank\", \"_type\"]}, { \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"]}, { \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"]}]" } | + | 2 | Dedup | 1 | | + | 1 | PassThrough | 3 | | + | 3 | Start | | | + | 29 | Project | 8 | | + | 8 | AppendVertices | 7 | { "props": "[{ \"props\":[\"name\"]}]" } | + | 7 | Traverse | 6 | { "vertexProps": "[{ \"props\":[\"name\"]}]", "edgeProps": "[{ \"props\": [\"_dst\", \"_rank\", \"_type\"]}, { \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"]}, { \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"]}]" } | + | 6 | Argument | | | + | 30 | Project | 12 | | + | 12 | AppendVertices | 11 | { "props": "[{\"props\":[\"name\"] }]" } | + | 11 | Traverse | 10 | { "vertexProps": "[{\"props\":[\"name\"] }]", "edgeProps": "[{ \"props\": [\"_dst\", \"_rank\", \"_type\"]}, { \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"]}, { \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"]}]" } | + | 10 | Argument | 0 | | + | 0 | Start | | | # The schema id is not fixed in standalone cluster, so we skip it @distonly @@ -250,20 +250,20 @@ Feature: Prune Properties rule | "Tim Duncan" | "Boris Diaw" | "Suns" | | "Tim Duncan" | "Boris Diaw" | "Tim Duncan" | And the execution plan should be: - | id | name | dependencies | operator info | - | 17 | TopN | 13 | | - | 13 | Project | 12 | | - | 12 | HashInnerJoin | 19, 11 | | - | 19 | Project | 5 | | - | 5 | AppendVertices | 4 | { "props": "[{\"props\":[\"name\"],\"tagId\": 3}]" } | - | 4 | Traverse | 2 | { "vertexProps": "[{\"props\":[\"name\"],\"tagId\": 3}]" } | - | 2 | Dedup | 1 | | - | 1 | PassThrough | 3 | | - | 3 | Start | | | - | 11 | Project | 10 | | - | 10 | AppendVertices | 9 | { "props": "[{\"tagId\": 3,\"props\":[\"name\"]}, {\"tagId\": 4,\"props\":[\"name\"]}]" } | - | 9 | Traverse | 8 | { "vertexProps": "[{\"tagId\": 3,\"props\":[\"name\"]}]" } | - | 8 | Argument | | | + | id | name | dependencies | operator info | + | 17 | TopN | 13 | | + | 13 | Project | 12 | | + | 12 | HashInnerJoin | 19, 11 | | + | 19 | Project | 5 | | + | 5 | AppendVertices | 4 | { "props": "[{\"props\":[\"name\"] }]" } | + | 4 | Traverse | 2 | { "vertexProps": "[{\"props\":[\"name\"] }]" } | + | 2 | Dedup | 1 | | + | 1 | PassThrough | 3 | | + | 3 | Start | | | + | 11 | Project | 10 | | + | 10 | AppendVertices | 9 | { "props": "[{ \"props\":[\"name\"]}, { \"props\":[\"name\"]}]" } | + | 9 | Traverse | 8 | { "vertexProps": "[{ \"props\":[\"name\"]}]" } | + | 8 | Argument | | | When profiling query: """ MATCH (m)-[]-(n) WHERE id(m)=="Tim Duncan" @@ -284,25 +284,25 @@ Feature: Prune Properties rule | "Tim Duncan" | "Aron Baynes" | "Spurs" | "Aron Baynes" | | "Tim Duncan" | "Aron Baynes" | "Spurs" | "Boris Diaw" | And the execution plan should be: - | id | name | dependencies | operator info | - | 21 | TopN | 17 | | - | 17 | Project | 16 | | - | 16 | HashInnerJoin | 23, 14 | | - | 23 | Project | 5 | | - | 5 | AppendVertices | 4 | { "props": "[{\"props\":[\"name\"],\"tagId\": 3}]" } | - | 4 | Traverse | 2 | { "vertexProps": "[{\"props\":[\"name\"],\"tagId\": 3}]" } | - | 2 | Dedup | 1 | | - | 1 | PassThrough | 3 | | - | 3 | Start | | | - | 14 | HashInnerJoin | 33, 34 | | - | 33 
| Project | 10 | | - | 10 | AppendVertices | 9 | { "props": "[{\"tagId\": 4,\"props\":[\"name\"]}]" } | - | 9 | Traverse | 8 | { "vertexProps": "[{\"tagId\": 3,\"props\":[\"name\"]}]" } | - | 8 | Argument | | | - | 34 | Project | 13 | | - | 13 | AppendVertices | 12 | { "props": "[{\"tagId\": 3,\"props\":[\"name\"]}]" } | - | 12 | Traverse | 11 | { "vertexProps": "[{\"tagId\": 4,\"props\":[\"name\"]}]", "edgeProps": "[{\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": -8}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 8}, {\"type\": -6, \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 6}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": -7}, {\"props\": [\"_dst\", \"_rank\", \"_type\"], \"type\": 7}]" } | - | 11 | Argument | | | + | id | name | dependencies | operator info | + | 21 | TopN | 17 | | + | 17 | Project | 16 | | + | 16 | HashInnerJoin | 23, 14 | | + | 23 | Project | 5 | | + | 5 | AppendVertices | 4 | { "props": "[{\"props\":[\"name\"] }]" } | + | 4 | Traverse | 2 | { "vertexProps": "[{\"props\":[\"name\"] }]" } | + | 2 | Dedup | 1 | | + | 1 | PassThrough | 3 | | + | 3 | Start | | | + | 14 | HashInnerJoin | 33, 34 | | + | 33 | Project | 10 | | + | 10 | AppendVertices | 9 | { "props": "[{ \"props\":[\"name\"]}]" } | + | 9 | Traverse | 8 | { "vertexProps": "[{ \"props\":[\"name\"]}]" } | + | 8 | Argument | | | + | 34 | Project | 13 | | + | 13 | AppendVertices | 12 | { "props": "[{ \"props\":[\"name\"]}]" } | + | 12 | Traverse | 11 | { "vertexProps": "[{ \"props\":[\"name\"]}]", "edgeProps": "[{\"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"] }, { \"props\": [\"_dst\", \"_rank\", \"_type\"]}, {\"props\": [\"_dst\", \"_rank\", \"_type\"] }, {\"props\": [\"_dst\", \"_rank\", \"_type\"] }, {\"props\": [\"_dst\", \"_rank\", \"_type\"] }]" } | + | 11 | Argument | | | When profiling query: """ MATCH (v:player{name:"Tony Parker"}) @@ -315,18 +315,18 @@ Feature: Prune Properties rule | "Tim Duncan" | | "Tim Duncan" | And the execution plan should be: - | id | name | dependencies | operator info | - | 10 | Project | 11 | | - | 11 | HashInnerJoin | 14, 9 | | - | 14 | Project | 3 | | - | 3 | AppendVertices | 12 | { "props": "[{\"props\":[\"name\"],\"tagId\": 3}]" } | - | 12 | IndexScan | 2 | | - | 2 | Start | | | - | 9 | Project | 8 | | - | 8 | AppendVertices | 7 | { "props": "[{\"props\":[\"name\"],\"tagId\": 3}]" } | - | 7 | Traverse | 6 | { "vertexProps": "", "edgeProps": "[{\"type\": -8, \"props\": [\"_type\", \"_rank\", \"_dst\"]}, {\"props\": [\"_type\", \"_rank\", \"_dst\"], \"type\": -6}, {\"props\": [\"_type\", \"_rank\", \"_dst\"], \"type\": -7}]" } | - | 6 | Argument | 0 | | - | 0 | Start | | | + | id | name | dependencies | operator info | + | 10 | Project | 11 | | + | 11 | HashInnerJoin | 14, 9 | | + | 14 | Project | 3 | | + | 3 | AppendVertices | 12 | { "props": "[{\"props\":[\"name\"] }]" } | + | 12 | IndexScan | 2 | | + | 2 | Start | | | + | 9 | Project | 8 | | + | 8 | AppendVertices | 7 | { "props": "[{\"props\":[\"name\"] }]" } | + | 7 | Traverse | 6 | { "vertexProps": "", "edgeProps": "[{\"props\": [\"_type\", \"_rank\", \"_dst\"]}, {\"props\": [\"_type\", \"_rank\", \"_dst\"]}, {\"props\": [\"_type\", \"_rank\", \"_dst\"] }]"} | + | 6 | Argument | 0 | | + | 0 | Start | | | # The schema id is not fixed in standalone cluster, so we skip it @distonly @@ -351,20 +351,20 @@ Feature: Prune Properties rule | "Tim Duncan" | "Manu Ginobili" | NULL | | "Tim Duncan" | 
"Manu Ginobili" | NULL | And the execution plan should be: - | id | name | dependencies | operator info | - | 17 | TopN | 13 | | - | 13 | Project | 12 | | - | 12 | HashLeftJoin | 19, 11 | | - | 19 | Project | 5 | | - | 5 | AppendVertices | 4 | { "props": "[{\"props\":[\"name\"],\"tagId\": 3}]" } | - | 4 | Traverse | 2 | | - | 2 | Dedup | 1 | | - | 1 | PassThrough | 3 | | - | 3 | Start | | | - | 11 | Project | 10 | | - | 10 | AppendVertices | 9 | { "props": "[{\"props\":[\"name\", \"age\", \"_tag\"],\"tagId\": 3}, {\"props\":[\"name\", \"speciality\", \"_tag\"],\"tagId\": 5}, {\"props\":[\"name\", \"_tag\"],\"tagId\": 4}]" } | - | 9 | Traverse | 8 | { "vertexProps": "[{\"props\":[\"name\"],\"tagId\": 3}]" } | - | 8 | Argument | | | + | id | name | dependencies | operator info | + | 17 | TopN | 13 | | + | 13 | Project | 12 | | + | 12 | HashLeftJoin | 19, 11 | | + | 19 | Project | 5 | | + | 5 | AppendVertices | 4 | { "props": "[{\"props\":[\"name\"] }]" } | + | 4 | Traverse | 2 | | + | 2 | Dedup | 1 | | + | 1 | PassThrough | 3 | | + | 3 | Start | | | + | 11 | Project | 10 | | + | 10 | AppendVertices | 9 | { "props": "[{\"props\":[\"name\", \"age\", \"_tag\"] }, {\"props\":[\"name\", \"speciality\", \"_tag\"] }, {\"props\":[\"name\", \"_tag\"] }]" } | + | 9 | Traverse | 8 | { "vertexProps": "[{\"props\":[\"name\"] }]" } | + | 8 | Argument | | | When profiling query: """ MATCH (m:player{name:"Tim Duncan"})-[:like]-(n)--() @@ -391,6 +391,24 @@ Feature: Prune Properties rule @distonly Scenario: return function Given a graph with space named "nba" + When profiling query: + """ + MATCH (v1)-[:like]->(v2) + WHERE id(v1) == "Tim Duncan" + RETURN count(v2), v1 + """ + Then the result should be, in order: + | count(v2) | v1 | + | 2 | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + And the execution plan should be: + | id | name | dependencies | operator info | + | 7 | Aggregate | 6 | | + | 6 | Project | 5 | | + | 5 | AppendVertices | 4 | { "props": "[{\"props\":[\"_tag\"] }, {\"props\":[\"_tag\"] }, {\"props\":[\"_tag\"] }]" } | + | 4 | Traverse | 2 | {"vertexProps": "[{\"props\":[\"name\", \"age\", \"_tag\"] }, {\"props\":[\"name\", \"speciality\", \"_tag\"] }, {\"props\":[\"name\", \"_tag\"] }]" , "edgeProps": "[{ \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | + | 2 | Dedup | 1 | | + | 1 | PassThrough | 3 | | + | 3 | Start | | | When profiling query: """ MATCH (v1)-[e:like*1..5]->(v2) @@ -401,14 +419,14 @@ Feature: Prune Properties rule | count(v2.player.age) | | 24 | And the execution plan should be: - | id | name | dependencies | operator info | - | 7 | Aggregate | 6 | | - | 6 | Project | 5 | | - | 5 | AppendVertices | 4 | { "props": "[{\"props\":[\"age\"],\"tagId\": 3}]" } | - | 4 | Traverse | 2 | {"vertexProps": "", "edgeProps": "[{\"type\": 6, \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | - | 2 | Dedup | 1 | | - | 1 | PassThrough | 3 | | - | 3 | Start | | | + | id | name | dependencies | operator info | + | 7 | Aggregate | 6 | | + | 6 | Project | 5 | | + | 5 | AppendVertices | 4 | { "props": "[{\"props\":[\"age\"] }]" } | + | 4 | Traverse | 2 | {"vertexProps": "", "edgeProps": "[{ \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | + | 2 | Dedup | 1 | | + | 1 | PassThrough | 3 | | + | 3 | Start | | | When profiling query: """ MATCH (v1)-[e:like*1..5]->(v2) @@ -419,14 +437,14 @@ Feature: Prune Properties rule | count(v2) | | 24 | And the execution plan should be: - | id | name | dependencies | operator info | - | 7 | 
Aggregate | 6 | | - | 6 | Project | 5 | | - | 5 | AppendVertices | 4 | { "props": "[{\"props\":[\"_tag\", \"name\", \"speciality\"],\"tagId\": 5}, {\"props\":[\"_tag\", \"name\", \"age\"],\"tagId\": 3}, {\"props\":[\"_tag\", \"name\"],\"tagId\": 4}]" } | - | 4 | Traverse | 2 | {"vertexProps": "", "edgeProps": "[{\"type\": 6, \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | - | 2 | Dedup | 1 | | - | 1 | PassThrough | 3 | | - | 3 | Start | | | + | id | name | dependencies | operator info | + | 7 | Aggregate | 6 | | + | 6 | Project | 5 | | + | 5 | AppendVertices | 4 | { "props": "[{\"props\":[\"_tag\"] }, {\"props\":[\"_tag\"] }, {\"props\":[\"_tag\"] }]" } | + | 4 | Traverse | 2 | {"vertexProps": "" , "edgeProps": "[{ \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | + | 2 | Dedup | 1 | | + | 1 | PassThrough | 3 | | + | 3 | Start | | | When profiling query: """ MATCH p = (v1)-[e:like*1..5]->(v2) @@ -437,14 +455,14 @@ Feature: Prune Properties rule | length(p) | | 1 | And the execution plan should be: - | id | name | dependencies | operator info | - | 13 | Project | 11 | | - | 11 | Limit | 5 | | - | 5 | AppendVertices | 4 | { "props": "[{\"props\":[\"_tag\", \"name\", \"speciality\"],\"tagId\": 5}, {\"props\":[\"_tag\", \"name\", \"age\"],\"tagId\": 3}, {\"props\":[\"_tag\", \"name\"],\"tagId\": 4}]" } | - | 4 | Traverse | 2 | {"vertexProps": "[{\"props\":[\"name\", \"age\", \"_tag\"],\"tagId\": 3}, {\"props\":[\"name\", \"speciality\", \"_tag\"],\"tagId\": 5}, {\"props\":[\"name\", \"_tag\"],\"tagId\": 4}]", "edgeProps": "[{\"type\": 6, \"props\": [\"_type\", \"_rank\", \"_dst\", \"_src\", \"likeness\"]}]" } | - | 2 | Dedup | 1 | | - | 1 | PassThrough | 3 | | - | 3 | Start | | | + | id | name | dependencies | operator info | + | 13 | Project | 11 | | + | 11 | Limit | 5 | | + | 5 | AppendVertices | 4 | { "props": "[{\"props\":[\"_tag\", \"name\", \"speciality\"] }, {\"props\":[\"_tag\", \"name\", \"age\"] }, {\"props\":[\"_tag\", \"name\"] }]" } | + | 4 | Traverse | 2 | {"vertexProps": "[{\"props\":[\"name\", \"age\", \"_tag\"] }, {\"props\":[\"name\", \"speciality\", \"_tag\"] }, {\"props\":[\"name\", \"_tag\"] }]", "edgeProps": "[{ \"props\": [\"_type\", \"_rank\", \"_dst\", \"_src\", \"likeness\"]}]" } | + | 2 | Dedup | 1 | | + | 1 | PassThrough | 3 | | + | 3 | Start | | | When profiling query: """ MATCH p = (a:player)-[e:like*1..3]->(b:player{age:39}) @@ -464,19 +482,19 @@ Feature: Prune Properties rule | "Tracy McGrady" | 1 | | "Tracy McGrady" | 3 | And the execution plan should be: - | id | name | dependencies | operator info | - | 14 | Project | 13 | | - | 13 | HashInnerJoin | 15,12 | | - | 15 | Project | 17 | | - | 17 | AppendVertices | 16 | { "props": "[{\"props\":[\"_tag\", \"name\", \"speciality\"],\"tagId\": 5}, {\"props\":[\"_tag\", \"name\", \"age\"],\"tagId\": 3}, {\"props\":[\"_tag\", \"name\"],\"tagId\": 4}]" } | - | 16 | Traverse | 2 | {"vertexProps": "[{\"props\":[\"name\", \"age\", \"_tag\"],\"tagId\": 3}, {\"props\":[\"name\", \"speciality\", \"_tag\"],\"tagId\": 5}, {\"props\":[\"name\", \"_tag\"],\"tagId\": 4}]", "edgeProps": "[{\"type\": 6, \"props\": [\"_type\", \"_rank\", \"_dst\", \"_src\", \"likeness\"]}]" } | - | 2 | Dedup | 1 | | - | 1 | PassThrough | 3 | | - | 3 | Start | | | - | 12 | Project | 18 | | - | 18 | AppendVertices | 10 | { "props": "[{\"props\":[\"_tag\"],\"tagId\": 4}]" } | - | 10 | Traverse | 8 | {"vertexProps": "[{\"props\":[\"name\", \"age\", \"_tag\"],\"tagId\": 3}, {\"props\":[\"name\", \"speciality\", \"_tag\"],\"tagId\": 5}, 
{\"props\":[\"name\", \"_tag\"],\"tagId\": 4}]", "edgeProps": "[{\"type\": 7, \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | - | 8 | Argument | | | + | id | name | dependencies | operator info | + | 14 | Project | 13 | | + | 13 | HashInnerJoin | 15,12 | | + | 15 | Project | 17 | | + | 17 | AppendVertices | 16 | { "props": "[{\"props\":[\"_tag\", \"name\", \"speciality\"] }, {\"props\":[\"_tag\", \"name\", \"age\"] }, {\"props\":[\"_tag\", \"name\"] }]" } | + | 16 | Traverse | 2 | {"vertexProps": "[{\"props\":[\"name\", \"age\", \"_tag\"] }, {\"props\":[\"name\", \"speciality\", \"_tag\"] }, {\"props\":[\"name\", \"_tag\"] }]", "edgeProps": "[{ \"props\": [\"_type\", \"_rank\", \"_dst\", \"_src\", \"likeness\"]}]" } | + | 2 | Dedup | 1 | | + | 1 | PassThrough | 3 | | + | 3 | Start | | | + | 12 | Project | 18 | | + | 18 | AppendVertices | 10 | { "props": "[{\"props\":[\"_tag\"] }]" } | + | 10 | Traverse | 8 | {"vertexProps": "[{\"props\":[\"name\", \"age\", \"_tag\"] }, {\"props\":[\"name\", \"speciality\", \"_tag\"] }, {\"props\":[\"name\", \"_tag\"] }]", "edgeProps": "[{ \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | + | 8 | Argument | | | @distonly Scenario: union match @@ -498,19 +516,19 @@ Feature: Prune Properties rule | "LaMarcus Aldridge" | | "Steve Nash" | And the execution plan should be: - | id | name | dependencies | operator info | - | 14 | Dedup | 13 | | - | 13 | Union | 18, 19 | | - | 18 | Project | 4 | | - | 4 | AppendVertices | 20 | { "props": "[{\"props\":[\"_tag\"],\"tagId\": 5}, {\"props\":[\"_tag\"],\"tagId\": 3}, {\"props\":[\"_tag\"],\"tagId\": 4}]" } | - | 20 | Traverse | 16 | {"vertexProps": "", "edgeProps": "[{\"type\": 6, \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | - | 16 | IndexScan | 2 | | - | 2 | Start | | | - | 19 | Project | 10 | | - | 10 | AppendVertices | 21 | { "props": "[{\"props\":[\"_tag\"],\"tagId\": 5}, {\"props\":[\"_tag\"],\"tagId\": 3}, {\"props\":[\"_tag\"],\"tagId\": 4}]" } | - | 21 | Traverse | 17 | {"vertexProps": "", "edgeProps": "[{\"type\": 6, \"props\": [\"_type\", \"_rank\", \"_dst\"]}, {\"type\": -6, \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | - | 17 | IndexScan | 8 | | - | 8 | Start | | | + | id | name | dependencies | operator info | + | 14 | Dedup | 13 | | + | 13 | Union | 18, 19 | | + | 18 | Project | 4 | | + | 4 | AppendVertices | 20 | { "props": "[{\"props\":[\"_tag\"] }, {\"props\":[\"_tag\"] }, {\"props\":[\"_tag\"] }]" } | + | 20 | Traverse | 16 | {"vertexProps": "", "edgeProps": "[{ \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | + | 16 | IndexScan | 2 | | + | 2 | Start | | | + | 19 | Project | 10 | | + | 10 | AppendVertices | 21 | { "props": "[{\"props\":[\"_tag\"] }, {\"props\":[\"_tag\"] }, {\"props\":[\"_tag\"] }]" } | + | 21 | Traverse | 17 | {"vertexProps": "", "edgeProps": "[{ \"props\": [\"_type\", \"_rank\", \"_dst\"]}, { \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | + | 17 | IndexScan | 8 | | + | 8 | Start | | | @distonly Scenario: optional match @@ -530,27 +548,27 @@ Feature: Prune Properties rule | "Spurs" | 11 | | "Hornets" | 3 | And the execution plan should be: - | id | name | dependencies | operator info | - | 21 | Aggregate | 20 | | - | 20 | Aggregate | 19 | | - | 19 | HashLeftJoin | 10, 25 | | - | 10 | Aggregate | 23 | | - | 23 | Project | 22 | | - | 22 | Filter | 29 | | - | 29 | AppendVertices | 28 | { "props": "[{\"props\":[\"name\", \"_tag\"],\"tagId\": 4}]" } | - | 28 | Traverse | 27 | {"vertexProps": "[{\"props\":[\"name\", \"age\", \"_tag\"],\"tagId\": 3}, 
{\"props\":[\"name\", \"speciality\", \"_tag\"],\"tagId\": 5}, {\"props\":[\"name\", \"_tag\"],\"tagId\": 4}]", "edgeProps": "[{\"type\": 7, \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | - | 27 | Traverse | 26 | {"vertexProps": "", "edgeProps": "[{\"type\": -8, \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | - | 26 | Traverse | 2 | {"vertexProps": "", "edgeProps": "[{\"type\": -6, \"props\": [\"_type\", \"_rank\", \"_dst\"]}, {\"type\": 6, \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | - | 2 | Dedup | 1 | | - | 1 | PassThrough | 3 | | - | 3 | Start | | | - | 25 | Project | 24 | | - | 24 | Filter | 16 | | - | 16 | AppendVertices | 15 | { "props": "[{\"props\":[\"name\", \"_tag\"],\"tagId\": 4}]" } | - | 15 | Traverse | 14 | {"vertexProps": "[{\"props\":[\"name\", \"age\", \"_tag\"],\"tagId\": 3}, {\"props\":[\"name\", \"speciality\", \"_tag\"],\"tagId\": 5}, {\"props\":[\"name\", \"_tag\"],\"tagId\": 4}]", "edgeProps": "[{\"type\": 7, \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | - | 14 | Traverse | 13 | {"vertexProps": "", "edgeProps": "[{\"type\": -6, \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | - | 13 | Traverse | 11 | {"vertexProps": "", "edgeProps": "[{\"type\": -6, \"props\": [\"_type\", \"_rank\", \"_dst\"]}, {\"type\": 6, \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | - | 11 | Argument | | | + | id | name | dependencies | operator info | + | 21 | Aggregate | 20 | | + | 20 | Aggregate | 19 | | + | 19 | HashLeftJoin | 10, 25 | | + | 10 | Aggregate | 23 | | + | 23 | Project | 22 | | + | 22 | Filter | 29 | | + | 29 | AppendVertices | 28 | { "props": "[{ \"props\":[\"name\",\"age\",\"_tag\"]},{\"props\":[\"name\",\"speciality\",\"_tag\"] },{ \"props\":[\"name\",\"_tag\"]}]" } | + | 28 | Traverse | 27 | {"vertexProps": "[{\"props\":[\"age\"] }]", "edgeProps": "[{ \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | + | 27 | Traverse | 26 | {"vertexProps": "", "edgeProps": "[{ \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | + | 26 | Traverse | 2 | {"vertexProps": "[{ \"props\":[\"name\",\"age\",\"_tag\"]},{\"props\":[\"name\",\"speciality\",\"_tag\"] },{ \"props\":[\"name\",\"_tag\"]}]", "edgeProps": "[{ \"props\": [\"_type\", \"_rank\", \"_dst\"]}, { \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | + | 2 | Dedup | 1 | | + | 1 | PassThrough | 3 | | + | 3 | Start | | | + | 25 | Project | 24 | | + | 24 | Filter | 16 | | + | 16 | AppendVertices | 15 | { "props": "[{ \"props\":[\"name\",\"age\",\"_tag\"]},{\"props\":[\"name\",\"speciality\",\"_tag\"] },{ \"props\":[\"name\",\"_tag\"]}]"} | + | 15 | Traverse | 14 | {"vertexProps": "[{\"props\":[\"age\"] }]", "edgeProps": "[{ \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | + | 14 | Traverse | 13 | {"vertexProps": "", "edgeProps": "[{ \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | + | 13 | Traverse | 11 | {"vertexProps": "[{ \"props\":[\"name\",\"age\",\"_tag\"]},{\"props\":[\"name\",\"speciality\",\"_tag\"] },{ \"props\":[\"name\",\"_tag\"]}]", "edgeProps": "[{ \"props\": [\"_type\", \"_rank\", \"_dst\"]}, { \"props\": [\"_type\", \"_rank\", \"_dst\"]}]" } | + | 11 | Argument | | | @distonly Scenario: test properties: diff --git a/tests/tck/features/schema/Comment.feature b/tests/tck/features/schema/Comment.feature index b6fef409e43..b1d4f890ff3 100644 --- a/tests/tck/features/schema/Comment.feature +++ b/tests/tck/features/schema/Comment.feature @@ -124,7 +124,7 @@ Feature: Schema Comment """ ALTER TAG test_comment_tag comment = ; ALTER TAG test_comment_tag ADD (gender string COMMENT 'The gender.'); - ALTER 
TAG test_comment_tag CHANGE (name string NOT NULL); + ALTER TAG test_comment_tag CHANGE (name string NOT NULL DEFAULT "jack"); ALTER TAG test_comment_tag DROP (age); """ Then the execution should be successful @@ -134,15 +134,15 @@ Feature: Schema Comment SHOW CREATE tag test_comment_tag; """ Then the result should be, in any order: - | Tag | Create Tag | - | "test_comment_tag" | 'CREATE TAG `test_comment_tag` (\n `name` string NOT NULL,\n `gender` string NULL COMMENT "The gender."\n) ttl_duration = 0, ttl_col = "", comment = ' | + | Tag | Create Tag | + | "test_comment_tag" | 'CREATE TAG `test_comment_tag` (\n `name` string NOT NULL DEFAULT \"jack\",\n `gender` string NULL COMMENT "The gender."\n) ttl_duration = 0, ttl_col = "", comment = ' | When executing query: """ DESC tag test_comment_tag; """ Then the result should be, in any order: | Field | Type | Null | Default | Comment | - | "name" | "string" | "NO" | EMPTY | EMPTY | + | "name" | "string" | "NO" | "jack" | EMPTY | | "gender" | "string" | "YES" | EMPTY | "The gender." | # tag index When executing query: @@ -188,7 +188,7 @@ Feature: Schema Comment """ ALTER EDGE test_comment_edge comment = ; ALTER EDGE test_comment_edge ADD (gender string COMMENT 'The gender.'); - ALTER EDGE test_comment_edge CHANGE (name string NOT NULL); + ALTER EDGE test_comment_edge CHANGE (name string NOT NULL DEFAULT "jack"); ALTER EDGE test_comment_edge DROP (age); """ Then the execution should be successful @@ -198,15 +198,15 @@ Feature: Schema Comment SHOW CREATE edge test_comment_edge; """ Then the result should be, in any order: - | Edge | Create Edge | - | "test_comment_edge" | 'CREATE EDGE `test_comment_edge` (\n `name` string NOT NULL,\n `gender` string NULL COMMENT "The gender."\n) ttl_duration = 0, ttl_col = "", comment = ' | + | Edge | Create Edge | + | "test_comment_edge" | 'CREATE EDGE `test_comment_edge` (\n `name` string NOT NULL DEFAULT \"jack\",\n `gender` string NULL COMMENT "The gender."\n) ttl_duration = 0, ttl_col = "", comment = ' | When executing query: """ DESC edge test_comment_edge; """ Then the result should be, in any order: | Field | Type | Null | Default | Comment | - | "name" | "string" | "NO" | EMPTY | EMPTY | + | "name" | "string" | "NO" | "jack" | EMPTY | | "gender" | "string" | "YES" | EMPTY | "The gender." | # edge index When executing query: diff --git a/tests/tck/features/schema/Schema.feature b/tests/tck/features/schema/Schema.feature index d06bc0bad9c..0f8c0bbb5d9 100644 --- a/tests/tck/features/schema/Schema.feature +++ b/tests/tck/features/schema/Schema.feature @@ -516,7 +516,7 @@ Feature: Insert string vid of vertex and edge """ ALTER TAG t CHANGE (description string NOT NULL) """ - Then the execution should be successful + Then a SemanticError should be raised at runtime: Column `description' must have a default value if it's not nullable And wait 3 seconds # insert When executing query: @@ -532,13 +532,6 @@ Feature: Insert string vid of vertex and edge Then the result should be, in any order: | t.name | t.age | t.description | | "N/A" | -1 | "some one" | - And wait 3 seconds - # insert without default prop, failed - When executing query: - """ - INSERT VERTEX t() VALUES "1":() - """ - Then a ExecutionError should be raised at runtime: Storage Error: The not null field doesn't have a default value. 
# test alter edge with default value
When executing query:
"""
@@ -569,14 +562,7 @@ Feature: Insert string vid of vertex and edge
"""
ALTER EDGE e CHANGE (description string NOT NULL)
"""
- Then the execution should be successful
- And wait 3 seconds
- # insert without default prop, failed
- When executing query:
- """
- INSERT EDGE e() VALUES "1"->"2":()
- """
- Then a SemanticError should be raised at runtime: The property `description' is not nullable and has no default value.
+ Then a SemanticError should be raised at runtime: Column `description' must have a default value if it's not nullable
# test alter edge with timestamp default
When executing query:
"""
@@ -850,6 +836,78 @@ Feature: Insert string vid of vertex and edge
DROP SPACE issue2009;
"""
Then the execution should be successful
+ Then drop the used space
+
+ Scenario: alter a tag to add a not nullable column which doesn't have a default value
+ Given an empty graph
+ And create a space with following options:
+ | partition_num | 1 |
+ | replica_factor | 1 |
+ | vid_type | FIXED_STRING(20) |
+ When executing query:
+ """
+ CREATE TAG person(age int);
+ """
+ Then the execution should be successful
+ When executing query:
+ """
+ CREATE TAG INDEX person_age_index ON person(age);
+ """
+ Then the execution should be successful
+ And wait 3 seconds
+ When executing query:
+ """
+ INSERT VERTEX person values "1":(23);
+ """
+ Then the execution should be successful
+ When executing query:
+ """
+ LOOKUP ON person YIELD properties(VERTEX) AS props;
+ """
+ Then the result should be, in any order, with relax comparison:
+ | props |
+ | {age: 23} |
+ When executing query:
+ """
+ ALTER TAG person ADD (gender bool NOT NULL);
+ """
+ Then a SemanticError should be raised at runtime: Column `gender' must have a default value if it's not nullable
+
+ Scenario: alter an edge to add a not nullable column which doesn't have a default value
+ Given an empty graph
+ And create a space with following options:
+ | partition_num | 1 |
+ | replica_factor | 1 |
+ | vid_type | FIXED_STRING(20) |
+ When executing query:
+ """
+ CREATE EDGE person(age int);
+ """
+ Then the execution should be successful
+ When executing query:
+ """
+ CREATE EDGE INDEX person_age_index ON person(age);
+ """
+ Then the execution should be successful
+ And wait 3 seconds
+ When executing query:
+ """
+ INSERT EDGE person values "1"->"2":(23);
+ """
+ Then the execution should be successful
+ When executing query:
+ """
+ LOOKUP ON person YIELD properties(EDGE) AS props;
+ """
+ Then the result should be, in any order, with relax comparison:
+ | props |
+ | {age: 23} |
+ When executing query:
+ """
+ ALTER EDGE person ADD (gender bool NOT NULL);
+ """
+ Then a SemanticError should be raised at runtime: Column `gender' must have a default value if it's not nullable
+ Then drop the used space
Scenario: Don't allow DOT in schema name
Given an empty graph
@@ -862,3 +920,4 @@ Feature: Insert string vid of vertex and edge
CREATE TAG `tag.prop`()
"""
Then a SyntaxError should be raised at runtime: Don't allow DOT in label: near `.prop`()'
+ Then drop the used space
diff --git a/tests/tck/slowquery/KillSlowQueryBaseTest.feature b/tests/tck/slowquery/KillSlowQueryBaseTest.feature
new file mode 100644
index 00000000000..76badfb4601
--- /dev/null
+++ b/tests/tck/slowquery/KillSlowQueryBaseTest.feature
@@ -0,0 +1,101 @@
+# Copyright (c) 2022 vesoft inc. All rights reserved.
+#
+# This source code is licensed under Apache 2.0 License.
+Feature: Slow Query Test
+
+ Background:
+ Given a graph with space named "nba"
+
+ Scenario: [slowquery_test_001] without sessionId and planId
+ When executing query:
+ """
+ KILL QUERY ()
+ """
+ Then a SyntaxError should be raised at runtime: syntax error near `)'
+
+ Scenario: [slowquery_test_002] without planId
+ When executing query:
+ """
+ KILL QUERY (session=123)
+ """
+ Then a SyntaxError should be raised at runtime: syntax error near `)'
+
+ Scenario: [slowquery_test_003] without sessionId
+ When executing query:
+ """
+ KILL QUERY (plan=987654321)
+ """
+ Then an ExecutionError should be raised at runtime: ExecutionPlanId[987654321] does not exist in current Session.
+
+ Scenario: [slowquery_test_004] wrong sessionId and planId
+ When executing query:
+ """
+ KILL QUERY (session=987654321, plan=987654321)
+ """
+ Then an ExecutionError should be raised at runtime: SessionId[987654321] does not exist
+
+ Scenario: [slowquery_test_005] sessionId is STRING
+ When executing query:
+ """
+ KILL QUERY (session="100", plan=101)
+ """
+ Then a SyntaxError should be raised at runtime: syntax error near `", plan='
+
+ Scenario: [slowquery_test_006] planId is STRING
+ When executing query:
+ """
+ KILL QUERY (session=100, plan="101")
+ """
+ Then a SyntaxError should be raised at runtime: syntax error near `")'
+
+ Scenario: [slowquery_test_007] sessionId and planId are STRING
+ When executing query:
+ """
+ KILL QUERY (session="100", plan="101")
+ """
+ Then a SyntaxError should be raised at runtime: syntax error near `", plan='
+
+ Scenario: [slowquery_test_008] wrong sessionId
+ When executing query:
+ """
+ KILL QUERY (session=$-.sid)
+ """
+ Then a SyntaxError should be raised at runtime: syntax error near `)'
+
+ Scenario: [slowquery_test_009] wrong planId
+ When executing query:
+ """
+ KILL QUERY (plan=$-.eid)
+ """
+ Then a SemanticError should be raised at runtime: `$-.eid', not exist prop `eid'
+
+ Scenario: [slowquery_test_010] wrong sessionId and planId
+ When executing query:
+ """
+ KILL QUERY (session=$-.sid, plan=$-.eid)
+ """
+ Then a SemanticError should be raised at runtime: `$-.sid', not exist prop `sid'
+
+ Scenario: [slowquery_test_011] show local queries
+ When executing query:
+ """
+ SHOW LOCAL QUERIES
+ """
+ Then the execution should be successful
+
+ Scenario: [slowquery_test_012] show queries
+ When executing query:
+ """
+ SHOW QUERIES
+ """
+ Then the execution should be successful
+
+ Scenario: [slowquery_test_013] sessionId is string
+ When executing query via graph 1:
+ """
+ SHOW QUERIES
+ | YIELD $-.SessionID AS sid, $-.`Query` AS eid, $-.DurationInUSec AS dur WHERE $-.DurationInUSec > 10000000
+ | ORDER BY $-.dur
+ | KILL QUERY (session=$-.sid, plan=$-.eid)
+ """
+ Then a SemanticError should be raised at runtime: $-.eid, Session ID must be an integer but was STRING
diff --git a/tests/tck/slowquery/KillSlowQueryViaDiffrentService.feature b/tests/tck/slowquery/KillSlowQueryViaDiffrentService.feature
index 63a6fe57642..bd049e943bf 100644
--- a/tests/tck/slowquery/KillSlowQueryViaDiffrentService.feature
+++ b/tests/tck/slowquery/KillSlowQueryViaDiffrentService.feature
@@ -3,36 +3,26 @@
# This source code is licensed under Apache 2.0 License.
Feature: Slow Query Test
+ Background:
+ Given a graph with space named "nba"
+
# There should be at least 2 threads to run this test case suite.
- Scenario: Set up slow query at first graph service
+ Scenario: [slowquery_test_101] Setup slow query
# Set up a slow query which will be killed later.
- Given a graph with space named "nba" When executing query via graph 0: """ GO 100000 STEPS FROM "Tim Duncan" OVER like YIELD like._dst """ Then an ExecutionError should be raised at runtime: Execution had been killed - Scenario: Show all queries and kill all slow queries at second graph service - When executing query via graph 1: - """ - SHOW LOCAL QUERIES - """ - Then the execution should be successful + Scenario: [slowquery_test_102] Kill go sentence When executing query via graph 1: """ SHOW QUERIES """ Then the execution should be successful - # In case that rebuild indexes cost too much time. + # make sure the record exists And wait 10 seconds - When executing query via graph 1: - """ - SHOW QUERIES - """ - Then the result should be, in order: - | SessionID | ExecutionPlanID | User | Host | StartTime | DurationInUSec | Status | Query | - | /\d+/ | /\d+/ | "root" | /.*/ | /.*/ | /\d+/ | "RUNNING" | "GO 100000 STEPS FROM \"Tim Duncan\" OVER like YIELD like._dst" | When executing query via graph 1: """ SHOW QUERIES @@ -42,49 +32,32 @@ Feature: Slow Query Test Then the result should be, in order: | sid | eid | dur | | /\d+/ | /\d+/ | /\d+/ | + # sessionId not exist When executing query via graph 1: """ - KILL QUERY () - """ - Then a SyntaxError should be raised at runtime: syntax error near `)' - When executing query via graph 1: - """ - KILL QUERY (session=123) - """ - Then a SyntaxError should be raised at runtime: syntax error near `)' - When executing query via graph 1: - """ - KILL QUERY (plan=987654321) - """ - Then an ExecutionError should be raised at runtime: ExecutionPlanId[987654321] does not exist in current Session. - When executing query via graph 1: - """ - KILL QUERY (session=987654321, plan=987654321) - """ - Then an ExecutionError should be raised at runtime: SessionId[987654321] does not exist - When executing query via graph 1: - """ - KILL QUERY (session=$-.sid, plan=$-.eid) - """ - Then an SemanticError should be raised at runtime: `$-.sid', not exist prop `sid' - When executing query via graph 1: - """ - KILL QUERY (plan=$-.eid) + SHOW QUERIES + | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur + WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100000 STEPS" + | ORDER BY $-.dur + | KILL QUERY(session=200, plan=$-.eid) """ - Then an SemanticError should be raised at runtime: `$-.eid', not exist prop `eid' + Then an ExecutionError should be raised at runtime: SessionId[200] does not exist + # planId not exist When executing query via graph 1: """ SHOW QUERIES - | YIELD $-.SessionID AS sid, $-.`Query` AS eid, $-.DurationInUSec AS dur WHERE $-.DurationInUSec > 10000000 + | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur + WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100000 STEPS" | ORDER BY $-.dur - | KILL QUERY (session=$-.sid, plan=$-.eid) + | KILL QUERY(session=$-.sid, plan=201) """ - Then an SemanticError should be raised at runtime: $-.eid, Session ID must be an integer but was STRING + Then an ExecutionError should be raised at runtime. 
+ # Kill GO statement
When executing query via graph 1:
"""
SHOW QUERIES
| YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
- WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO"
+ WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100000 STEPS"
| ORDER BY $-.dur
| KILL QUERY(session=$-.sid, plan=$-.eid)
"""
diff --git a/tests/tck/slowquery/KillSlowQueryViaSameService.feature b/tests/tck/slowquery/KillSlowQueryViaSameService.feature
index 55e70185f60..b911704d0e3 100644
--- a/tests/tck/slowquery/KillSlowQueryViaSameService.feature
+++ b/tests/tck/slowquery/KillSlowQueryViaSameService.feature
@@ -3,36 +3,26 @@
# This source code is licensed under Apache 2.0 License.
Feature: Slow Query Test
+ Background:
+ Given a graph with space named "nba"
+
# There should be at least 2 threads to run this test case suite.
- Scenario: Set up slow query
+ Scenario: [slowquery_test_201] Setup slow query
# Set up a slow query which will be killed later.
- Given a graph with space named "nba"
When executing query:
"""
GO 100000 STEPS FROM "Tim Duncan" OVER like YIELD like._dst
"""
Then an ExecutionError should be raised at runtime: Execution had been killed
- Scenario: Show all queries and kill all slow queries
- When executing query:
- """
- SHOW LOCAL QUERIES
- """
- Then the execution should be successful
+ Scenario: [slowquery_test_202] Kill GO statement on same service
When executing query:
"""
SHOW QUERIES
"""
Then the execution should be successful
- # In case that rebuild indexes cost too much time.
And wait 10 seconds
- When executing query:
- """
- SHOW QUERIES
- """
- Then the result should be, in order:
- | SessionID | ExecutionPlanID | User | Host | StartTime | DurationInUSec | Status | Query |
- | /\d+/ | /\d+/ | "root" | /.*/ | /.*/ | /\d+/ | "RUNNING" | "GO 100000 STEPS FROM \"Tim Duncan\" OVER like YIELD like._dst" |
+ # Make sure the record exists
When executing query:
"""
SHOW QUERIES
@@ -42,49 +32,11 @@ Feature: Slow Query Test
Then the result should be, in order:
| sid | eid | dur |
| /\d+/ | /\d+/ | /\d+/ |
- When executing query:
- """
- KILL QUERY ()
- """
- Then a SyntaxError should be raised at runtime: syntax error near `)'
- When executing query:
- """
- KILL QUERY (session=123)
- """
- Then a SyntaxError should be raised at runtime: syntax error near `)'
- When executing query:
- """
- KILL QUERY (plan=987654321)
- """
- Then an ExecutionError should be raised at runtime: ExecutionPlanId[987654321] does not exist in current Session.
- When executing query:
- """
- KILL QUERY (session=987654321, plan=987654321)
- """
- Then an ExecutionError should be raised at runtime: SessionId[987654321] does not exist
- When executing query:
- """
- KILL QUERY (session=$-.sid, plan=$-.eid)
- """
- Then an SemanticError should be raised at runtime: `$-.sid', not exist prop `sid'
- When executing query:
- """
- KILL QUERY (plan=$-.eid)
- """
- Then an SemanticError should be raised at runtime: `$-.eid', not exist prop `eid'
- When executing query:
- """
- SHOW QUERIES
- | YIELD $-.SessionID AS sid, $-.`Query` AS eid, $-.DurationInUSec AS dur WHERE $-.DurationInUSec > 10000000
- | ORDER BY $-.dur
- | KILL QUERY (session=$-.sid, plan=$-.eid)
- """
- Then an SemanticError should be raised at runtime: $-.eid, Session ID must be an integer but was STRING
When executing query:
"""
SHOW QUERIES
| YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
- WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO"
+ WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100000 STEPS"
| ORDER BY $-.dur
| KILL QUERY(session=$-.sid, plan=$-.eid)
"""
diff --git a/tests/tck/slowquery/PermissionViaDifferentService.feature b/tests/tck/slowquery/PermissionViaDifferentService.feature
new file mode 100644
index 00000000000..de27708c250
--- /dev/null
+++ b/tests/tck/slowquery/PermissionViaDifferentService.feature
@@ -0,0 +1,146 @@
+# Copyright (c) 2021 vesoft inc. All rights reserved.
+#
+# This source code is licensed under Apache 2.0 License.
+Feature: Test kill queries permission from different services
+
+ Background:
+ Given a graph with space named "nba"
+
+ Scenario: [slowquery_test_301] Setup slow query by user root
+ # Set up a slow query which will be killed later.
+ When executing query via graph 1:
+ """
+ USE nba;
+ GO 100001 STEPS FROM "Tim Duncan" OVER like YIELD like._dst
+ """
+ Then an ExecutionError should be raised at runtime: Execution had been killed
+
+ Scenario: [slowquery_test_302] Kill successful by user root
+ When executing query:
+ """
+ CREATE USER IF NOT EXISTS test_permission WITH PASSWORD 'test';
+ GRANT ROLE USER ON nba TO test_permission;
+ """
+ Then the execution should be successful
+ And wait 10 seconds
+ # Make sure the record exists
+ When executing query with user test_permission with password test:
+ """
+ USE nba;
+ SHOW QUERIES
+ | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
+ WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100001 STEPS";
+ """
+ Then the result should be, in order:
+ | sid | eid | dur |
+ | /\d+/ | /\d+/ | /\d+/ |
+ # Kill failed by user test_permission
+ When executing query with user test_permission with password test:
+ """
+ USE nba;
+ SHOW QUERIES
+ | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
+ WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100001 STEPS"
+ | ORDER BY $-.dur
+ | KILL QUERY(session=$-.sid, plan=$-.eid)
+ """
+ Then a PermissionError should be raised at runtime: Only GOD role could kill others' queries.
+ # Kill successful by user root
+ When executing query with user root with password nebula:
+ """
+ USE nba;
+ SHOW QUERIES
+ | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
+ WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100001 STEPS"
+ | ORDER BY $-.dur
+ | KILL QUERY(session=$-.sid, plan=$-.eid)
+ """
+ Then the execution should be successful
+
+ Scenario: [slowquery_test_303] Setup slow query by user test_permission
+ When executing query:
+ """
+ CREATE USER IF NOT EXISTS test_permission WITH PASSWORD 'test';
+ GRANT ROLE USER ON nba TO test_permission;
+ """
+ Then the execution should be successful
+ And wait 5 seconds
+ When executing query with user test_permission with password test:
+ """
+ USE nba;
+ GO 100002 STEPS FROM "Tim Duncan" OVER like YIELD like._dst
+ """
+ Then an ExecutionError should be raised at runtime: Execution had been killed
+
+ Scenario: [slowquery_test_304] Kill successful by user test_permission
+ When executing query:
+ """
+ SHOW QUERIES
+ """
+ Then the execution should be successful
+ And wait 15 seconds
+ # Make sure the record exists
+ When executing query with user test_permission with password test:
+ """
+ USE nba;
+ SHOW QUERIES
+ | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
+ WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100002 STEPS";
+ """
+ Then the result should be, in order:
+ | sid | eid | dur |
+ | /\d+/ | /\d+/ | /\d+/ |
+ When executing query with user test_permission with password test:
+ """
+ USE nba;
+ SHOW QUERIES
+ | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
+ WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100002 STEPS"
+ | ORDER BY $-.dur
+ | KILL QUERY(session=$-.sid, plan=$-.eid)
+ """
+ Then the execution should be successful
+
+ Scenario: [slowquery_test_305] Setup slow query by user test_permission
+ When executing query:
+ """
+ CREATE USER IF NOT EXISTS test_permission WITH PASSWORD 'test';
+ GRANT ROLE USER ON nba TO test_permission;
+ """
+ Then the execution should be successful
+ And wait 5 seconds
+ When executing query with user test_permission with password test:
+ """
+ USE nba;
+ GO 100003 STEPS FROM "Tim Duncan" OVER like YIELD like._dst
+ """
+ Then an ExecutionError should be raised at runtime: Execution had been killed
+
+ Scenario: [slowquery_test_306] Kill successful by user root
+ When executing query:
+ """
+ SHOW QUERIES
+ """
+ Then the execution should be successful
+ And wait 15 seconds
+ # Make sure the record exists
+ When executing query with user root with password nebula:
+ """
+ USE nba;
+ SHOW QUERIES
+ | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
+ WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100003 STEPS";
+ """
+ Then the result should be, in order:
+ | sid | eid | dur |
+ | /\d+/ | /\d+/ | /\d+/ |
+ When executing query with user root with password nebula:
+ """
+ USE nba;
+ SHOW QUERIES
+ | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
+ WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100003 STEPS"
+ | ORDER BY $-.dur
+ | KILL QUERY(session=$-.sid, plan=$-.eid)
+ """
+ Then the execution should be successful
diff --git a/tests/tck/slowquery/PermissionViaSameService.feature b/tests/tck/slowquery/PermissionViaSameService.feature
new file mode 100644
index 00000000000..a09e816df51
--- /dev/null
+++ b/tests/tck/slowquery/PermissionViaSameService.feature
@@ -0,0 +1,99 @@
+# Copyright (c) 2021 vesoft inc. All rights reserved.
+#
+# This source code is licensed under Apache 2.0 License.
+Feature: Test kill queries from same service
+
+ Background:
+ Given a graph with space named "nba"
+
+ Scenario: [slowquery_test_401] Setup slow query
+ # Set up a slow query which will be killed later.
+ When executing query:
+ """
+ GO 100001 STEPS FROM "Tim Duncan" OVER like YIELD like._dst
+ """
+ Then an ExecutionError should be raised at runtime: Execution had been killed
+
+ Scenario: [slowquery_test_402] Kill successful by user root
+ When executing query:
+ """
+ CREATE USER IF NOT EXISTS test_permission WITH PASSWORD 'test';
+ GRANT ROLE USER ON nba TO test_permission;
+ """
+ Then the execution should be successful
+ And wait 10 seconds
+ # Make sure the record exists
+ When executing query with user test_permission with password test:
+ """
+ USE nba;
+ SHOW QUERIES
+ | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
+ WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100001 STEPS";
+ """
+ Then the result should be, in order:
+ | sid | eid | dur |
+ | /\d+/ | /\d+/ | /\d+/ |
+ When executing query with user test_permission with password test:
+ """
+ USE nba;
+ SHOW QUERIES
+ | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
+ WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100001 STEPS"
+ | ORDER BY $-.dur
+ | KILL QUERY(session=$-.sid, plan=$-.eid)
+ """
+ Then a PermissionError should be raised at runtime: Only GOD role could kill others' queries.
+ When executing query with user root with password nebula:
+ """
+ USE nba;
+ SHOW QUERIES
+ | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
+ WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100001 STEPS"
+ | ORDER BY $-.dur
+ | KILL QUERY(session=$-.sid, plan=$-.eid)
+ """
+ Then the execution should be successful
+
+ Scenario: [slowquery_test_403] Setup slow query
+ When executing query:
+ """
+ CREATE USER IF NOT EXISTS test_permission WITH PASSWORD 'test';
+ GRANT ROLE USER ON nba TO test_permission;
+ """
+ Then the execution should be successful
+ And wait 5 seconds
+ When executing query with user test_permission with password test:
+ """
+ USE nba;
+ GO 100002 STEPS FROM "Tim Duncan" OVER like YIELD like._dst
+ """
+ Then an ExecutionError should be raised at runtime: Execution had been killed
+
+ Scenario: [slowquery_test_404] Kill successful by user test_permission
+ When executing query:
+ """
+ SHOW QUERIES
+ """
+ Then the execution should be successful
+ # Make sure the record exists
+ And wait 15 seconds
+ When executing query with user test_permission with password test:
+ """
+ USE nba;
+ SHOW QUERIES
+ | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
+ WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100002 STEPS";
+ """
+ Then the result should be, in order:
+ | sid | eid | dur |
+ | /\d+/ | /\d+/ | /\d+/ |
+ When executing query with user test_permission with password test:
+ """
+ USE nba;
+ SHOW QUERIES
+ | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
+ WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100002 STEPS"
+ | ORDER BY $-.dur
+ | KILL QUERY(session=$-.sid, plan=$-.eid)
+ """
+ Then the execution should be successful
diff --git a/tests/tck/slowquery/permissionViaDifferentService.feature b/tests/tck/slowquery/permissionViaDifferentService.feature
deleted file mode 100644
index 9dd4e23b073..00000000000
--- a/tests/tck/slowquery/permissionViaDifferentService.feature
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright (c) 2022 vesoft inc. All rights reserved.
-#
-# This source code is licensed under Apache 2.0 License.
-Feature: Test kill queries permission from different services
-
- Scenario: Setup slow query in service 1
- # Set up a slow query which will be killed later.
- Given a graph with space named "nba"
- When executing query via graph 1:
- """
- USE nba;
- GO 100000 STEPS FROM "Tim Duncan" OVER like YIELD like._dst
- """
- Then an ExecutionError should be raised at runtime: Execution had been killed
-
- Scenario: Test permisson of kill queries from service 0
- Given a graph with space named "nba"
- When executing query:
- """
- CREATE USER IF NOT EXISTS test_permission WITH PASSWORD 'test';
- GRANT ROLE USER ON nba TO test_permission;
- """
- Then the execution should be successful
- And wait 3 seconds
- When executing query with user test_permission with password test:
- """
- USE nba;
- SHOW QUERIES
- | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
- WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO"
- | ORDER BY $-.dur
- | KILL QUERY(session=$-.sid, plan=$-.eid)
- """
- Then an PermissionError should be raised at runtime: Only GOD role could kill others' queries.
- When executing query with user root with password nebula:
- """
- USE nba;
- SHOW QUERIES
- | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
- WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO"
- | ORDER BY $-.dur
- | KILL QUERY(session=$-.sid, plan=$-.eid)
- """
- Then the execution should be successful
diff --git a/tests/tck/slowquery/permissionViaSameService.feature b/tests/tck/slowquery/permissionViaSameService.feature
deleted file mode 100644
index 61043e82385..00000000000
--- a/tests/tck/slowquery/permissionViaSameService.feature
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (c) 2022 vesoft inc. All rights reserved.
-#
-# This source code is licensed under Apache 2.0 License.
-Feature: Test kill queries from same service
-
- Scenario: Setup slow query
- # Set up a slow query which will be killed later.
- Given a graph with space named "nba"
- When executing query:
- """
- GO 100000 STEPS FROM "Tim Duncan" OVER like YIELD like._dst
- """
- Then an ExecutionError should be raised at runtime: Execution had been killed
-
- Scenario: Test permisson of kill queries
- Given a graph with space named "nba"
- When executing query:
- """
- CREATE USER IF NOT EXISTS test_permission WITH PASSWORD 'test';
- GRANT ROLE USER ON nba TO test_permission;
- """
- Then the execution should be successful
- And wait 3 seconds
- When executing query with user test_permission with password test:
- """
- USE nba;
- SHOW QUERIES
- | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
- WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO"
- | ORDER BY $-.dur
- | KILL QUERY(session=$-.sid, plan=$-.eid)
- """
- Then an PermissionError should be raised at runtime: Only GOD role could kill others' queries.
- When executing query with user root with password nebula:
- """
- USE nba;
- SHOW QUERIES
- | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
- WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO"
- | ORDER BY $-.dur
- | KILL QUERY(session=$-.sid, plan=$-.eid)
- """
- Then the execution should be successful
diff --git a/tests/tck/steps/test_kill_permission_via_different_service.py b/tests/tck/steps/test_kill_permission_via_different_service.py
index bbbb0fd842c..c20146ea4d9 100644
--- a/tests/tck/steps/test_kill_permission_via_different_service.py
+++ b/tests/tck/steps/test_kill_permission_via_different_service.py
@@ -4,4 +4,4 @@
from pytest_bdd import scenarios
-scenarios('slowquery/permissionViaDifferentService.feature')
+scenarios('slowquery/PermissionViaDifferentService.feature')
diff --git a/tests/tck/steps/test_kill_permission_via_same_service.py b/tests/tck/steps/test_kill_permission_via_same_service.py
index 0eb0b764eb5..e160535c8f3 100644
--- a/tests/tck/steps/test_kill_permission_via_same_service.py
+++ b/tests/tck/steps/test_kill_permission_via_same_service.py
@@ -4,4 +4,4 @@
from pytest_bdd import scenarios
-scenarios('slowquery/permissionViaSameService.feature')
+scenarios('slowquery/PermissionViaSameService.feature')
diff --git a/tests/tck/steps/test_kill_slow_query_base_test.py b/tests/tck/steps/test_kill_slow_query_base_test.py
new file mode 100644
index 00000000000..ead91174493
--- /dev/null
+++ b/tests/tck/steps/test_kill_slow_query_base_test.py
@@ -0,0 +1,7 @@
+# Copyright (c) 2022 vesoft inc. All rights reserved.
+#
+# This source code is licensed under Apache 2.0 License.
+
+from pytest_bdd import scenarios
+
+scenarios('slowquery/KillSlowQueryBaseTest.feature')
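Taken together, the slow-query suites above pin down the KILL QUERY contract: `session` and `plan` take integer values, either as literals or as integer columns piped in from SHOW QUERIES; a bare or string-typed argument fails as a syntax or semantic error; unknown IDs fail as execution errors; and only the GOD role may kill another user's queries. For reference, this is the canonical nGQL pipeline the scenarios exercise, shown here on its own (the one-second duration threshold and the CONTAINS filter are just the values these tests happen to use, not requirements of the statement):

    SHOW QUERIES
    | YIELD $-.SessionID AS sid, $-.ExecutionPlanID AS eid, $-.DurationInUSec AS dur
      WHERE $-.DurationInUSec > 1000000 AND $-.`Query` CONTAINS "GO 100000 STEPS"
    | ORDER BY $-.dur
    | KILL QUERY(session=$-.sid, plan=$-.eid)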