This repository was archived by the owner on Oct 18, 2021. It is now read-only.

Commit 3c0f4c6

Merge pull request #123 from Nicole00/v2.5.0
fix numeric check & update client version
Nicole00 authored Aug 19, 2021
2 parents 6a1a0e1 + c86f1a7 · commit 3c0f4c6
Showing 4 changed files with 13 additions and 26 deletions.
2 changes: 1 addition & 1 deletion nebula-exchange/pom.xml
@@ -28,7 +28,7 @@
     <scala-logging.version>3.9.2</scala-logging.version>
     <scala-xml.version>2.11.0-M4</scala-xml.version>
     <scopt.version>3.7.1</scopt.version>
-    <nebula.version>2.0.0-SNAPSHOT</nebula.version>
+    <nebula.version>2.5.0</nebula.version>
     <s2.version>1.0.0</s2.version>
     <neo.version>2.4.5-M1</neo.version>
     <gremlin.version>3.4.6</gremlin.version>
2 changes: 1 addition & 1 deletion nebula-spark-connector/pom.xml
@@ -14,7 +14,7 @@

   <properties>
     <spark.version>2.4.4</spark.version>
-    <nebula.version>2.0.0-SNAPSHOT</nebula.version>
+    <nebula.version>2.5.0</nebula.version>
     <compiler.source.version>1.8</compiler.source.version>
     <compiler.target.version>1.8</compiler.target.version>
     <scalatest.version>3.2.3</scalatest.version>
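
Both poms move the same nebula.version property from the 2.0.0 snapshot to the released 2.5.0 client. For anyone consuming the matching release from an sbt build, a minimal equivalent is sketched below; the "com.vesoft" % "client" coordinate is an assumption about which dependency the property feeds, so verify it against the poms' <dependency> entries.

    // build.sbt sketch: pin the Nebula Java client to the same 2.5.0
    // release the poms now reference via <nebula.version>.
    // The "com.vesoft" % "client" coordinate is assumed, not shown in this diff.
    libraryDependencies += "com.vesoft" % "client" % "2.5.0"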
6 changes: 5 additions & 1 deletion (object NebulaUtils)
@@ -85,7 +85,11 @@ object NebulaUtils
   }

   def isNumic(str: String): Boolean = {
-    for (char <- str.toCharArray) {
+    val newStr: String = if (str.startsWith("-")) {
+      str.substring(1)
+    } else { str }
+
+    for (char <- newStr.toCharArray) {
       if (!Character.isDigit(char)) return false
     }
     true
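
The patched check strips one leading minus sign before the digit scan, so negative numeric IDs now pass. A sketch of the resulting behavior (hypothetical call sites, not part of the commit):

    NebulaUtils.isNumic("123")   // true
    NebulaUtils.isNumic("-123")  // true; previously false, since '-' is not a digit
    NebulaUtils.isNumic("12a")   // false
    NebulaUtils.isNumic("1-2")   // false; only a leading '-' is tolerated
    NebulaUtils.isNumic("-")     // true; the empty remainder never enters the loop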
29 changes: 6 additions & 23 deletions (package object connector)
@@ -155,18 +155,10 @@ package object connector
     implicit val encoder: Encoder[NebulaGraphxVertex] =
       Encoders.bean[NebulaGraphxVertex](classOf[NebulaGraphxVertex])

-    val fields = vertexDataset.schema.fields
     vertexDataset
       .map(row => {
-        val vertexId = row.get(0)
-        val vid: Long = if (row.schema.fields(0).dataType == LongType) {
-          vertexId.toString.toLong
-        } else {
-          MurmurHash2.hash64(vertexId.toString.getBytes(),
-                             vertexId.toString.getBytes().length,
-                             0xc70f6907)
-        }
-
+        val vertexId = row.get(0)
+        val vid: Long = vertexId.toString.toLong
         val props: ListBuffer[Any] = ListBuffer()
         for (i <- row.schema.fields.indices) {
           if (i != 0) {
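
The vertex path no longer falls back to MurmurHash2 for non-numeric IDs: every vertex ID is parsed with toString.toLong, so string IDs must be numeric, which is exactly what the widened isNumic check (negatives included) can guard upstream. A hypothetical pre-check a caller could run before this conversion, sketched under that assumption:

    // toVid is not part of this commit; it mirrors the new numeric-only
    // contract by validating with isNumic before the Long parse.
    def toVid(id: Any): Long = {
      val s = id.toString
      require(NebulaUtils.isNumic(s), s"vertex id '$s' is not numeric")
      s.toLong
    }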
@@ -187,7 +179,6 @@
     implicit val encoder: Encoder[NebulaGraphxEdge] =
       Encoders.bean[NebulaGraphxEdge](classOf[NebulaGraphxEdge])

-    val fields = edgeDataset.schema.fields
     edgeDataset
       .map(row => {
         val props: ListBuffer[Any] = ListBuffer()
@@ -196,18 +187,10 @@
             props.append(row.get(i))
           }
         }
-        val srcId = row.get(0).toString.getBytes()
-        val dstId = row.get(1).toString.getBytes()
-        val edgeSrc = if (row.schema.fields(0).dataType == LongType) {
-          srcId.toString.toLong
-        } else {
-          MurmurHash2.hash64(srcId, srcId.length, 0xc70f6907)
-        }
-        val edgeDst = if (row.schema.fields(0).dataType == LongType) {
-          dstId.toString.toLong
-        } else {
-          MurmurHash2.hash64(dstId, dstId.length, 0xc70f6907)
-        }
+        val srcId = row.get(0)
+        val dstId = row.get(1)
+        val edgeSrc = srcId.toString.toLong
+        val edgeDst = dstId.toString.toLong

         val edgeProp = (row.get(2).toString.toLong, props.toList)
         org.apache.spark.graphx
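
Besides the simplification, the edge hunk removes two latent bugs in the old path: srcId and dstId were byte arrays, so srcId.toString.toLong could never parse, and the edgeDst branch tested fields(0) instead of fields(1). The new code parses both endpoints and the third column directly; a self-contained sketch of the resulting Edge construction, assuming column 0 is the source ID, column 1 the destination ID, and column 2 the ranking (the helper name is hypothetical):

    import org.apache.spark.graphx.Edge

    // Mirrors the new edge path: endpoints and ranking must be numeric
    // strings, otherwise toLong throws NumberFormatException.
    def toGraphxEdge(src: Any, dst: Any, rank: Any,
                     props: List[Any]): Edge[(Long, List[Any])] =
      Edge(src.toString.toLong, dst.toString.toLong,
           (rank.toString.toLong, props))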
