
Commit 83b6fc3: minor fix

scwf committed Dec 30, 2014
1 parent 9bf12f8 commit 83b6fc3

Showing 4 changed files with 19 additions and 11 deletions.
@@ -43,8 +43,7 @@ private[sql] case class JSONRelation(
 
   private def baseRDD = sqlContext.sparkContext.textFile(fileName)
 
-  override val schema =
-    userSpecifiedSchema.getOrElse(
+  override val schema = userSpecifiedSchema.getOrElse(
     JsonRDD.inferSchema(
       baseRDD,
       samplingRatio,
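Behavior is unchanged by the collapse: Option.getOrElse takes its default by name, so JsonRDD.inferSchema still runs only when no user-specified schema is present. A self-contained Scala sketch of that semantics (the expensive() helper is invented for illustration):

    // Option.getOrElse(default: => B) evaluates the default expression only on None.
    def expensive(): Int = { println("inferring"); 42 }
    Some(7).getOrElse(expensive())              // 7; "inferring" never prints
    (None: Option[Int]).getOrElse(expensive())  // prints "inferring", returns 42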
21 changes: 16 additions & 5 deletions sql/core/src/main/scala/org/apache/spark/sql/sources/ddl.scala
@@ -100,9 +100,15 @@ private[sql] class DDLParser extends StandardTokenParsers with PackratParsers wi
   protected lazy val pair: Parser[(String, String)] = ident ~ stringLit ^^ { case k ~ v => (k,v) }
 
   protected lazy val column: Parser[StructField] =
-    ident ~ ident ^^ { case name ~ typ =>
+    ( ident ~ ident ^^ { case name ~ typ =>
       StructField(name, metastoreTypes.toDataType(typ))
     }
+    |
+    ident ~ ("decimal" ~ "(" ~> numericLit) ~ ("," ~> numericLit <~ ")") ^^ {
+      case name ~ precision ~ scale =>
+        StructField(name, DecimalType(precision.toInt, scale.toInt))
+    }
+    )
   }
 
   /**
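With the new second alternative, the column list of a data source DDL statement can carry fixed-precision decimal columns: a column written as name decimal(p, s) becomes StructField(name, DecimalType(p, s)), while every other name/type pair still goes through metastoreTypes.toDataType. A sketch of a statement the extended grammar would accept (the table name, columns, data source, and path are invented for illustration):

    sqlContext.sql(
      """
        |CREATE TEMPORARY TABLE prices (item string, amount decimal(10, 2))
        |USING org.apache.spark.sql.json
        |OPTIONS (path '/tmp/prices.json')
      """.stripMargin)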
@@ -121,8 +127,8 @@ private[sql] class MetastoreTypes extends RegexParsers {
     "bigint" ^^^ LongType |
     "binary" ^^^ BinaryType |
     "boolean" ^^^ BooleanType |
-    fixedDecimalType |                     // Hive 0.13+ decimal with precision/scale
-    "decimal" ^^^ DecimalType.Unlimited |  // Hive 0.12 decimal with no precision/scale
+    fixedDecimalType |                     // decimal with precision/scale
+    "decimal" ^^^ DecimalType.Unlimited |  // decimal with no precision/scale
     "date" ^^^ DateType |
     "timestamp" ^^^ TimestampType |
     "varchar\\((\\d+)\\)".r ^^^ StringType
@@ -204,8 +210,13 @@ private[sql] case class CreateTableUsing(
     }
     val dataSource =
       clazz.newInstance().asInstanceOf[org.apache.spark.sql.sources.SchemaRelationProvider]
-    val relation = dataSource.createRelation(
-      sqlContext, new CaseInsensitiveMap(options), Some(StructType(tableCols)))
+    val relation = if (tableCols.isEmpty) {
+      dataSource.createRelation(
+        sqlContext, new CaseInsensitiveMap(options))
+    } else {
+      dataSource.createRelation(
+        sqlContext, new CaseInsensitiveMap(options), Some(StructType(tableCols)))
+    }
 
     sqlContext.baseRelationToSchemaRDD(relation).registerTempTable(tableName)
     Seq.empty
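The new branch keeps both provider entry points reachable from DDL: with no column list, tableCols is empty and the data source is asked to infer its own schema; with a column list, the parsed StructType is passed through as the user-specified schema. For illustration, the two statements below would drive the two branches (table names, data source, and paths invented):

    // tableCols empty: the two-argument createRelation is called and
    // the provider infers the schema.
    sqlContext.sql(
      "CREATE TEMPORARY TABLE logs USING org.apache.spark.sql.json " +
        "OPTIONS (path '/tmp/logs.json')")

    // tableCols non-empty: createRelation(sqlContext, options, Some(schema)).
    sqlContext.sql(
      "CREATE TEMPORARY TABLE typedLogs (ts timestamp, msg string) " +
        "USING org.apache.spark.sql.json OPTIONS (path '/tmp/logs.json')")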
@@ -41,9 +41,7 @@ trait RelationProvider {
    * Note: the parameters' keywords are case insensitive and this insensitivity is enforced
    * by the Map that is passed to the function.
    */
-  def createRelation(
-      sqlContext: SQLContext,
-      parameters: Map[String, String]): BaseRelation
+  def createRelation(sqlContext: SQLContext, parameters: Map[String, String]): BaseRelation
 }
 
 /**
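For reference, a minimal RelationProvider against the collapsed signature might look like the sketch below. The class name and one-column schema are invented, the import paths assume this era's source layout, and a real source would also mix a scan trait such as TableScan into the relation so it can produce rows:

    import org.apache.spark.sql.SQLContext
    // Assumed 1.2-era location of the type API; it moved in later releases.
    import org.apache.spark.sql.catalyst.types.{StringType, StructField, StructType}
    import org.apache.spark.sql.sources.{BaseRelation, RelationProvider}

    class ExampleProvider extends RelationProvider {
      override def createRelation(
          sqlContext: SQLContext,
          parameters: Map[String, String]): BaseRelation = {
        val ctx = sqlContext  // captured so the anonymous class can return it
        new BaseRelation {
          override def sqlContext: SQLContext = ctx
          override def schema: StructType =
            StructType(StructField("value", StringType, nullable = true) :: Nil)
        }
      }
    }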
@@ -70,7 +70,7 @@ class QueryTest extends PlanTest {
         """.stripMargin)
     }
 
-    if (prepareAnswer(convertedAnswer) != prepareAnswer(sparkAnswer)) { // issues here, sparkAnswer may be GenericRow[]
+    if (prepareAnswer(convertedAnswer) != prepareAnswer(sparkAnswer)) {
       fail(s"""
         |Results do not match for query:
         |${rdd.logicalPlan}
