From 673168ee46c020dd894758a13560e7e60eea8ed8 Mon Sep 17 00:00:00 2001
From: Herman van Hovell
Date: Wed, 8 Feb 2017 16:05:15 +0100
Subject: [PATCH] Code Review.

---
 .../org/apache/spark/sql/types/package.scala  |  2 +-
 .../org/apache/spark/sql/hive/hiveUDFs.scala  |  3 ---
 .../spark/sql/hive/orc/OrcSourceSuite.scala   | 19 +++++--------------
 3 files changed, 6 insertions(+), 18 deletions(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/package.scala
index 1f3de962781f9..f29cbc2069e39 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/package.scala
@@ -23,7 +23,7 @@ package org.apache.spark.sql
  */
 package object types {
   /**
-   * Metadata key used to store the the raw hive type string in the metadata of StructField. This
+   * Metadata key used to store the raw hive type string in the metadata of StructField. This
    * is relevant for datatypes that do not have a direct Spark SQL counterpart, such as CHAR and
    * VARCHAR. We need to preserve the original type in order to invoke the correct object
    * inspector in Hive.
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
index c830edf60bee8..4590197548104 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
@@ -51,9 +51,6 @@ private[hive] case class HiveSimpleUDF(
   @transient
   lazy val function = funcWrapper.createFunction[UDF]()
-  {
-    function
-  }
 
   @transient
   private lazy val method =
     function.getResolver.getEvalMethod(children.map(_.dataType.toTypeInfo).asJava)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcSourceSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcSourceSuite.scala
index 09a78f7692415..a05b29c579b34 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcSourceSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcSourceSuite.scala
@@ -152,18 +152,7 @@ abstract class OrcSuite extends QueryTest with TestHiveSingleton with BeforeAndA
     assert(new OrcOptions(Map("Orc.Compress" -> "NONE")).compressionCodec == "NONE")
   }
 
-  test("SPARK-18220: read Hive orc table with varchar column") {
-    val hiveClient = spark.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client
-    try {
-      hiveClient.runSqlHive("CREATE TABLE orc_varchar(a VARCHAR(10)) STORED AS orc")
-      hiveClient.runSqlHive("INSERT INTO TABLE orc_varchar SELECT 'a' FROM (SELECT 1) t")
-      checkAnswer(spark.table("orc_varchar"), Row("a"))
-    } finally {
-      hiveClient.runSqlHive("DROP TABLE IF EXISTS orc_varchar")
-    }
-  }
-
-  test("SPARK-19459: read char/varchar column written by Hive") {
+  test("SPARK-19459/SPARK-18220: read char/varchar column written by Hive") {
     val hiveClient = spark.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client
     val location = Utils.createTempDir().toURI
     try {
@@ -174,7 +163,7 @@ abstract class OrcSuite extends QueryTest with TestHiveSingleton with BeforeAndA
          | b CHAR(10),
          | c VARCHAR(10))
          |STORED AS orc""".stripMargin)
-      // Hive throws an exception if I assign the location in the create table statment.
+      // Hive throws an exception if I assign the location in the create table statement.
       hiveClient.runSqlHive(
         s"ALTER TABLE hive_orc SET LOCATION '$location'")
       hiveClient.runSqlHive(
@@ -190,7 +179,9 @@ abstract class OrcSuite extends QueryTest with TestHiveSingleton with BeforeAndA
          | c VARCHAR(10))
          |STORED AS orc
          |LOCATION '$location'""".stripMargin)
-      checkAnswer(spark.table("spark_orc"), Row("a", "b         ", "c"))
+      val result = Row("a", "b         ", "c")
+      checkAnswer(spark.table("hive_orc"), result)
+      checkAnswer(spark.table("spark_orc"), result)
     } finally {
       hiveClient.runSqlHive("DROP TABLE IF EXISTS hive_orc")
       hiveClient.runSqlHive("DROP TABLE IF EXISTS spark_orc")