diff --git a/pom.xml b/pom.xml
index 6fc56a86d44ac..ddbab6fd798d4 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1265,6 +1265,7 @@
             <id>create-source-jar</id>
             <goals>
               <goal>jar-no-fork</goal>
+              <goal>test-jar-no-fork</goal>
             </goals>
           </execution>
         </executions>
@@ -1472,6 +1473,25 @@
       <plugin>
         <groupId>org.scalatest</groupId>
         <artifactId>scalatest-maven-plugin</artifactId>
       </plugin>
+      <!-- Build test-jar's for all projects, since some projects depend on tests from others -->
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>prepare-test-jar</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+            <configuration>
+              <excludes>
+                <exclude>log4j.properties</exclude>
+              </excludes>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
     </plugins>
   </build>
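
The two hunks above are the producer side of this change: binding maven-jar-plugin's test-jar goal to the prepare-package phase in the shared parent pom makes every module publish a <artifactId>-<version>-tests.jar next to its main jar (minus log4j.properties, so downstream logging configuration is not clobbered), and maven-source-plugin's test-jar-no-fork goal does the same for a test-sources jar. Any module can then pull another module's compiled test classes in with an ordinary dependency. A sketch of the general pattern, with spark-core as an illustrative consumer target (Maven resolves <type>test-jar</type> to the artifact carrying the "tests" classifier):

    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-core_${scala.binary.version}</artifactId>
      <version>${project.version}</version>
      <type>test-jar</type>
      <scope>test</scope>
    </dependency>
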
diff --git a/sql/hive/pom.xml b/sql/hive/pom.xml
index 0e3f4eb98cbf7..47b564079f394 100644
--- a/sql/hive/pom.xml
+++ b/sql/hive/pom.xml
@@ -89,6 +89,20 @@
       <artifactId>junit</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-sql_${scala.binary.version}</artifactId>
+      <type>test-jar</type>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-catalyst_${scala.binary.version}</artifactId>
+      <type>test-jar</type>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <build>
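
These two test-scoped dependencies put the classes compiled from sql/core's and catalyst's src/test trees on sql/hive's test classpath. That is what lets the two local copies below be deleted; hive suites can now import the shared harnesses directly:

    // Resolved from the spark-sql tests artifact:
    import org.apache.spark.sql.QueryTest
    // Resolved from the spark-catalyst tests artifact:
    import org.apache.spark.sql.catalyst.plans.PlanTest
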
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/QueryTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/QueryTest.scala
deleted file mode 100644
index 0270e63557963..0000000000000
--- a/sql/hive/src/test/scala/org/apache/spark/sql/QueryTest.scala
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.sql
-
-import scala.collection.JavaConversions._
-
-import org.apache.spark.sql.catalyst.plans._
-import org.apache.spark.sql.catalyst.util._
-
-
-/**
- * *** DUPLICATED FROM sql/core. ***
- *
- * It is hard to have maven allow one subproject depend on another subprojects test code.
- * So, we duplicate this code here.
- */
-class QueryTest extends PlanTest {
-
-  /**
-   * Runs the plan and makes sure the answer contains all of the keywords, or the
-   * none of keywords are listed in the answer
-   * @param rdd the [[DataFrame]] to be executed
-   * @param exists true for make sure the keywords are listed in the output, otherwise
-   *               to make sure none of the keyword are not listed in the output
-   * @param keywords keyword in string array
-   */
-  def checkExistence(rdd: DataFrame, exists: Boolean, keywords: String*) {
-    val outputs = rdd.collect().map(_.mkString).mkString
-    for (key <- keywords) {
-      if (exists) {
-        assert(outputs.contains(key), s"Failed for $rdd ($key doens't exist in result)")
-      } else {
-        assert(!outputs.contains(key), s"Failed for $rdd ($key existed in the result)")
-      }
-    }
-  }
-
-  /**
-   * Runs the plan and makes sure the answer matches the expected result.
-   * @param rdd the [[DataFrame]] to be executed
-   * @param expectedAnswer the expected result in a [[Seq]] of [[Row]]s.
-   */
-  protected def checkAnswer(rdd: DataFrame, expectedAnswer: Seq[Row]): Unit = {
-    QueryTest.checkAnswer(rdd, expectedAnswer) match {
-      case Some(errorMessage) => fail(errorMessage)
-      case None =>
-    }
-  }
-
-  protected def checkAnswer(rdd: DataFrame, expectedAnswer: Row): Unit = {
-    checkAnswer(rdd, Seq(expectedAnswer))
-  }
-
-  def sqlTest(sqlString: String, expectedAnswer: Seq[Row])(implicit sqlContext: SQLContext): Unit = {
-    test(sqlString) {
-      checkAnswer(sqlContext.sql(sqlString), expectedAnswer)
-    }
-  }
-}
-
-object QueryTest {
-  /**
-   * Runs the plan and makes sure the answer matches the expected result.
-   * If there was exception during the execution or the contents of the DataFrame does not
-   * match the expected result, an error message will be returned. Otherwise, a [[None]] will
-   * be returned.
-   * @param rdd the [[DataFrame]] to be executed
-   * @param expectedAnswer the expected result in a [[Seq]] of [[Row]]s.
-   */
-  def checkAnswer(rdd: DataFrame, expectedAnswer: Seq[Row]): Option[String] = {
-    val isSorted = rdd.logicalPlan.collect { case s: logical.Sort => s }.nonEmpty
-    def prepareAnswer(answer: Seq[Row]): Seq[Row] = {
-      // Converts data to types that we can do equality comparison using Scala collections.
-      // For BigDecimal type, the Scala type has a better definition of equality test (similar to
-      // Java's java.math.BigDecimal.compareTo).
-      val converted: Seq[Row] = answer.map { s =>
-        Row.fromSeq(s.toSeq.map {
-          case d: java.math.BigDecimal => BigDecimal(d)
-          case o => o
-        })
-      }
-      if (!isSorted) converted.sortBy(_.toString) else converted
-    }
-    val sparkAnswer = try rdd.collect().toSeq catch {
-      case e: Exception =>
-        val errorMessage =
-          s"""
-            |Exception thrown while executing query:
-            |${rdd.queryExecution}
-            |== Exception ==
-            |$e
-            |${org.apache.spark.sql.catalyst.util.stackTraceToString(e)}
-          """.stripMargin
-        return Some(errorMessage)
-    }
-
-    if (prepareAnswer(expectedAnswer) != prepareAnswer(sparkAnswer)) {
-      val errorMessage =
-        s"""
-          |Results do not match for query:
-          |${rdd.logicalPlan}
-          |== Analyzed Plan ==
-          |${rdd.queryExecution.analyzed}
-          |== Physical Plan ==
-          |${rdd.queryExecution.executedPlan}
-          |== Results ==
-          |${sideBySide(
-            s"== Correct Answer - ${expectedAnswer.size} ==" +:
-              prepareAnswer(expectedAnswer).map(_.toString),
-            s"== Spark Answer - ${sparkAnswer.size} ==" +:
-              prepareAnswer(sparkAnswer).map(_.toString)).mkString("\n")}
-        """.stripMargin
-      return Some(errorMessage)
-    }
-
-    return None
-  }
-
-  def checkAnswer(rdd: DataFrame, expectedAnswer: java.util.List[Row]): String = {
-    checkAnswer(rdd, expectedAnswer.toSeq) match {
-      case Some(errorMessage) => errorMessage
-      case None => null
-    }
-  }
-}
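
With the duplicate gone, hive suites extend the canonical QueryTest that ships in sql/core's tests artifact; its checkAnswer/checkExistence surface matches the copy deleted above, so call sites do not change. A minimal sketch of a suite written against it (suite name and query are illustrative, not part of this patch):

    package org.apache.spark.sql.hive

    import org.apache.spark.sql.{QueryTest, Row}
    import org.apache.spark.sql.hive.test.TestHive._

    // QueryTest here resolves to the sql/core test-jar copy.
    class HiveQueryTestExample extends QueryTest {
      test("checkAnswer compares collected rows to the expected answer") {
        checkAnswer(sql("SELECT 1 + 1"), Row(2))
      }
    }
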
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/plans/PlanTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/plans/PlanTest.scala
deleted file mode 100644
index 98f1c0e69e29d..0000000000000
--- a/sql/hive/src/test/scala/org/apache/spark/sql/catalyst/plans/PlanTest.scala
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.sql.catalyst.plans
-
-import org.apache.spark.sql.catalyst.expressions.{Alias, AttributeReference, ExprId}
-import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
-import org.apache.spark.sql.catalyst.util._
-import org.scalatest.FunSuite
-
-/**
- * *** DUPLICATED FROM sql/catalyst/plans. ***
- *
- * It is hard to have maven allow one subproject depend on another subprojects test code.
- * So, we duplicate this code here.
- */
-class PlanTest extends FunSuite {
-
-  /**
-   * Since attribute references are given globally unique ids during analysis,
-   * we must normalize them to check if two different queries are identical.
-   */
-  protected def normalizeExprIds(plan: LogicalPlan) = {
-    plan transformAllExpressions {
-      case a: AttributeReference =>
-        AttributeReference(a.name, a.dataType, a.nullable)(exprId = ExprId(0))
-      case a: Alias =>
-        Alias(a.child, a.name)(exprId = ExprId(0))
-    }
-  }
-
-  /** Fails the test if the two plans do not match */
-  protected def comparePlans(plan1: LogicalPlan, plan2: LogicalPlan) {
-    val normalized1 = normalizeExprIds(plan1)
-    val normalized2 = normalizeExprIds(plan2)
-    if (normalized1 != normalized2)
-      fail(
-        s"""
-          |== FAIL: Plans do not match ===
-          |${sideBySide(normalized1.treeString, normalized2.treeString).mkString("\n")}
-        """.stripMargin)
-  }
-}
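
The catalyst-side PlanTest that replaces this copy exposes the same normalizeExprIds/comparePlans surface. The property it gives tests — two plans that differ only in expression ids compare equal — can be sketched as below (suite and attribute names are illustrative, and LocalRelation's varargs apply is assumed from catalyst's test utilities):

    package org.apache.spark.sql.hive

    import org.apache.spark.sql.catalyst.expressions.AttributeReference
    import org.apache.spark.sql.catalyst.plans.PlanTest
    import org.apache.spark.sql.catalyst.plans.logical.LocalRelation
    import org.apache.spark.sql.types.IntegerType

    // Illustrative suite, not part of this patch.
    class ComparePlansExample extends PlanTest {
      test("expression ids are normalized before comparison") {
        // Each AttributeReference mints a fresh, globally unique exprId;
        // comparePlans rewrites all ids to ExprId(0) before checking equality.
        val a1 = AttributeReference("a", IntegerType, nullable = true)()
        val a2 = AttributeReference("a", IntegerType, nullable = true)()
        comparePlans(LocalRelation(a1), LocalRelation(a2))
      }
    }
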
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
index 44d24273e722a..13a6041da1605 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
@@ -24,21 +24,6 @@ import org.apache.spark.sql.{AnalysisException, DataFrame, QueryTest}
 import org.apache.spark.storage.RDDBlockId
 
 class CachedTableSuite extends QueryTest {
-  /**
-   * Throws a test failed exception when the number of cached tables differs from the expected
-   * number.
-   */
-  def assertCached(query: DataFrame, numCachedTables: Int = 1): Unit = {
-    val planWithCaching = query.queryExecution.withCachedData
-    val cachedData = planWithCaching collect {
-      case cached: InMemoryRelation => cached
-    }
-
-    assert(
-      cachedData.size == numCachedTables,
-      s"Expected query to contain $numCachedTables, but it actually had ${cachedData.size}\n" +
-        planWithCaching)
-  }
 
   def rddIdOf(tableName: String): Int = {
     val executedPlan = table(tableName).queryExecution.executedPlan
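
The assertCached helper removed here is the one sql/core's QueryTest already carries (the hunk compiles only because the base class now supplies it). A hedged sketch of a hive suite exercising the inherited helper — the suite name is illustrative, while "src" is TestHive's standard test table:

    package org.apache.spark.sql.hive

    import org.apache.spark.sql.QueryTest
    import org.apache.spark.sql.hive.test.TestHive._

    // Illustrative suite, not part of this patch.
    class AssertCachedExample extends QueryTest {
      test("a cached table surfaces as an InMemoryRelation") {
        cacheTable("src")
        assertCached(table("src"))   // default expectation: exactly one cached relation
        uncacheTable("src")
      }
    }
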
diff --git a/streaming/pom.xml b/streaming/pom.xml
index 96508d83f4049..7726688c46a2e 100644
--- a/streaming/pom.xml
+++ b/streaming/pom.xml
@@ -97,34 +97,6 @@
     <outputDirectory>target/scala-${scala.binary.version}/classes</outputDirectory>
     <testOutputDirectory>target/scala-${scala.binary.version}/test-classes</testOutputDirectory>
     <plugins>
-
-      <!--
-        This plugin forces the generation of a jar containing streaming test classes,
-        so that the test classes of external modules can use them. The two execution
-        profiles are necessary - first one for 'mvn package', second one for
-        'mvn test-compile'. Ideally, 'mvn compile' should not compile test classes and
-        therefore should not need this. However, an open Maven bug
-        (http://jira.codehaus.org/browse/MNG-3559) causes the compilation to fail if
-        the streaming test-jar is not generated. Hence the second execution profile.
-      -->
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-          <execution>
-            <id>test-jar-on-test-compile</id>
-            <phase>test-compile</phase>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-shade-plugin</artifactId>
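
With the parent pom now attaching the test-jar goal to every module's prepare-package phase, streaming's module-local configuration above is redundant and can go. Modules that consume the streaming test-jar (external/flume, external/kafka, and friends) keep the usual dependency shape; the pattern, sketched here rather than quoted from those poms, stays:

    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-streaming_${scala.binary.version}</artifactId>
      <version>${project.version}</version>
      <type>test-jar</type>
      <scope>test</scope>
    </dependency>

One behavioral nuance: the deleted config also bound test-jar at test-compile as a workaround for MNG-3559, while the parent binds it at prepare-package only; this diff assumes a plain 'mvn compile' no longer needs the streaming tests artifact.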