From ed0980ff523a7fbb267989db4540ba4d45ad3c45 Mon Sep 17 00:00:00 2001
From: CodingCat
Date: Mon, 24 Mar 2014 21:52:57 -0400
Subject: [PATCH] make SparkHiveHadoopWriter belongs to spark package

---
 .../src/main/scala/org/apache/spark/SparkHadoopWriter.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala b/sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
index bc6c9a8ef7925..d96c2f70e0c74 100644
--- a/sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
@@ -32,7 +32,7 @@ import org.apache.hadoop.io.Writable
  * Internal helper class that saves an RDD using a Hive OutputFormat.
  * It is based on [[SparkHadoopWriter]].
  */
-protected[apache]
+protected[spark]
 class SparkHiveHadoopWriter(
     @transient jobConf: JobConf,
     fileSinkConf: FileSinkDesc)
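
The change above narrows the visibility qualifier on SparkHiveHadoopWriter from protected[apache] to protected[spark], so the class is reachable only from code under the org.apache.spark package rather than from anything under org.apache. A minimal sketch of how Scala's package-qualified access modifiers behave, using a hypothetical package and class names that are not part of the patch:

    // Hypothetical example, not from the patch: shows how a package-qualified
    // modifier limits where a top-level class can be referenced.
    package org.apache.spark.example {

      // Accessible from any code inside org.apache.spark (and its subpackages),
      // mirroring the protected[spark] applied to SparkHiveHadoopWriter.
      protected[spark] class SparkScopedHelper

      // Accessible from any code inside org.apache, the broader scope the
      // patch moves away from.
      protected[apache] class ApacheScopedHelper
    }

With the narrower qualifier, the Hive writer stays an internal Spark detail instead of being visible to every org.apache namespace on the classpath.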