diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormatWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormatWriter.scala
index 679d70c26881c..2c31d2a84c258 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormatWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormatWriter.scala
@@ -122,7 +122,7 @@ object FileFormatWriter extends Logging {
       }
     }
     val partitionSet = AttributeSet(partitionColumns)
-    val dataColumns = queryExecution.executedPlan.output.filterNot(partitionSet.contains)
+    val dataColumns = allColumns.filterNot(partitionSet.contains)
 
     val bucketIdExpression = bucketSpec.map { spec =>
       val bucketColumns = spec.bucketColumnNames.map(c => dataColumns.find(_.name == c).get)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
index 80e9176be7a76..618e5b68ff8c0 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
@@ -470,24 +470,22 @@ class InsertIntoHiveTableSuite extends QueryTest with TestHiveSingleton with Bef
   test("SPARK-21165: the query schema of INSERT is changed after optimization") {
     withSQLConf(("hive.exec.dynamic.partition.mode", "nonstrict")) {
-      withTempView("tempView") {
-        withTable("dest", "view1") {
-          Seq(("a", "b", 3)).toDF("word", "first", "length").write.saveAsTable("view1")
-
-          spark.sql(
-            """
-              |CREATE TABLE dest (word string, length int)
-              |PARTITIONED BY (first string)
-            """.stripMargin)
+      withTable("tab1", "tab2") {
+        Seq(("a", "b", 3)).toDF("word", "first", "length").write.saveAsTable("tab1")
 
-          spark.sql(
-            """
-              |INSERT INTO TABLE dest PARTITION(first)
-              |SELECT word, length, cast(first as string) as first FROM view1
-            """.stripMargin)
+        spark.sql(
+          """
+            |CREATE TABLE tab2 (word string, length int)
+            |PARTITIONED BY (first string)
+          """.stripMargin)
 
-          checkAnswer(spark.table("dest"), Row("a", 3, "b"))
-        }
+        spark.sql(
+          """
+            |INSERT INTO TABLE tab2 PARTITION(first)
+            |SELECT word, length, cast(first as string) as first FROM tab1
+          """.stripMargin)
+
+        checkAnswer(spark.table("tab2"), Row("a", 3, "b"))
       }
     }
   }
 