[SPARK-20410][SQL] Make sparkConf a def in SharedSQLContext #17705

Status: Closed · wants to merge 3 commits
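This PR changes sparkConf in the SharedSQLContext test trait from a val to a def. Suites that need a custom SparkConf now override the method and chain .set calls on super.sparkConf, instead of mutating a shared conf from beforeAll() or replacing it wholesale with new SparkConf(). Every diff below is an instance of that pattern. A minimal sketch of the idea, using a simplified stand-in trait rather than the real Spark test classes:

    import org.apache.spark.SparkConf

    trait ConfOwner {
      // A def is evaluated when the session is created, so subclass
      // overrides are always picked up; a val is fixed at construction time.
      protected def sparkConf: SparkConf = new SparkConf()
    }

    class MySuite extends ConfOwner {
      // Layer suite-specific settings on top of the base conf.
      override protected def sparkConf: SparkConf = super.sparkConf
        .set("spark.sql.codegen.fallback", "false")
    }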
Changes to the aggregate hash map suites (package org.apache.spark.sql):

@@ -19,13 +19,12 @@ package org.apache.spark.sql

 import org.scalatest.BeforeAndAfter

-class SingleLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeAndAfter {
+import org.apache.spark.SparkConf

-  protected override def beforeAll(): Unit = {
-    sparkConf.set("spark.sql.codegen.fallback", "false")
-    sparkConf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false")
-    super.beforeAll()
-  }
+class SingleLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeAndAfter {
+  override protected def sparkConf: SparkConf = super.sparkConf
+    .set("spark.sql.codegen.fallback", "false")
+    .set("spark.sql.codegen.aggregate.map.twolevel.enable", "false")

   // adding some checking after each test is run, assuring that the configs are not changed
   // in test code
@@ -38,12 +37,9 @@ class SingleLevelAggregateHashMapSuite extends DataFrameAggregateSuite with Befo
 }

 class TwoLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeAndAfter {
-
-  protected override def beforeAll(): Unit = {
-    sparkConf.set("spark.sql.codegen.fallback", "false")
-    sparkConf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
-    super.beforeAll()
-  }
+  override protected def sparkConf: SparkConf = super.sparkConf
+    .set("spark.sql.codegen.fallback", "false")
+    .set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")

   // adding some checking after each test is run, assuring that the configs are not changed
   // in test code
@@ -55,15 +51,14 @@ class TwoLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeA
   }
 }

-class TwoLevelAggregateHashMapWithVectorizedMapSuite extends DataFrameAggregateSuite with
-  BeforeAndAfter {
+class TwoLevelAggregateHashMapWithVectorizedMapSuite
+  extends DataFrameAggregateSuite
+  with BeforeAndAfter {

-  protected override def beforeAll(): Unit = {
-    sparkConf.set("spark.sql.codegen.fallback", "false")
-    sparkConf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
-    sparkConf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
-    super.beforeAll()
-  }
+  override protected def sparkConf: SparkConf = super.sparkConf
+    .set("spark.sql.codegen.fallback", "false")
+    .set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
+    .set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")

   // adding some checking after each test is run, assuring that the configs are not changed
   // in test code
Changes to DatasetSerializerRegistratorSuite (package org.apache.spark.sql):

@@ -20,22 +20,20 @@ package org.apache.spark.sql
 import com.esotericsoftware.kryo.{Kryo, Serializer}
 import com.esotericsoftware.kryo.io.{Input, Output}

+import org.apache.spark.SparkConf
 import org.apache.spark.serializer.KryoRegistrator
 import org.apache.spark.sql.test.SharedSQLContext
-import org.apache.spark.sql.test.TestSparkSession

 /**
  * Test suite to test Kryo custom registrators.
  */
 class DatasetSerializerRegistratorSuite extends QueryTest with SharedSQLContext {
   import testImplicits._

-  /**
-   * Initialize the [[TestSparkSession]] with a [[KryoRegistrator]].
-   */
-  protected override def beforeAll(): Unit = {
-    sparkConf.set("spark.kryo.registrator", TestRegistrator().getClass.getCanonicalName)
-    super.beforeAll()
+  override protected def sparkConf: SparkConf = {
+    // Make sure we use the KryoRegistrator
+    super.sparkConf.set("spark.kryo.registrator", TestRegistrator().getClass.getCanonicalName)
   }

   test("Kryo registrator") {
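For context, spark.kryo.registrator names a class that Spark's KryoSerializer instantiates and calls back for every Kryo instance it creates. A sketch of what a registrator such as TestRegistrator might look like (the class name and registered type here are illustrative, not the suite's actual code):

    import com.esotericsoftware.kryo.Kryo
    import org.apache.spark.serializer.KryoRegistrator

    class MyRegistrator extends KryoRegistrator {
      // Invoked by KryoSerializer on each new Kryo instance.
      override def registerClasses(kryo: Kryo): Unit = {
        kryo.register(classOf[java.util.UUID])
      }
    }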
Changes to DataSourceScanExecRedactionSuite (package org.apache.spark.sql.execution):

@@ -18,22 +18,17 @@ package org.apache.spark.sql.execution

 import org.apache.hadoop.fs.Path

+import org.apache.spark.SparkConf
 import org.apache.spark.sql.QueryTest
 import org.apache.spark.sql.test.SharedSQLContext
-import org.apache.spark.util.Utils

 /**
  * Suite that tests the redaction of DataSourceScanExec
  */
 class DataSourceScanExecRedactionSuite extends QueryTest with SharedSQLContext {

-  import Utils._
-
-  override def beforeAll(): Unit = {
-    sparkConf.set("spark.redaction.string.regex",
-      "file:/[\\w_]+")
-    super.beforeAll()
-  }
+  override protected def sparkConf: SparkConf = super.sparkConf
+    .set("spark.redaction.string.regex", "file:/[\\w_]+")

   test("treeString is redacted") {
     withTempDir { dir =>
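The regex configured above feeds Spark's string redaction of file paths in query plans, which is what this suite's tests then assert on. As a quick standalone illustration of what file:/[\\w_]+ actually matches (the sample string is made up):

    object RedactionRegexDemo extends App {
      val re = "file:/[\\w_]+".r
      // The match covers the scheme plus one run of word characters,
      // stopping at the first "/" inside the path.
      println(re.findFirstIn("Location: file:/tmp_dir_1/part-0"))  // Some(file:/tmp_dir_1)
    }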
Changes to FileSourceStrategySuite:

@@ -42,7 +42,7 @@ import org.apache.spark.util.Utils
 class FileSourceStrategySuite extends QueryTest with SharedSQLContext with PredicateHelper {
   import testImplicits._

-  protected override val sparkConf = new SparkConf().set("spark.default.parallelism", "1")
+  protected override def sparkConf = super.sparkConf.set("spark.default.parallelism", "1")

   test("unpartitioned table, single partition") {
     val table =
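Note the shift from new SparkConf().set(...) to super.sparkConf.set(...): building a fresh conf silently dropped whatever the base trait configured (such as the DebugFilesystem setting visible in the SharedSQLContext diff below), while chaining on super.sparkConf preserves it. A small standalone sketch of the difference (the keys are illustrative):

    import org.apache.spark.SparkConf

    object ConfLayeringDemo extends App {
      val base = new SparkConf(loadDefaults = false).set("spark.base.key", "kept")

      // Chaining on (a copy of) the base conf keeps its entries.
      val layered = base.clone.set("spark.default.parallelism", "1")
      assert(layered.get("spark.base.key") == "kept")

      // Starting from a fresh conf loses them.
      val fresh = new SparkConf(loadDefaults = false).set("spark.default.parallelism", "1")
      assert(!fresh.contains("spark.base.key"))
    }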
Changes to CompactibleFileStreamLogSuite:

@@ -28,8 +28,8 @@ import org.apache.spark.sql.test.SharedSQLContext
 class CompactibleFileStreamLogSuite extends SparkFunSuite with SharedSQLContext {

   /** To avoid caching of FS objects */
-  override protected val sparkConf =
-    new SparkConf().set(s"spark.hadoop.fs.$scheme.impl.disable.cache", "true")
+  override protected def sparkConf =
+    super.sparkConf.set(s"spark.hadoop.fs.$scheme.impl.disable.cache", "true")

   import CompactibleFileStreamLog._
Changes to HDFSMetadataLogSuite:

@@ -38,8 +38,8 @@ import org.apache.spark.util.UninterruptibleThread
 class HDFSMetadataLogSuite extends SparkFunSuite with SharedSQLContext {

   /** To avoid caching of FS objects */
-  override protected val sparkConf =
-    new SparkConf().set(s"spark.hadoop.fs.$scheme.impl.disable.cache", "true")
+  override protected def sparkConf =
+    super.sparkConf.set(s"spark.hadoop.fs.$scheme.impl.disable.cache", "true")

   private implicit def toOption[A](a: A): Option[A] = Option(a)
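Both metadata-log suites disable Hadoop's FileSystem cache for the scheme under test. They rely on Spark's convention of copying every spark.hadoop.*-prefixed entry into the Hadoop Configuration with the prefix stripped, so the Hadoop side sees fs.<scheme>.impl.disable.cache=true. A small sketch of just the prefix convention (the scheme name is illustrative):

    import org.apache.spark.SparkConf

    object HadoopPrefixDemo extends App {
      val conf = new SparkConf(loadDefaults = false)
        .set("spark.hadoop.fs.myscheme.impl.disable.cache", "true")

      // Entries prefixed with "spark.hadoop." are forwarded to the Hadoop
      // Configuration with the prefix removed.
      conf.getAll.collect { case (k, v) if k.startsWith("spark.hadoop.") =>
        s"${k.stripPrefix("spark.hadoop.")} = $v"
      }.foreach(println)  // prints: fs.myscheme.impl.disable.cache = true
    }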
Changes to the SharedSQLContext trait:

@@ -29,7 +29,9 @@ import org.apache.spark.sql.internal.SQLConf
  */
 trait SharedSQLContext extends SQLTestUtils with BeforeAndAfterEach {

-  protected val sparkConf = new SparkConf()
+  protected def sparkConf = {
+    new SparkConf().set("spark.hadoop.fs.file.impl", classOf[DebugFilesystem].getName)
+  }

   /**
    * The [[TestSparkSession]] to use for all tests in this suite.
@@ -50,8 +52,7 @@ trait SharedSQLContext extends SQLTestUtils with BeforeAndAfterEach {
   protected implicit def sqlContext: SQLContext = _spark.sqlContext

   protected def createSparkSession: TestSparkSession = {
-    new TestSparkSession(
-      sparkConf.set("spark.hadoop.fs.file.impl", classOf[DebugFilesystem].getName))
+    new TestSparkSession(sparkConf)
   }

   /**
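A likely motivation for making the base member a def rather than a val is Scala's trait-initialization order: an overridden val read during the superclass's construction is still uninitialized, while a def dispatches to the override correctly. A minimal non-Spark illustration of that pitfall:

    trait Base {
      val settingVal: String = "base"
      def settingDef: String = "base"
      // Both members are read while Base itself is being constructed.
      val sawVal: String = settingVal
      val sawDef: String = settingDef
    }

    object InitOrderDemo extends App {
      val b = new Base {
        override val settingVal: String = "overridden"
        override def settingDef: String = "overridden"
      }
      println(b.sawVal)  // null: the overriding val is not initialized yet
      println(b.sawDef)  // overridden: the def dispatches to the override
    }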