Use ResetSystemProperties in DistributedSuite
JoshRosen committed Dec 19, 2014
1 parent 9e3e0dd commit 628f46c
Showing 1 changed file with 2 additions and 11 deletions.
core/src/test/scala/org/apache/spark/DistributedSuite.scala: 13 changes (2 additions, 11 deletions)
@@ -17,28 +17,23 @@
 
 package org.apache.spark
 
-import org.scalatest.BeforeAndAfter
 import org.scalatest.FunSuite
 import org.scalatest.concurrent.Timeouts._
 import org.scalatest.Matchers
 import org.scalatest.time.{Millis, Span}
 
 import org.apache.spark.storage.{RDDBlockId, StorageLevel}
+import org.apache.spark.util.ResetSystemProperties
 
 class NotSerializableClass
 class NotSerializableExn(val notSer: NotSerializableClass) extends Throwable() {}
 
 
-class DistributedSuite extends FunSuite with Matchers with BeforeAndAfter
+class DistributedSuite extends FunSuite with Matchers with ResetSystemProperties
   with LocalSparkContext {
 
   val clusterUrl = "local-cluster[2,1,512]"
 
-  after {
-    System.clearProperty("spark.reducer.maxMbInFlight")
-    System.clearProperty("spark.storage.memoryFraction")
-  }
-
   test("task throws not serializable exception") {
     // Ensures that executors do not crash when an exn is not serializable. If executors crash,
     // this test will hang. Correct behavior is that executors don't crash but fail tasks
@@ -92,7 +87,6 @@ class DistributedSuite extends FunSuite with Matchers with BeforeAndAfter
     val groups = pairs.groupByKey(2).map(x => (x._1, x._2.size)).collect()
     assert(groups.length === 16)
     assert(groups.map(_._2).sum === 2000)
-    // Note that spark.reducer.maxMbInFlight will be cleared in the test suite's after{} block
   }
 
   test("accumulators") {
@@ -210,15 +204,13 @@ class DistributedSuite extends FunSuite with Matchers with BeforeAndAfter
   }
 
   test("compute without caching when no partitions fit in memory") {
-    System.setProperty("spark.storage.memoryFraction", "0.0001")
     sc = new SparkContext(clusterUrl, "test")
     // data will be 4 million * 4 bytes = 16 MB in size, but our memoryFraction set the cache
     // to only 50 KB (0.0001 of 512 MB), so no partitions should fit in memory
     val data = sc.parallelize(1 to 4000000, 2).persist(StorageLevel.MEMORY_ONLY_SER)
     assert(data.count() === 4000000)
     assert(data.count() === 4000000)
     assert(data.count() === 4000000)
-    System.clearProperty("spark.storage.memoryFraction")
   }
 
   test("compute when only some partitions fit in memory") {
@@ -231,7 +223,6 @@ class DistributedSuite extends FunSuite with Matchers with BeforeAndAfter
     assert(data.count() === 4000000)
     assert(data.count() === 4000000)
     assert(data.count() === 4000000)
-    System.clearProperty("spark.storage.memoryFraction")
   }
 
   test("passing environment variables to cluster") {
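
The ResetSystemProperties trait referenced by the new import and mixin is not part of this diff. As a rough, minimal sketch of the idea (assuming ScalaTest's BeforeAndAfterEach and a plain java.util.Properties snapshot; this is a simplification and not necessarily how org.apache.spark.util.ResetSystemProperties is actually implemented), such a trait could look like:

import java.util.Properties

import org.scalatest.{BeforeAndAfterEach, Suite}

// Sketch only: snapshots system properties before each test and restores them
// afterwards, so tests no longer need manual System.clearProperty() cleanup.
trait ResetSystemProperties extends BeforeAndAfterEach { this: Suite =>

  private var savedProperties: Properties = _

  override def beforeEach(): Unit = {
    // Copy into a fresh Properties object so mutations made by the test
    // do not also mutate the saved snapshot.
    savedProperties = new Properties()
    savedProperties.putAll(System.getProperties)
    super.beforeEach()
  }

  override def afterEach(): Unit = {
    try {
      super.afterEach()
    } finally {
      // Restore the snapshot even if the test body or other afterEach hooks fail.
      System.setProperties(savedProperties)
      savedProperties = null
    }
  }
}

With a mixin along these lines, a property such as spark.storage.memoryFraction set inside one test is rolled back automatically before the next test runs, which is what makes the deleted after { ... } block and the per-test System.clearProperty() calls above unnecessary.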
