diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index 316b9f0ed8a04..579c963094a78 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -228,8 +228,12 @@ class SparkContext(
   @volatile private[spark] var dagScheduler = new DAGScheduler(this)
   dagScheduler.start()
 
-  private[spark] val cleaner = new ContextCleaner(this)
-  cleaner.start()
+  private[spark] val cleaner: Option[ContextCleaner] =
+    if (conf.getBoolean("spark.cleaner.automatic", true)) {
+      Some(new ContextCleaner(this))
+    } else None
+
+  cleaner.foreach(_.start())
 
   postEnvironmentUpdate()
 
@@ -646,7 +650,7 @@ class SparkContext(
    */
   def broadcast[T](value: T): Broadcast[T] = {
     val bc = env.broadcastManager.newBroadcast[T](value, isLocal)
-    cleaner.registerBroadcastForCleanup(bc)
+    cleaner.foreach(_.registerBroadcastForCleanup(bc))
     bc
   }
 
@@ -841,7 +845,7 @@ class SparkContext(
       dagScheduler = null
       if (dagSchedulerCopy != null) {
         metadataCleaner.cancel()
-        cleaner.stop()
+        cleaner.foreach(_.stop())
         dagSchedulerCopy.stop()
         listenerBus.stop()
         taskScheduler = null
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
index a92922166f595..acd152dda89d4 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
@@ -42,7 +42,7 @@ import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
  *
  * THREADING: SchedulerBackends and task-submitting clients can call this class from multiple
  * threads, so it needs locks in public API methods to maintain its state. In addition, some
- * SchedulerBackends sycnchronize on themselves when they want to send events here, and then
+ * SchedulerBackends synchronize on themselves when they want to send events here, and then
  * acquire a lock on us, so we need to make sure that we don't try to lock the backend while
  * we are holding a lock on ourselves.
  */
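
Usage note (not part of the patch): a minimal sketch of how a driver program might opt out of the automatic cleanup wired in above by setting the new "spark.cleaner.automatic" flag to false; the object name, master, and app name below are illustrative.

    import org.apache.spark.{SparkConf, SparkContext}

    object CleanerFlagDemo {
      def main(args: Array[String]): Unit = {
        val conf = new SparkConf()
          .setMaster("local[2]")                   // illustrative master
          .setAppName("cleaner-flag-demo")         // illustrative app name
          .set("spark.cleaner.automatic", "false") // SparkContext.cleaner becomes None

        val sc = new SparkContext(conf)

        // Broadcasts still work with the cleaner disabled; they are simply never
        // registered for automatic cleanup (cleaner.foreach is a no-op on None).
        val bc = sc.broadcast(Seq(1, 2, 3))
        println(bc.value.sum)

        sc.stop()
      }
    }

With the default (true), behavior is unchanged: the ContextCleaner is created and started, and broadcasts are registered for cleanup exactly as before, only now through the Option wrapper.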