diff --git a/core/src/main/scala/org/apache/spark/Aggregator.scala b/core/src/main/scala/org/apache/spark/Aggregator.scala
index 09add7a497ca1..ec643251bd5e9 100644
--- a/core/src/main/scala/org/apache/spark/Aggregator.scala
+++ b/core/src/main/scala/org/apache/spark/Aggregator.scala
@@ -20,7 +20,7 @@ package org.apache.spark
import org.apache.spark.util.collection.{AppendOnlyMap, ExternalAppendOnlyMap}
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* A set of functions used to aggregate data.
*
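A rough usage sketch for context (illustrative only, not part of this patch): an
Aggregator is built from three functions, createCombiner, mergeValue and mergeCombiners.

    // Illustrative: sums Int values for each key into Long combiners.
    import org.apache.spark.Aggregator

    val sumAggregator = new Aggregator[String, Int, Long](
      createCombiner = (v: Int) => v.toLong,            // first value seen for a key
      mergeValue = (c: Long, v: Int) => c + v,          // fold further values in
      mergeCombiners = (c1: Long, c2: Long) => c1 + c2) // merge partially aggregated results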
diff --git a/core/src/main/scala/org/apache/spark/Dependency.scala b/core/src/main/scala/org/apache/spark/Dependency.scala
index e23b7ad4a11b0..aae8409ec083c 100644
--- a/core/src/main/scala/org/apache/spark/Dependency.scala
+++ b/core/src/main/scala/org/apache/spark/Dependency.scala
@@ -21,7 +21,7 @@ import org.apache.spark.rdd.RDD
import org.apache.spark.serializer.Serializer
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* Base class for dependencies.
*/
@@ -29,7 +29,7 @@ abstract class Dependency[T](val rdd: RDD[T]) extends Serializable
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* Base class for dependencies where each partition of the parent RDD is used by at most one
* partition of the child RDD. Narrow dependencies allow for pipelined execution.
@@ -45,7 +45,7 @@ abstract class NarrowDependency[T](rdd: RDD[T]) extends Dependency(rdd) {
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* Represents a dependency on the output of a shuffle stage.
* @param rdd the parent RDD
@@ -65,7 +65,7 @@ class ShuffleDependency[K, V](
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* Represents a one-to-one dependency between partitions of the parent and child RDDs.
*/
@@ -75,7 +75,7 @@ class OneToOneDependency[T](rdd: RDD[T]) extends NarrowDependency[T](rdd) {
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* Represents a one-to-one dependency between ranges of partitions in the parent and child RDDs.
* @param rdd the parent RDD
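A rough sketch of where these dependency classes surface (illustrative only; assumes
an existing SparkContext named sc):

    import org.apache.spark.{HashPartitioner, OneToOneDependency, ShuffleDependency}
    import org.apache.spark.SparkContext._

    val pairs = sc.parallelize(1 to 100).map(i => (i % 10, i))
    val mapped = pairs.mapValues(_ + 1)                       // narrow, pipelined
    val shuffled = pairs.partitionBy(new HashPartitioner(4))  // requires a shuffle

    // Expected: the mapped RDD depends narrowly on its parent, the shuffled one does not.
    println(mapped.dependencies.head.isInstanceOf[OneToOneDependency[_]])
    println(shuffled.dependencies.head.isInstanceOf[ShuffleDependency[_, _]])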
diff --git a/core/src/main/scala/org/apache/spark/FutureAction.scala b/core/src/main/scala/org/apache/spark/FutureAction.scala
index 6eb5689b5004b..bea19913228fc 100644
--- a/core/src/main/scala/org/apache/spark/FutureAction.scala
+++ b/core/src/main/scala/org/apache/spark/FutureAction.scala
@@ -25,7 +25,7 @@ import org.apache.spark.rdd.RDD
import org.apache.spark.scheduler.{JobFailed, JobSucceeded, JobWaiter}
/**
- * EXPERIMENTAL
+ * EXPERIMENTAL API
*
* A future for the result of an action to support cancellation. This is an extension of the
* Scala Future interface to support cancellation.
@@ -150,7 +150,7 @@ class SimpleFutureAction[T] private[spark](jobWaiter: JobWaiter[_], resultFunc:
/**
- * EXPERIMENTAL
+ * EXPERIMENTAL API
*
* A [[FutureAction]] for actions that could trigger multiple Spark jobs. Examples include take,
* takeSample. Cancellation works by setting the cancelled flag to true and interrupting the
diff --git a/core/src/main/scala/org/apache/spark/TaskContext.scala b/core/src/main/scala/org/apache/spark/TaskContext.scala
index 928ad0830f6f8..d4f8fcf8f4320 100644
--- a/core/src/main/scala/org/apache/spark/TaskContext.scala
+++ b/core/src/main/scala/org/apache/spark/TaskContext.scala
@@ -22,7 +22,7 @@ import scala.collection.mutable.ArrayBuffer
import org.apache.spark.executor.TaskMetrics
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* Contextual information about a task which can be read or mutated during execution.
*/
diff --git a/core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala b/core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala
index bd63b1148d54b..656c9d337c637 100644
--- a/core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala
+++ b/core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala
@@ -20,7 +20,7 @@ package org.apache.spark.executor
import org.apache.spark.storage.{BlockId, BlockStatus}
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* Metrics tracked during the execution of a task.
*/
@@ -88,7 +88,7 @@ object TaskMetrics {
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* Metrics pertaining to shuffle data read in a given task.
*/
@@ -127,7 +127,7 @@ class ShuffleReadMetrics extends Serializable {
}
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* Metrics pertaining to shuffle data written in a given task.
*/
diff --git a/core/src/main/scala/org/apache/spark/io/CompressionCodec.scala b/core/src/main/scala/org/apache/spark/io/CompressionCodec.scala
index a7035b647a701..4f9e24f452d52 100644
--- a/core/src/main/scala/org/apache/spark/io/CompressionCodec.scala
+++ b/core/src/main/scala/org/apache/spark/io/CompressionCodec.scala
@@ -25,7 +25,7 @@ import org.xerial.snappy.{SnappyInputStream, SnappyOutputStream}
import org.apache.spark.SparkConf
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* CompressionCodec allows the customization of choosing different compression implementations
* to be used in block storage.
@@ -58,7 +58,7 @@ private[spark] object CompressionCodec {
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* LZF implementation of [[org.apache.spark.io.CompressionCodec]].
*
@@ -77,7 +77,7 @@ class LZFCompressionCodec(conf: SparkConf) extends CompressionCodec {
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* Snappy implementation of [[org.apache.spark.io.CompressionCodec]].
* Block size can be configured by spark.io.compression.snappy.block.size.
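For context, a sketch of how a codec is selected through configuration (illustrative
values; the class names are the two implementations in this file):

    import org.apache.spark.SparkConf

    val conf = new SparkConf()
      .set("spark.io.compression.codec", "org.apache.spark.io.SnappyCompressionCodec")
      .set("spark.io.compression.snappy.block.size", "32768")  // bytes, per the scaladoc above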
diff --git a/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala b/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala
index a3ec79775e05f..0aa79de87bf99 100644
--- a/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala
+++ b/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala
@@ -18,7 +18,7 @@
package org.apache.spark.partial
/**
- * EXPERIMENTAL
+ * EXPERIMENTAL API
*
* A Double value with error bars and associated confidence.
*/
diff --git a/core/src/main/scala/org/apache/spark/partial/PartialResult.scala b/core/src/main/scala/org/apache/spark/partial/PartialResult.scala
index 4d8f49b5a354b..2e2591973c665 100644
--- a/core/src/main/scala/org/apache/spark/partial/PartialResult.scala
+++ b/core/src/main/scala/org/apache/spark/partial/PartialResult.scala
@@ -18,7 +18,7 @@
package org.apache.spark.partial
/**
- * EXPERIMENTAL
+ * EXPERIMENTAL API
*/
class PartialResult[R](initialVal: R, isFinal: Boolean) {
private var finalValue: Option[R] = if (isFinal) Some(initialVal) else None
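A rough sketch of how these two classes appear together in approximate actions
(illustrative only; assumes an existing SparkContext named sc):

    // countApprox returns a PartialResult whose final value is a BoundedDouble,
    // i.e. an estimate with a confidence interval.
    val rdd = sc.parallelize(1 to 1000000, 100)
    val partial = rdd.countApprox(timeout = 1000, confidence = 0.95)
    val bounded = partial.getFinalValue()   // blocks until the timeout or job completion
    println(s"count in [${bounded.low}, ${bounded.high}], mean ${bounded.mean}")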
diff --git a/core/src/main/scala/org/apache/spark/rdd/AsyncRDDActions.scala b/core/src/main/scala/org/apache/spark/rdd/AsyncRDDActions.scala
index e0e5d08ffe6fc..992b67dfff1af 100644
--- a/core/src/main/scala/org/apache/spark/rdd/AsyncRDDActions.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/AsyncRDDActions.scala
@@ -26,7 +26,7 @@ import scala.reflect.ClassTag
import org.apache.spark.{ComplexFutureAction, FutureAction, Logging}
/**
- * EXPERIMENTAL
+ * EXPERIMENTAL API
*
* A set of asynchronous RDD actions available through an implicit conversion.
* Import `org.apache.spark.SparkContext._` at the top of your program to use these functions.
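A rough sketch of the asynchronous actions and the FutureAction they return
(illustrative only; assumes an existing SparkContext named sc):

    import org.apache.spark.SparkContext._                    // async actions via implicit conversion
    import scala.concurrent.ExecutionContext.Implicits.global // needed for onComplete
    import scala.util.{Failure, Success}

    val future = sc.parallelize(1 to 1000000, 100).countAsync()
    future.onComplete {
      case Success(count) => println(s"count = $count")
      case Failure(e)     => println(s"job failed or was cancelled: $e")
    }
    // future.cancel() would cancel the underlying Spark job if the result is no longer needed.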
diff --git a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
index 30970af325c6c..15d0aea052b1b 100644
--- a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
@@ -70,7 +70,7 @@ private[spark] class HadoopPartition(rddId: Int, idx: Int, @transient s: InputSp
}
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* An RDD that provides core functionality for reading data stored in Hadoop (e.g., files in HDFS,
* sources in HBase, or S3), using the older MapReduce API (`org.apache.hadoop.mapred`).
diff --git a/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
index 8a34292bf0e85..da178db4bd500 100644
--- a/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
@@ -36,7 +36,7 @@ class NewHadoopPartition(rddId: Int, val index: Int, @transient rawSplit: InputS
}
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* An RDD that provides core functionality for reading data stored in Hadoop (e.g., files in HDFS,
* sources in HBase, or S3), using the new MapReduce API (`org.apache.hadoop.mapreduce`).
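For context, these RDDs are normally obtained through SparkContext helpers rather than
constructed directly (illustrative only; the path is a placeholder and sc is an existing
SparkContext):

    import org.apache.hadoop.io.{LongWritable, Text}
    import org.apache.hadoop.mapred.TextInputFormat                                       // old API, backed by HadoopRDD
    import org.apache.hadoop.mapreduce.lib.input.{TextInputFormat => NewTextInputFormat}  // new API, backed by NewHadoopRDD

    val oldApi = sc.hadoopFile[LongWritable, Text, TextInputFormat]("hdfs://host/path/to/input")
    val newApi = sc.newAPIHadoopFile[LongWritable, Text, NewTextInputFormat]("hdfs://host/path/to/input")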
diff --git a/core/src/main/scala/org/apache/spark/rdd/PartitionPruningRDD.scala b/core/src/main/scala/org/apache/spark/rdd/PartitionPruningRDD.scala
index 9f3fa08a5ccb8..f20cb20d66f61 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PartitionPruningRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PartitionPruningRDD.scala
@@ -46,7 +46,7 @@ private[spark] class PruneDependency[T](rdd: RDD[T], @transient partitionFilterF
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
 * An RDD used to prune RDD partitions so we can avoid launching tasks on
* all partitions. An example use case: If we know the RDD is partitioned by range,
@@ -67,7 +67,7 @@ class PartitionPruningRDD[T: ClassTag](
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*/
object PartitionPruningRDD {
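A rough sketch of the pruning use case described above (illustrative only; assumes an
existing SparkContext named sc):

    import org.apache.spark.rdd.PartitionPruningRDD

    val data = sc.parallelize(1 to 1000, 100)      // 100 partitions
    // Keep only the partitions whose index passes the filter; no tasks are
    // launched on the pruned partitions.
    val firstTen = PartitionPruningRDD.create(data, partitionIndex => partitionIndex < 10)
    println(firstTen.partitions.length)            // expected: 10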
diff --git a/core/src/main/scala/org/apache/spark/rdd/ShuffledRDD.scala b/core/src/main/scala/org/apache/spark/rdd/ShuffledRDD.scala
index 80843c5addff8..72f9e7dff84cf 100644
--- a/core/src/main/scala/org/apache/spark/rdd/ShuffledRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/ShuffledRDD.scala
@@ -28,7 +28,7 @@ private[spark] class ShuffledRDDPartition(val idx: Int) extends Partition {
}
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* The resulting RDD from a shuffle (e.g. repartitioning of data).
* @param prev the parent RDD.
diff --git a/core/src/main/scala/org/apache/spark/scheduler/SparkListener.scala b/core/src/main/scala/org/apache/spark/scheduler/SparkListener.scala
index 67f6ac9995926..99ce7db947ea1 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/SparkListener.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/SparkListener.scala
@@ -27,23 +27,23 @@ import org.apache.spark.executor.TaskMetrics
import org.apache.spark.storage.BlockManagerId
import org.apache.spark.util.{Distribution, Utils}
-/** SEMI-PRIVATE */
+/** UNSTABLE DEVELOPER API */
sealed trait SparkListenerEvent
-/** SEMI-PRIVATE */
+/** UNSTABLE DEVELOPER API */
case class SparkListenerStageSubmitted(stageInfo: StageInfo, properties: Properties = null)
extends SparkListenerEvent
-/** SEMI-PRIVATE */
+/** UNSTABLE DEVELOPER API */
case class SparkListenerStageCompleted(stageInfo: StageInfo) extends SparkListenerEvent
-/** SEMI-PRIVATE */
+/** UNSTABLE DEVELOPER API */
case class SparkListenerTaskStart(stageId: Int, taskInfo: TaskInfo) extends SparkListenerEvent
-/** SEMI-PRIVATE */
+/** UNSTABLE DEVELOPER API */
case class SparkListenerTaskGettingResult(taskInfo: TaskInfo) extends SparkListenerEvent
-/** SEMI-PRIVATE */
+/** UNSTABLE DEVELOPER API */
case class SparkListenerTaskEnd(
stageId: Int,
taskType: String,
@@ -52,26 +52,26 @@ case class SparkListenerTaskEnd(
taskMetrics: TaskMetrics)
extends SparkListenerEvent
-/** SEMI-PRIVATE */
+/** UNSTABLE DEVELOPER API */
case class SparkListenerJobStart(jobId: Int, stageIds: Seq[Int], properties: Properties = null)
extends SparkListenerEvent
-/** SEMI-PRIVATE */
+/** UNSTABLE DEVELOPER API */
case class SparkListenerJobEnd(jobId: Int, jobResult: JobResult) extends SparkListenerEvent
-/** SEMI-PRIVATE */
+/** UNSTABLE DEVELOPER API */
case class SparkListenerEnvironmentUpdate(environmentDetails: Map[String, Seq[(String, String)]])
extends SparkListenerEvent
-/** SEMI-PRIVATE */
+/** UNSTABLE DEVELOPER API */
case class SparkListenerBlockManagerAdded(blockManagerId: BlockManagerId, maxMem: Long)
extends SparkListenerEvent
-/** SEMI-PRIVATE */
+/** UNSTABLE DEVELOPER API */
case class SparkListenerBlockManagerRemoved(blockManagerId: BlockManagerId)
extends SparkListenerEvent
-/** SEMI-PRIVATE */
+/** UNSTABLE DEVELOPER API */
case class SparkListenerUnpersistRDD(rddId: Int) extends SparkListenerEvent
/** An event used in the listener to shutdown the listener daemon thread. */
@@ -79,7 +79,7 @@ private[spark] case object SparkListenerShutdown extends SparkListenerEvent
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* Interface for listening to events from the Spark scheduler. Note that this is an internal
* interface which might change in different Spark releases.
@@ -143,7 +143,7 @@ trait SparkListener {
}
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* Simple SparkListener that logs a few summary statistics when each stage completes
*/
diff --git a/core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala b/core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala
index bd75d31601204..2d901735e1324 100644
--- a/core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala
+++ b/core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala
@@ -94,7 +94,7 @@ private[spark] class JavaSerializerInstance(counterReset: Int) extends Serialize
}
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* A Spark serializer that uses Java's built-in serialization.
*
diff --git a/core/src/main/scala/org/apache/spark/serializer/Serializer.scala b/core/src/main/scala/org/apache/spark/serializer/Serializer.scala
index c7fd92ce59548..1133cc9104d10 100644
--- a/core/src/main/scala/org/apache/spark/serializer/Serializer.scala
+++ b/core/src/main/scala/org/apache/spark/serializer/Serializer.scala
@@ -26,7 +26,7 @@ import org.apache.spark.util.{ByteBufferInputStream, NextIterator}
import org.apache.spark.SparkEnv
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* A serializer. Because some serialization libraries are not thread safe, this class is used to
* create [[org.apache.spark.serializer.SerializerInstance]] objects that do the actual
@@ -55,7 +55,7 @@ object Serializer {
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* An instance of a serializer, for use by one thread at a time.
*/
@@ -89,7 +89,7 @@ trait SerializerInstance {
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* A stream for writing serialized objects.
*/
@@ -108,7 +108,7 @@ trait SerializationStream {
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* A stream for reading serialized objects.
*/
diff --git a/core/src/main/scala/org/apache/spark/util/BoundedPriorityQueue.scala b/core/src/main/scala/org/apache/spark/util/BoundedPriorityQueue.scala
index a118048ce2865..5767daf7f094a 100644
--- a/core/src/main/scala/org/apache/spark/util/BoundedPriorityQueue.scala
+++ b/core/src/main/scala/org/apache/spark/util/BoundedPriorityQueue.scala
@@ -24,7 +24,7 @@ import scala.collection.JavaConverters._
import scala.collection.generic.Growable
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* Bounded priority queue. This class wraps the original PriorityQueue
* class and modifies it such that only the top K elements are retained.
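A rough sketch of the top-K behaviour (illustrative only):

    import org.apache.spark.util.BoundedPriorityQueue

    val top3 = new BoundedPriorityQueue[Int](3)(Ordering.Int)
    Seq(5, 1, 9, 7, 3).foreach(top3 += _)
    println(top3.toSeq.sorted.reverse)   // expected to retain only 9, 7, 5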
diff --git a/core/src/main/scala/org/apache/spark/util/MutablePair.scala b/core/src/main/scala/org/apache/spark/util/MutablePair.scala
index 482a1858e7fb3..34fd30044170f 100644
--- a/core/src/main/scala/org/apache/spark/util/MutablePair.scala
+++ b/core/src/main/scala/org/apache/spark/util/MutablePair.scala
@@ -18,7 +18,7 @@
package org.apache.spark.util
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* A tuple of 2 elements. This can be used as an alternative to Scala's Tuple2 when we want to
* minimize object allocation.
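A rough sketch of the allocation-saving pattern (illustrative only; assumes an existing
SparkContext named sc, and that downstream code does not hold on to the reused object):

    import org.apache.spark.util.MutablePair

    val squares = sc.parallelize(1 to 100).mapPartitions { iter =>
      val pair = new MutablePair[Int, Int]()   // one pair reused for the whole partition
      iter.map(i => pair.update(i, i * i))
    }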
diff --git a/core/src/main/scala/org/apache/spark/util/random/Pseudorandom.scala b/core/src/main/scala/org/apache/spark/util/random/Pseudorandom.scala
index 768c0c31efb78..80dd5deed8502 100644
--- a/core/src/main/scala/org/apache/spark/util/random/Pseudorandom.scala
+++ b/core/src/main/scala/org/apache/spark/util/random/Pseudorandom.scala
@@ -18,7 +18,7 @@
package org.apache.spark.util.random
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* A class with pseudorandom behavior.
*/
diff --git a/core/src/main/scala/org/apache/spark/util/random/RandomSampler.scala b/core/src/main/scala/org/apache/spark/util/random/RandomSampler.scala
index 406d32c4e0a1a..57d1927b7d686 100644
--- a/core/src/main/scala/org/apache/spark/util/random/RandomSampler.scala
+++ b/core/src/main/scala/org/apache/spark/util/random/RandomSampler.scala
@@ -23,7 +23,7 @@ import cern.jet.random.Poisson
import cern.jet.random.engine.DRand
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* A pseudorandom sampler. It is possible to change the sampled item type. For example, we might
* want to add weights for stratified sampling or importance sampling. Should only use
@@ -42,7 +42,7 @@ trait RandomSampler[T, U] extends Pseudorandom with Cloneable with Serializable
}
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* A sampler based on Bernoulli trials.
*
@@ -71,7 +71,7 @@ class BernoulliSampler[T](lb: Double, ub: Double, complement: Boolean = false)
}
/**
- * SEMI-PRIVATE
+ * UNSTABLE DEVELOPER API
*
* A sampler based on values drawn from Poisson distribution.
*
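A rough sketch of a sampler used in isolation (illustrative only):

    import org.apache.spark.util.random.BernoulliSampler

    // Keep items whose random draw falls in [0.0, 0.1), i.e. roughly 10% of the input.
    val sampler = new BernoulliSampler[Int](0.0, 0.1)
    sampler.setSeed(42L)                                  // Pseudorandom: fixed seed for reproducibility
    val sampled = sampler.sample((1 to 1000).iterator).toList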