From 2061a765a1dc4c0059636d2db6274562019bce57 Mon Sep 17 00:00:00 2001
From: MechCoder
Date: Fri, 19 Jun 2015 01:40:58 +0530
Subject: [PATCH] Add tests for simultaneous training and prediction

Minor style fixes
---
 docs/mllib-clustering.md                   | 20 ++----
 .../mllib/api/python/PythonMLLibAPI.scala  | 13 ++--
 python/pyspark/mllib/clustering.py         | 66 +++++++++----------
 python/pyspark/mllib/tests.py              | 62 +++++++++++++----
 4 files changed, 93 insertions(+), 68 deletions(-)

diff --git a/docs/mllib-clustering.md b/docs/mllib-clustering.md
index fdee8e7300abd..d1f0a7f06c526 100644
--- a/docs/mllib-clustering.md
+++ b/docs/mllib-clustering.md
@@ -599,11 +599,9 @@ ssc.awaitTermination()

 First we import the necessary classes.

 {% highlight python %}
-
 from pyspark.mllib.linalg import Vectors
 from pyspark.mllib.regression import LabeledPoint
 from pyspark.mllib.clustering import StreamingKMeans
-
 {% endhighlight %}

 Then we make an input stream of vectors for training, as well as a stream of labeled data
@@ -611,36 +609,30 @@ points for testing. We assume a StreamingContext `ssc` has been created, see
 [Spark Streaming Programming Guide](streaming-programming-guide.html#initializing) for more info.

 {% highlight python %}
+def parse(lp):
+    label = float(lp[lp.find('(') + 1: lp.find(',')])
+    vec = Vectors.dense(lp[lp.find('[') + 1: lp.find(']')].split(','))
+    return LabeledPoint(label, vec)

 trainingData = ssc.textFileStream("/training/data/dir").map(Vectors.parse)
-testData = ssc.textFileStream("/testing/data/dir").map(LabeledPoint.parse)
-
+testData = ssc.textFileStream("/testing/data/dir").map(parse)
 {% endhighlight %}

 We create a model with random clusters and specify the number of clusters to find.

 {% highlight python %}
-
-numDimensions = 3
-numClusters = 2
-model = StreamingKMeans()
-model.setK(numClusters)
-model.setDecayFactor(1.0)
-model.setRandomCenters(numDimensions, 0.0)
-
+model = StreamingKMeans(k=2, decayFactor=1.0).setRandomCenters(3, 1.0, 0)
 {% endhighlight %}

 Now register the streams for training and testing and start the job, printing
 the predicted cluster assignments on new data points as they arrive.

 {% highlight python %}
-
 model.trainOn(trainingData)
 model.predictOnValues(testData.map(lambda lp: (lp.label, lp.features)))

 ssc.start()
 ssc.awaitTermination()
-
 {% endhighlight %}
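Taken together, the revised documentation snippets assemble into one runnable script. The sketch below is illustrative rather than part of the patch: the SparkContext/StreamingContext setup, the one-second batch interval, and the final `pprint()` call are assumptions added so the predictions are actually visible; the directory paths are the guide's placeholders.

```python
# Assembled from the documentation snippets above; the context setup, batch
# interval, and pprint() are illustrative additions, not part of the patch.
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.clustering import StreamingKMeans

sc = SparkContext(appName="StreamingKMeansExample")
ssc = StreamingContext(sc, 1)  # 1-second batch interval

def parse(lp):
    # Expects labeled points serialized as "(label, [x1, x2, x3])".
    label = float(lp[lp.find('(') + 1: lp.find(',')])
    vec = Vectors.dense(lp[lp.find('[') + 1: lp.find(']')].split(','))
    return LabeledPoint(label, vec)

trainingData = ssc.textFileStream("/training/data/dir").map(Vectors.parse)
testData = ssc.textFileStream("/testing/data/dir").map(parse)

# k=2 clusters in 3 dimensions; decayFactor=1.0 weights all batches equally.
model = StreamingKMeans(k=2, decayFactor=1.0).setRandomCenters(3, 1.0, 0)

model.trainOn(trainingData)
model.predictOnValues(testData.map(lambda lp: (lp.label, lp.features))).pprint()

ssc.start()
ssc.awaitTermination()
```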
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala b/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
index 75e64f88543f9..2897865af6912 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
@@ -968,12 +968,13 @@ private[python] class PythonMLLibAPI extends Serializable {
    * Java stub for the update method of StreamingKMeansModel.
    */
   def updateStreamingKMeansModel(
-      clusterCenters: java.util.ArrayList[Vector],
-      clusterWeights: java.util.ArrayList[Double],
-      data: JavaRDD[Vector], decayFactor: Double,
-      timeUnit: String) : JList[Object] = {
-    val model = new StreamingKMeansModel(
-      clusterCenters.asScala.toArray, clusterWeights.asScala.toArray)
+      clusterCenters: JList[Vector],
+      clusterWeights: JList[Double],
+      data: JavaRDD[Vector],
+      decayFactor: Double,
+      timeUnit: String): JList[Object] = {
+    val model = new StreamingKMeansModel(
+      clusterCenters.asScala.toArray, clusterWeights.asScala.toArray)
       .update(data, decayFactor, timeUnit)
     List[AnyRef](model.clusterCenters, Vectors.dense(model.clusterWeights)).asJava
   }
diff --git a/python/pyspark/mllib/clustering.py b/python/pyspark/mllib/clustering.py
index 3a88a6aa68c0d..a680adaf2744a 100644
--- a/python/pyspark/mllib/clustering.py
+++ b/python/pyspark/mllib/clustering.py
@@ -275,18 +275,19 @@ class StreamingKMeansModel(KMeansModel):

     .. note:: Experimental

     Clustering model which can perform an online update of the centroids.

-    The update formula is given by
+    The update formula for each centroid is given by

    c_t+1 = [(c_t * n_t * a) + (x_t * m_t)] / [n_t * a + m_t]
    n_t+1 = n_t * a + m_t

    where

-    c_t: Centroid at the n_th iteration.
-    n_t: Number of weights at the n_th iteration.
-    x_t: Centroid of the new data closest to c_t
-    m_t: Number of weights of the new data closest to c_t
-    c_t+1: New centroid
+    c_t: Centroid at the n_th iteration.
+    n_t: Number of samples (or weights) associated with the centroid
+         at the n_th iteration.
+    x_t: Centroid of the new data closest to c_t.
+    m_t: Number of samples (or weights) of the new data closest to c_t.
+    c_t+1: New centroid.
     n_t+1: New number of weights.
-    a: Decay Factor, which gives the forgetfulnes
+    a: Decay factor, which gives the forgetfulness.

     Note that if a is set to 1, it is the weighted mean of the previous
     and new data. If it is set to zero, the old centroids are completely
@@ -304,7 +305,7 @@ class StreamingKMeansModel(KMeansModel):
     True
     >>> stkm.predict([0.9, 0.9]) == stkm.predict([1.1, 1.1]) == 1
     True
-    >>> stkm.getClusterWeights
+    >>> stkm.clusterWeights
     [3.0, 3.0]
     >>> decayFactor = 0.0
     >>> data = sc.parallelize([DenseVector([1.5, 1.5]), DenseVector([0.2, 0.2])])
@@ -312,19 +313,22 @@
     >>> stkm.centers
     array([[ 0.2,  0.2],
            [ 1.5,  1.5]])
-    >>> stkm.getClusterWeights
+    >>> stkm.clusterWeights
     [1.0, 1.0]
     >>> stkm.predict([0.2, 0.2])
     0
     >>> stkm.predict([1.5, 1.5])
     1
+
+    :param clusterCenters: Initial cluster centers.
+    :param clusterWeights: List of weights assigned to each cluster.
     """
     def __init__(self, clusterCenters, clusterWeights):
         super(StreamingKMeansModel, self).__init__(centers=clusterCenters)
         self._clusterWeights = list(clusterWeights)

     @property
-    def getClusterWeights(self):
+    def clusterWeights(self):
         """Convenience method to return the cluster weights."""
         return self._clusterWeights
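A quick hand-check of the update rule against the doctest above (a hand calculation, not part of the patch; the old center is taken as [1.0, 1.0] for concreteness, though with a = 0 its value cannot influence the result):

```python
# Second doctest step: decay a = 0.0, one old center with weight n_t = 3.0,
# and a single new point x_t = [1.5, 1.5] assigned to it (so m_t = 1.0).
a, n_t, m_t = 0.0, 3.0, 1.0
c_t, x_t = [1.0, 1.0], [1.5, 1.5]

n_next = n_t * a + m_t
c_next = [(c * n_t * a + x * m_t) / n_next for c, x in zip(c_t, x_t)]

print(c_next)  # [1.5, 1.5] -- the old center is fully forgotten when a == 0
print(n_next)  # 1.0        -- matches the doctest's clusterWeights [1.0, 1.0]
```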
@@ -332,13 +336,10 @@ def getClusterWeights(self):
     def update(self, data, decayFactor, timeUnit):
         """Update the centroids, according to the data.

-        Parameters
-        ----------
-        data: Should be a RDD that represents the new data.
-
-        decayFactor: forgetfulness of the previous centroids.
+        :param data: an RDD that represents the new data.
+        :param decayFactor: forgetfulness of the previous centroids.
+        :param timeUnit: can be "batches" or "points".

-        timeUnit: Can be "batches" or "points"
             If points, then the decay factor is raised to the power of the
             number of new points; if batches, it is used as is.
         """
@@ -365,17 +366,10 @@ class StreamingKMeans(object):
     Provides methods to set k, decayFactor, timeUnit to train and
     predict the incoming data

-    Parameters
-    ----------
-    k: int
-        Number of clusters
-
-    decayFactor: float
-        Forgetfulness of the previous centroid.
-
-    timeUnit: str, "batches" or "points"
-        If points, then the decayfactor is raised to the power of new
-        points.
+    :param k: int, number of clusters
+    :param decayFactor: float, forgetfulness of the previous centroids.
+    :param timeUnit: can be "batches" or "points". If points, then the
+        decay factor is raised to the power of the number of new points.
     """
     def __init__(self, k=2, decayFactor=1.0, timeUnit="batches"):
         self._k = k
@@ -384,10 +378,14 @@ def __init__(self, k=2, decayFactor=1.0, timeUnit="batches"):
             raise ValueError(
                 "timeUnit should be 'batches' or 'points', got %s." % timeUnit)
         self._timeUnit = timeUnit
-        self.latestModel = None
+        self._model = None
+
+    def latestModel(self):
+        """Return the latest model."""
+        return self._model

     def _validate(self, dstream):
-        if self.latestModel is None:
+        if self._model is None:
             raise ValueError(
                 "Initial centers should be set either by setInitialCenters "
                 "or setRandomCenters.")
@@ -416,7 +414,7 @@ def setHalfLife(self, halfLife, timeUnit):
         return self

     def setInitialCenters(self, centers, weights):
-        self.latestModel = StreamingKMeansModel(centers, weights)
+        self._model = StreamingKMeansModel(centers, weights)
         return self

     def setRandomCenters(self, dim, weight, seed):
@@ -427,7 +425,7 @@ def setRandomCenters(self, dim, weight, seed):
         rng = random.RandomState(seed)
         clusterCenters = rng.randn(self._k, dim)
         clusterWeights = tile(weight, self._k)
-        self.latestModel = StreamingKMeansModel(clusterCenters, clusterWeights)
+        self._model = StreamingKMeansModel(clusterCenters, clusterWeights)
         return self

     def trainOn(self, dstream):
@@ -435,7 +433,7 @@ def trainOn(self, dstream):
         self._validate(dstream)

         def update(rdd):
-            self.latestModel.update(rdd, self._decayFactor, self._timeUnit)
+            self._model.update(rdd, self._decayFactor, self._timeUnit)

         dstream.foreachRDD(update)

@@ -445,7 +443,7 @@ def predictOn(self, dstream):
         """
         Make predictions on a dstream.

         Returns a transformed dstream object
         """
         self._validate(dstream)
-        return dstream.map(lambda x: self.latestModel.predict(x))
+        return dstream.map(lambda x: self._model.predict(x))

     def predictOnValues(self, dstream):
         """
         Make predictions on a keyed dstream.

         Returns a transformed dstream object.
         """
         self._validate(dstream)
-        return dstream.mapValues(lambda x: self.latestModel.predict(x))
+        return dstream.mapValues(lambda x: self._model.predict(x))


 def _test():
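With these changes, `latestModel` is a method rather than an attribute, and `trainOn` keeps mutating the model that `predictOn`/`predictOnValues` consult. A minimal driver sketch (not part of the patch, assuming an existing SparkContext `sc` and StreamingContext `ssc`; the queued batches are illustrative):

```python
# Train and predict on the same queue-backed stream; latestModel() reflects
# whatever trainOn() has folded in so far. `sc` and `ssc` are assumed to exist.
from pyspark.mllib.clustering import StreamingKMeans

stkm = StreamingKMeans(k=2, decayFactor=0.0).setRandomCenters(1, 1.0, 0)

batches = [[[-0.5], [0.6], [0.8]], [[0.2], [-0.1], [0.3]]]
stream = ssc.queueStream([sc.parallelize(batch) for batch in batches])

stkm.trainOn(stream)             # updates the internal model per batch
stkm.predictOn(stream).pprint()  # predictions track the updated model

ssc.start()
ssc.awaitTerminationOrTimeout(10)  # let the queued batches run
print(stkm.latestModel().centers)
print(stkm.latestModel().clusterWeights)
```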
""" self._validate(dstream) - return dstream.mapValues(lambda x: self.latestModel.predict(x)) + return dstream.mapValues(lambda x: self._model.predict(x)) def _test(): diff --git a/python/pyspark/mllib/tests.py b/python/pyspark/mllib/tests.py index 146851a5e00b3..6c67b3489577a 100644 --- a/python/pyspark/mllib/tests.py +++ b/python/pyspark/mllib/tests.py @@ -79,6 +79,11 @@ def setUp(self): def tearDown(self): self.ssc.stop(False) + @staticmethod + def _ssc_wait(start_time, end_time, sleep_time): + while time() - start_time < end_time: + sleep(0.01) + def _squared_distance(a, b): if isinstance(a, Vector): @@ -878,25 +883,23 @@ def test_model_transform(self): class StreamingKMeansTest(MLLibStreamingTestCase): def test_model_params(self): + """Test that the model params are set correctly""" stkm = StreamingKMeans() stkm.setK(5).setDecayFactor(0.0) self.assertEquals(stkm._k, 5) self.assertEquals(stkm._decayFactor, 0.0) # Model not set yet. - self.assertIsNone(stkm.latestModel) + self.assertIsNone(stkm.latestModel()) self.assertRaises(ValueError, stkm.trainOn, [0.0, 1.0]) stkm.setInitialCenters([[0.0, 0.0], [1.0, 1.0]], [1.0, 1.0]) - self.assertEquals(stkm.latestModel.centers, [[0.0, 0.0], [1.0, 1.0]]) - self.assertEquals(stkm.latestModel.getClusterWeights, [1.0, 1.0]) - - @staticmethod - def _ssc_wait(start_time, end_time, sleep_time): - while time() - start_time < end_time: - sleep(0.01) + self.assertEquals( + stkm.latestModel().centers, [[0.0, 0.0], [1.0, 1.0]]) + self.assertEquals(stkm.latestModel().clusterWeights, [1.0, 1.0]) def test_accuracy_for_single_center(self): + """Test that the parameters obtained are correct for a single center.""" numBatches, numPoints, k, d, r, seed = 5, 5, 1, 5, 0.1, 0 centers, batches = self.streamingKMeansDataGenerator( numBatches, numPoints, k, d, r, seed) @@ -905,13 +908,14 @@ def test_accuracy_for_single_center(self): input_stream = self.ssc.queueStream( [self.sc.parallelize(batch, 1) for batch in batches]) stkm.trainOn(input_stream) + t = time() self.ssc.start() self._ssc_wait(t, 10.0, 0.01) - self.assertEquals(stkm.latestModel.getClusterWeights, [25.0]) + self.assertEquals(stkm.latestModel().clusterWeights, [25.0]) realCenters = array_sum(array(centers), axis=0) for i in range(d): - modelCenters = stkm.latestModel.centers[0][i] + modelCenters = stkm.latestModel().centers[0][i] self.assertAlmostEqual(centers[0][i], modelCenters, 1) self.assertAlmostEqual(realCenters[i], modelCenters, 1) @@ -927,7 +931,7 @@ def streamingKMeansDataGenerator(self, batches, numPoints, for i in range(batches)] def test_trainOn_model(self): - # Test the model on toy data with four clusters. + """Test the model on toy data with four clusters.""" stkm = StreamingKMeans() initCenters = [[1.0, 1.0], [-1.0, 1.0], [-1.0, -1.0], [1.0, -1.0]] weights = [1.0, 1.0, 1.0, 1.0] @@ -948,15 +952,16 @@ def test_trainOn_model(self): # Give enough time to train the model. 
@@ -927,7 +931,7 @@ def streamingKMeansDataGenerator(self, batches, numPoints,
                 for i in range(batches)]

     def test_trainOn_model(self):
-        # Test the model on toy data with four clusters.
+        """Test the model on toy data with four clusters."""
         stkm = StreamingKMeans()
         initCenters = [[1.0, 1.0], [-1.0, 1.0], [-1.0, -1.0], [1.0, -1.0]]
         weights = [1.0, 1.0, 1.0, 1.0]
@@ -948,15 +952,16 @@
         # Give enough time to train the model.
         self._ssc_wait(t, 6.0, 0.01)
-        finalModel = stkm.latestModel
+        finalModel = stkm.latestModel()
         self.assertTrue(all(finalModel.centers == array(initCenters)))
-        self.assertEquals(finalModel.getClusterWeights, [5.0, 5.0, 5.0, 5.0])
+        self.assertEquals(finalModel.clusterWeights, [5.0, 5.0, 5.0, 5.0])

     def test_predictOn_model(self):
+        """Test that the model predicts correctly on toy data."""
         initCenters = [[1.0, 1.0], [-1.0, 1.0], [-1.0, -1.0], [1.0, -1.0]]
         weights = [1.0, 1.0, 1.0, 1.0]
         stkm = StreamingKMeans()
-        stkm.latestModel = StreamingKMeansModel(initCenters, weights)
+        stkm._model = StreamingKMeansModel(initCenters, weights)

         predict_data = [[[1.5, 1.5]], [[-1.5, 1.5]], [[-1.5, -1.5]], [[1.5, -1.5]]]
         predict_data = [sc.parallelize(batch, 1) for batch in predict_data]
@@ -976,6 +981,35 @@ def update(rdd):
         self._ssc_wait(t, 6.0, 0.01)
         self.assertEquals(result, [[0], [1], [2], [3]])

+    def test_trainOn_predictOn(self):
+        """Test that prediction happens on the updated model."""
+        stkm = StreamingKMeans(decayFactor=0.0, k=2)
+        stkm.setInitialCenters([[0.0], [1.0]], [1.0, 1.0])
+
+        # Since the decay factor is set to zero, once the first batch
+        # is passed the cluster centers are updated to [-0.5] and [0.7],
+        # which causes 0.2 & 0.3 to be classified as 1, even though a
+        # classification based on the initial model would have been 0,
+        # proving that the model is updated.
+        batches = [[[-0.5], [0.6], [0.8]], [[0.2], [-0.1], [0.3]]]
+        batches = [self.sc.parallelize(batch) for batch in batches]
+        input_stream = self.ssc.queueStream(batches)
+        predict_results = []
+
+        def collect(rdd):
+            rdd_collect = rdd.collect()
+            if rdd_collect:
+                predict_results.append(rdd_collect)
+
+        stkm.trainOn(input_stream)
+        predict_stream = stkm.predictOn(input_stream)
+        predict_stream.foreachRDD(collect)
+
+        t = time()
+        self.ssc.start()
+        self._ssc_wait(t, 6.0, 0.01)
+        self.assertEqual(predict_results, [[0, 1, 1], [1, 0, 1]])
+

 if __name__ == "__main__":
     if not _have_scipy: