From 580fbc27abe82b5a3e8b54c6bfeafe9aa42e3ed4 Mon Sep 17 00:00:00 2001 From: giwa Date: Thu, 14 Aug 2014 23:46:45 -0700 Subject: [PATCH] modified streaming test case to add comment --- python/pyspark/streaming_tests.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/python/pyspark/streaming_tests.py b/python/pyspark/streaming_tests.py index 6d85a7faae859..02996ccce9a3e 100644 --- a/python/pyspark/streaming_tests.py +++ b/python/pyspark/streaming_tests.py @@ -18,6 +18,9 @@ """ Unit tests for PySpark; additional tests are implemented as doctests in individual modules. +Another option is to separate this test case from the other tests. +This makes sense because streaming tests take a long time due to the waiting time +for stopping the callback server. This file will merged to tests.py. But for now, this file is separated due to focusing to streaming test case @@ -45,7 +48,7 @@ def tearDown(self): self.ssc._sc.stop() # Why does it long time to terminaete StremaingContext and SparkContext? # Should we change the sleep time if this depends on machine spec? - time.sleep(8) + time.sleep(10) @classmethod def tearDownClass(cls): @@ -302,7 +305,7 @@ def _run_stream(self, test_input, test_func, expected_output, numSlices=None): """Start stream and return the output""" # Generate input stream with user-defined input numSlices = numSlices or self.numInputPartitions - test_input_stream = self.ssc._testInputStream(test_input, numSlices) # Apply test function to stream test_stream = test_func(test_input_stream) # Add job to get output from stream