Skip to content

Commit

Permalink
[SPARK-1837] NumericRange should be partitioned in the same way as other sequences
Browse files Browse the repository at this point in the history

Author: Kan Zhang <[email protected]>

Closes #776 from kanzhang/SPARK-1837 and squashes the following commits:

e48f018 [Kan Zhang] [SPARK-1837] code refactoring
67c33b5 [Kan Zhang] minor change
403f9b1 [Kan Zhang] [SPARK-1837] NumericRange should be partitioned in the same way as other sequences
  • Loading branch information
kanzhang authored and mateiz committed Jun 14, 2014
1 parent b52603b commit 7dd9fc6
Show file tree
Hide file tree
Showing 2 changed files with 37 additions and 12 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,15 @@ private object ParallelCollectionRDD {
if (numSlices < 1) {
throw new IllegalArgumentException("Positive number of slices required")
}
// Sequences need to be sliced at the same set of index positions for operations
// like RDD.zip() to behave as expected
// Computes the (start, end) index boundaries of each slice so that every
// sequence type is cut at the same set of positions; operations such as
// RDD.zip() rely on corresponding slices having identical sizes.
// `length` is taken as Long so the intermediate product i * length cannot
// overflow Int before the division.
def positions(length: Long, numSlices: Int): Iterator[(Int, Int)] = {
  Iterator.range(0, numSlices).map { i =>
    val lo = (i * length / numSlices).toInt
    val hi = ((i + 1) * length / numSlices).toInt
    (lo, hi)
  }
}
seq match {
case r: Range.Inclusive => {
val sign = if (r.step < 0) {
Expand All @@ -128,30 +137,28 @@ private object ParallelCollectionRDD {
r.start, r.end + sign, r.step).asInstanceOf[Seq[T]], numSlices)
}
case r: Range => {
(0 until numSlices).map(i => {
val start = ((i * r.length.toLong) / numSlices).toInt
val end = (((i + 1) * r.length.toLong) / numSlices).toInt
new Range(r.start + start * r.step, r.start + end * r.step, r.step)
}).asInstanceOf[Seq[Seq[T]]]
positions(r.length, numSlices).map({
case (start, end) =>
new Range(r.start + start * r.step, r.start + end * r.step, r.step)
}).toSeq.asInstanceOf[Seq[Seq[T]]]
}
case nr: NumericRange[_] => {
// For ranges of Long, Double, BigInteger, etc
val slices = new ArrayBuffer[Seq[T]](numSlices)
val sliceSize = (nr.size + numSlices - 1) / numSlices // Round up to catch everything
var r = nr
for (i <- 0 until numSlices) {
for ((start, end) <- positions(nr.length, numSlices)) {
val sliceSize = end - start
slices += r.take(sliceSize).asInstanceOf[Seq[T]]
r = r.drop(sliceSize)
}
slices
}
case _ => {
val array = seq.toArray // To prevent O(n^2) operations for List etc
(0 until numSlices).map(i => {
val start = ((i * array.length.toLong) / numSlices).toInt
val end = (((i + 1) * array.length.toLong) / numSlices).toInt
array.slice(start, end).toSeq
})
positions(array.length, numSlices).map({
case (start, end) =>
array.slice(start, end).toSeq
}).toSeq
}
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,24 @@ class ParallelCollectionSplitSuite extends FunSuite with Checkers {
assert(slices.forall(_.isInstanceOf[Range]))
}

// Slicing an Int Range and an equal-length NumericRange (Long) with the
// same slice count must yield pairwise equally-sized slices, so that
// RDD.zip() lines the partitions up correctly.
test("identical slice sizes between Range and NumericRange") {
  val intSlices = ParallelCollectionRDD.slice(1 to 7, 4)
  val longSlices = ParallelCollectionRDD.slice(1L to 7L, 4)
  assert(intSlices.size === 4)
  for (idx <- 0 until intSlices.size) {
    assert(intSlices(idx).size === longSlices(idx).size)
  }
}

// A List shorter than the slice count and an equal-length NumericRange
// must still be cut at the same positions (some slices will be empty);
// verifies the degenerate length < numSlices case.
test("identical slice sizes between List and NumericRange") {
  val listSlices = ParallelCollectionRDD.slice(List(1, 2), 4)
  val longSlices = ParallelCollectionRDD.slice(1L to 2L, 4)
  assert(listSlices.size === 4)
  for (idx <- 0 until listSlices.size) {
    assert(listSlices(idx).size === longSlices(idx).size)
  }
}

test("large ranges don't overflow") {
val N = 100 * 1000 * 1000
val data = 0 until N
Expand Down

0 comments on commit 7dd9fc6

Please sign in to comment.