Commit

Merge branch 'master' of github.com:tbfenet/incubator-spark
PartitionPruningRDD is using index from parent

I was getting an ArrayIndexOutOfBoundsException after doing a union on a pruned RDD. The index it was using for the partition was the index in the original RDD, not the index in the new pruned RDD.

(cherry picked from commit 2fead51)
Signed-off-by: Reynold Xin <[email protected]>
rxin committed Nov 20, 2013
1 parent 37126e8 commit f678e10
Showing 3 changed files with 91 additions and 48 deletions.
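For context, a minimal sketch of the failure scenario described in the commit message, assuming a running SparkContext `sc`; the RDD name, partition count, and filters below are illustrative and not part of the commit:

// Hypothetical reproduction sketch, not part of this commit.
import org.apache.spark.rdd.PartitionPruningRDD

val base = sc.parallelize(1 to 100, 4)                    // parent RDD with 4 partitions
val onlyFirst = PartitionPruningRDD.create(base, _ == 0)  // keep only parent partition 0
val onlyLast  = PartitionPruningRDD.create(base, _ == 3)  // keep only parent partition 3

// Before this fix, each pruned partition kept its parent's index (0 or 3) instead of
// being re-indexed within the pruned RDD, so running a job over the union could fail
// with ArrayIndexOutOfBoundsException, as described above.
(onlyFirst ++ onlyLast).count()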
@@ -33,11 +33,13 @@ class PruneDependency[T](rdd: RDD[T], @transient partitionFilterFunc: Int => Boolean)
   extends NarrowDependency[T](rdd) {

   @transient
-  val partitions: Array[Partition] = rdd.partitions.zipWithIndex
-    .filter(s => partitionFilterFunc(s._2))
+  val partitions: Array[Partition] = rdd.partitions
+    .filter(s => partitionFilterFunc(s.index)).zipWithIndex
     .map { case(split, idx) => new PartitionPruningRDDPartition(idx, split) : Partition }

-  override def getParents(partitionId: Int) = List(partitions(partitionId).index)
+  override def getParents(partitionId: Int) = {
+    List(partitions(partitionId).asInstanceOf[PartitionPruningRDDPartition].parentSplit.index)
+  }
 }
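In words, the fix filters the parent's partitions first and only then zips with the index, so each PartitionPruningRDDPartition gets a compact index (0, 1, ...) while keeping a reference to its parent split, and getParents now reads the parent's original index through parentSplit. A rough standalone mimic of that mapping, using hypothetical case classes rather than the Spark types, for a parent with partitions 0..3 and a filter that keeps only partition 3:

// Illustrative mimic of the new indexing logic; these are not the Spark classes.
case class ParentSplit(index: Int)
case class PrunedSplit(index: Int, parentSplit: ParentSplit)

val parentPartitions = (0 to 3).map(i => ParentSplit(i))
val keep: Int => Boolean = _ == 3

val pruned: Array[PrunedSplit] =
  parentPartitions.filter(p => keep(p.index)).zipWithIndex
    .map { case (split, idx) => PrunedSplit(idx, split) }
    .toArray

assert(pruned.length == 1)
assert(pruned(0).index == 0)             // re-indexed within the pruned set
assert(pruned(0).parentSplit.index == 3) // getParents resolves the original index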

This file was deleted.

@@ -0,0 +1,86 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.rdd

import org.scalatest.FunSuite
import org.apache.spark.{TaskContext, Partition, SharedSparkContext}


class PartitionPruningRDDSuite extends FunSuite with SharedSparkContext {

  test("Pruned Partitions inherit locality prefs correctly") {

    // Parent RDD with three partitions; compute is never invoked in this test.
    val rdd = new RDD[Int](sc, Nil) {
      override protected def getPartitions = {
        Array[Partition](
          new TestPartition(0, 1),
          new TestPartition(1, 1),
          new TestPartition(2, 1))
      }

      def compute(split: Partition, context: TaskContext) = {
        Iterator()
      }
    }
    // Keep only the parent partition with index 2.
    val prunedRDD = PartitionPruningRDD.create(rdd, x => x == 2)
    assert(prunedRDD.partitions.length == 1)
    val p = prunedRDD.partitions(0)
    // The pruned partition is re-indexed to 0 but still points at parent split 2.
    assert(p.index == 0)
    assert(p.asInstanceOf[PartitionPruningRDDPartition].parentSplit.index == 2)
  }

  test("Pruned Partitions can be unioned") {

    // Parent RDD whose partitions each yield a single, distinct value.
    val rdd = new RDD[Int](sc, Nil) {
      override protected def getPartitions = {
        Array[Partition](
          new TestPartition(0, 4),
          new TestPartition(1, 5),
          new TestPartition(2, 6))
      }

      def compute(split: Partition, context: TaskContext) = {
        List(split.asInstanceOf[TestPartition].testValue).iterator
      }
    }
    val prunedRDD1 = PartitionPruningRDD.create(rdd, x => x == 0)
    val prunedRDD2 = PartitionPruningRDD.create(rdd, x => x == 2)

    // Before the fix, a union of pruned RDDs threw ArrayIndexOutOfBoundsException.
    val merged = prunedRDD1 ++ prunedRDD2
    assert(merged.count() == 2)
    val take = merged.take(2)
    assert(take.apply(0) == 4)
    assert(take.apply(1) == 6)
  }
}

class TestPartition(i: Int, value: Int) extends Partition with Serializable {
  def index = i

  def testValue = this.value
}
