Skip to content

Commit

Permalink
code cleanup
Browse files Browse the repository at this point in the history
  • Loading branch information
CodingCat committed Mar 24, 2014
1 parent af88939 commit 258f92c
Show file tree
Hide file tree
Showing 2 changed files with 2 additions and 4 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ import org.apache.hadoop.fs.Path
import org.apache.spark.rdd.HadoopRDD

/**
* Internal helper class that saves an RDD using a Hadoop OutputFormat.
* Internal helper class that saves an RDD using a Hadoop OutputFormat.
*
* Saves the RDD using a JobConf, which should contain an output key class, an output value class,
* a filename to write to, etc, exactly like in a Hadoop MapReduce job.
Expand Down
4 changes: 1 addition & 3 deletions core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
Original file line number Diff line number Diff line change
Expand Up @@ -233,12 +233,10 @@ private[spark] object HadoopRDD {
/**
 * Stores an entry in the process-wide Hadoop job metadata cache held on `SparkEnv`.
 *
 * NOTE(review): the body is a single expression, so this method returns whatever
 * `hadoopJobMetadata.put` returns (presumably the previous mapping, possibly null,
 * if the cache is a java.util Map — confirm against SparkEnv); callers likely
 * ignore the result.
 */
def putCachedMetadata(key: String, value: Any) =
SparkEnv.get.hadoopJobMetadata.put(key, value)

/** Add Hadoop configuration specific to a single partition and attempt. */
def addLocalConfiguration(jobTrackerId: String, jobId: Int, splitId: Int, attemptId: Int,
conf: JobConf) {
// generate job id
//val stageId = context.stageId
val jobID = new JobID(jobTrackerId, jobId)
//val attemptId = (attemptId % Int.MaxValue).toInt
val taId = new TaskAttemptID(new TaskID(jobID, true, splitId), attemptId)

conf.set("mapred.tip.id", taId.getTaskID.toString)
Expand Down

0 comments on commit 258f92c

Please sign in to comment.