Fix Ordering and Naming of Imports in Examples
techaddict committed Apr 16, 2014
1 parent 6c7e543 commit 3ce69c3
Showing 20 changed files with 54 additions and 29 deletions.
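
All 20 files are brought to the same import layout, which can be read off the hunks below: imports are grouped into java.*, then scala.*, then third-party libraries (Breeze, Colt, Cassandra, Hadoop, HBase), then org.apache.spark.*, with a blank line between groups, and relative imports such as "import SparkContext._" are replaced with fully qualified ones. A minimal sketch of the target layout (the imports here are illustrative, not taken from any one file in the commit):

    package org.apache.spark.examples

    // 1. java.* imports come first
    import java.util.Random

    // 2. then scala.* imports
    import scala.math.exp

    // 3. then other third-party libraries
    import breeze.linalg.DenseVector

    // 4. then org.apache.spark.*, fully qualified rather than the
    //    relative form "import SparkContext._"
    import org.apache.spark.SparkContext
    import org.apache.spark.SparkContext._

Writing the Spark imports fully qualified also avoids a fragility of Scala's relative imports: "import SparkContext._" only compiles because an earlier "import org.apache.spark._" brought SparkContext into scope, and it stops compiling if that wildcard import is removed.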

examples/src/main/scala/org/apache/spark/examples/CassandraCQLTest.scala
@@ -18,15 +18,18 @@
package org.apache.spark.examples

import java.nio.ByteBuffer

import scala.collection.JavaConversions._
import scala.collection.mutable.ListBuffer
import scala.collection.immutable.Map

import org.apache.cassandra.hadoop.ConfigHelper
import org.apache.cassandra.hadoop.cql3.CqlPagingInputFormat
import org.apache.cassandra.hadoop.cql3.CqlConfigHelper
import org.apache.cassandra.hadoop.cql3.CqlOutputFormat
import org.apache.cassandra.utils.ByteBufferUtil
import org.apache.hadoop.mapreduce.Job

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._

examples/src/main/scala/org/apache/spark/examples/CassandraTest.scala
@@ -17,19 +17,21 @@

package org.apache.spark.examples

import java.nio.ByteBuffer
import java.util.SortedMap

import scala.collection.JavaConversions._

import org.apache.cassandra.db.IColumn
import org.apache.cassandra.hadoop.ColumnFamilyOutputFormat
import org.apache.cassandra.hadoop.ConfigHelper
import org.apache.cassandra.hadoop.ColumnFamilyInputFormat
import org.apache.cassandra.thrift._
import org.apache.cassandra.utils.ByteBufferUtil
import org.apache.hadoop.mapreduce.Job

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._

/*
* This example demonstrates using Spark with Cassandra with the New Hadoop API and Cassandra

examples/src/main/scala/org/apache/spark/examples/GroupByTest.scala
@@ -17,9 +17,10 @@

package org.apache.spark.examples

import java.util.Random

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._

object GroupByTest {
def main(args: Array[String]) {

examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala
@@ -17,12 +17,13 @@

package org.apache.spark.examples

import org.apache.hadoop.hbase.client.HBaseAdmin
import org.apache.hadoop.hbase.{HBaseConfiguration, HTableDescriptor}
import org.apache.hadoop.hbase.mapreduce.TableInputFormat

import org.apache.spark._
import org.apache.spark.rdd.NewHadoopRDD

object HBaseTest {
def main(args: Array[String]) {
val sc = new SparkContext(args(0), "HBaseTest",

examples/src/main/scala/org/apache/spark/examples/LocalALS.scala
@@ -18,9 +18,10 @@
package org.apache.spark.examples

import scala.math.sqrt

import cern.colt.matrix._
import cern.colt.matrix.linalg._
import cern.jet.math._

/**
* Alternating least squares matrix factorization.

examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala
@@ -18,6 +18,7 @@
package org.apache.spark.examples

import java.util.Random

import breeze.linalg.{Vector, DenseVector}

object LocalFileLR {

examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala
@@ -18,12 +18,13 @@
package org.apache.spark.examples

import java.util.Random

import scala.collection.mutable.HashMap
import scala.collection.mutable.HashSet

import breeze.linalg.{Vector, DenseVector, squaredDistance}

import org.apache.spark.SparkContext._

/**
* K-means clustering.

examples/src/main/scala/org/apache/spark/examples/LocalLR.scala
@@ -18,6 +18,7 @@
package org.apache.spark.examples

import java.util.Random

import breeze.linalg.{Vector, DenseVector}

/**

examples/src/main/scala/org/apache/spark/examples/LocalPi.scala
@@ -18,8 +18,9 @@
package org.apache.spark.examples

import scala.math.random

import org.apache.spark._
import org.apache.spark.SparkContext._

object LocalPi {
def main(args: Array[String]) {

examples/src/main/scala/org/apache/spark/examples/LogQuery.scala
@@ -19,6 +19,7 @@ package org.apache.spark.examples

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._

/**
* Executes a roll up-style query against Apache logs.
*/

examples/src/main/scala/org/apache/spark/examples/MultiBroadcastTest.scala
@@ -17,8 +17,8 @@

package org.apache.spark.examples

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

object MultiBroadcastTest {
def main(args: Array[String]) {

examples/src/main/scala/org/apache/spark/examples/SimpleSkewedGroupByTest.scala
@@ -17,9 +17,10 @@

package org.apache.spark.examples

import java.util.Random

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._

object SimpleSkewedGroupByTest {
def main(args: Array[String]) {

examples/src/main/scala/org/apache/spark/examples/SkewedGroupByTest.scala
@@ -17,9 +17,10 @@

package org.apache.spark.examples

import java.util.Random

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._

object SkewedGroupByTest {
def main(args: Array[String]) {

examples/src/main/scala/org/apache/spark/examples/SparkALS.scala
@@ -18,9 +18,11 @@
package org.apache.spark.examples

import scala.math.sqrt

import cern.colt.matrix._
import cern.colt.matrix.linalg._
import cern.jet.math._

import org.apache.spark._

/**

examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
@@ -18,12 +18,15 @@
package org.apache.spark.examples

import java.util.Random

import scala.math.exp

import breeze.linalg.{Vector, DenseVector}

import org.apache.spark._
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.scheduler.InputFormatInfo

/**
* Logistic regression based classification.

examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
@@ -18,12 +18,12 @@
package org.apache.spark.examples

import java.util.Random

import breeze.linalg.{Vector, DenseVector, squaredDistance}

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._

/**
* K-means clustering.
*/

examples/src/main/scala/org/apache/spark/examples/SparkLR.scala
@@ -18,11 +18,13 @@
package org.apache.spark.examples

import java.util.Random

import scala.math.exp

import breeze.linalg.{Vector, DenseVector}

import org.apache.spark._

/**
* Logistic regression based classification.
*/

examples/src/main/scala/org/apache/spark/examples/SparkPageRank.scala
@@ -20,7 +20,6 @@ package org.apache.spark.examples
import org.apache.spark.SparkContext._
import org.apache.spark.SparkContext

/**
* Computes the PageRank of URLs from an input file. Input file should
* be in format of:

examples/src/main/scala/org/apache/spark/examples/SparkTC.scala
@@ -17,11 +17,12 @@

package org.apache.spark.examples

import scala.util.Random
import scala.collection.mutable

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._

/**
* Transitive closure on a graph.
*/

examples/src/main/scala/org/apache/spark/examples/SparkTachyonHdfsLR.scala
@@ -18,13 +18,16 @@
package org.apache.spark.examples

import java.util.Random

import scala.math.exp

import breeze.linalg.{Vector, DenseVector}

import org.apache.spark._
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.scheduler.InputFormatInfo
import org.apache.spark.storage.StorageLevel

/**
* Logistic regression based classification.