Fixed coding style issues in sql/core
liancheng committed Mar 23, 2014
1 parent fae7b02 commit 0b56f77
Showing 12 changed files with 33 additions and 32 deletions.
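
Every hunk in this commit applies the same Scala import conventions: imports are grouped by origin (scala, third-party libraries, then Spark packages), names inside an import selector are sorted alphabetically, and wildcard imports that exist only to bring implicit conversions into scope are moved into a trailing group under a /* Implicit conversions */ comment, written with their fully qualified path. A minimal sketch of the resulting layout follows; the individual imports are taken from the hunks below, but their combination into a single file is illustrative only and is not a file from this commit.

package org.apache.spark.sql
package execution

// Third-party imports first; names inside braces sorted alphabetically.
import org.apache.hadoop.fs.{FileSystem, Path}

// Spark and Spark SQL imports as their own group, also sorted.
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, Row}
import org.apache.spark.sql.catalyst.types._

/* Implicit conversions */
// Wildcard imports kept only for their implicits go last, under an explicit comment.
import org.apache.spark.SparkContext._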
@@ -22,10 +22,12 @@ import scala.language.implicitConversions
 import scala.reflect._
 import scala.collection.mutable.ArrayBuffer
 
-import org.apache.spark._
-import org.apache.spark.SparkContext._
+import org.apache.spark.{Aggregator, InterruptibleIterator, Logging}
 import org.apache.spark.util.collection.AppendOnlyMap
+
+/* Implicit conversions */
+import org.apache.spark.SparkContext._
 
 /**
  * Extra functions on RDDs that perform only local operations. These can be used when data has
  * already been partitioned correctly.
@@ -23,7 +23,7 @@ import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.plans.{Inner, JoinType}
 import org.apache.spark.sql.catalyst.types.BooleanType
-import org.apache.spark.{OneToOneDependency, Dependency, Partition, TaskContext}
+import org.apache.spark.{Dependency, OneToOneDependency, Partition, TaskContext}
 
 /**
  * <span class="badge" style="float: right; background-color: darkblue;">ALPHA COMPONENT</span>
@@ -20,7 +20,7 @@ package execution
 
 
 import com.esotericsoftware.kryo.{Kryo, Serializer}
-import com.esotericsoftware.kryo.io.{Output, Input}
+import com.esotericsoftware.kryo.io.{Input, Output}
 
 import org.apache.spark.{SparkConf, RangePartitioner, HashPartitioner}
 import org.apache.spark.rdd.ShuffledRDD
@@ -19,11 +19,13 @@ package org.apache.spark.sql
 package execution
 
 import org.apache.spark.SparkContext
-import org.apache.spark.rdd.PartitionLocalRDDFunctions._
 import org.apache.spark.sql.catalyst.errors._
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.plans.physical._
 
+/* Implicit conversions */
+import org.apache.spark.rdd.PartitionLocalRDDFunctions._
+
 /**
  * Groups input data by `groupingExpressions` and computes the `aggregateExpressions` for each
  * group.
@@ -25,7 +25,7 @@ import org.apache.spark.SparkContext
 
 import org.apache.spark.sql.catalyst.errors._
 import org.apache.spark.sql.catalyst.expressions._
-import org.apache.spark.sql.catalyst.plans.physical.{UnspecifiedDistribution, OrderedDistribution}
+import org.apache.spark.sql.catalyst.plans.physical.{OrderedDistribution, UnspecifiedDistribution}
 import org.apache.spark.sql.catalyst.ScalaReflection
 
 case class Project(projectList: Seq[NamedExpression], child: SparkPlan) extends UnaryNode {
@@ -25,5 +25,4 @@ package org.apache.spark.sql
  * documented here in order to make it easier for others to understand the performance
  * characteristics of query plans that are generated by Spark SQL.
  */
-package object execution {
-}
+package object execution
@@ -23,23 +23,22 @@ import scala.collection.JavaConversions._
 
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.permission.FsAction
-import org.apache.hadoop.fs.{Path, FileSystem}
+import org.apache.hadoop.fs.{FileSystem, Path}
 import org.apache.hadoop.mapreduce.Job
 
 import parquet.hadoop.metadata.{FileMetaData, ParquetMetadata}
 import parquet.hadoop.util.ContextUtil
-import parquet.hadoop.{Footer, ParquetFileWriter, ParquetFileReader}
+import parquet.hadoop.{Footer, ParquetFileReader, ParquetFileWriter}
 import parquet.io.api.{Binary, RecordConsumer}
 import parquet.schema.PrimitiveType.{PrimitiveTypeName => ParquetPrimitiveTypeName}
 import parquet.schema.Type.Repetition
-import parquet.schema.{MessageTypeParser, MessageType}
+import parquet.schema.{MessageType, MessageTypeParser}
 import parquet.schema.{PrimitiveType => ParquetPrimitiveType}
 import parquet.schema.{Type => ParquetType}
 
 import org.apache.spark.sql.catalyst.analysis.UnresolvedException
-import org.apache.spark.sql.catalyst.expressions.{Row, AttributeReference, Attribute}
-import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, BaseRelation}
-import org.apache.spark.sql.catalyst.types.ArrayType
+import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, Row}
+import org.apache.spark.sql.catalyst.plans.logical.{BaseRelation, LogicalPlan}
 import org.apache.spark.sql.catalyst.types._
 
 /**
@@ -27,14 +27,14 @@ import org.apache.hadoop.mapreduce._
 import org.apache.hadoop.mapreduce.lib.output.{FileOutputFormat => NewFileOutputFormat}
 
 import parquet.hadoop.util.ContextUtil
-import parquet.hadoop.{ParquetOutputFormat, ParquetInputFormat}
+import parquet.hadoop.{ParquetInputFormat, ParquetOutputFormat}
 import parquet.io.InvalidRecordException
 import parquet.schema.MessageType
 
 import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.catalyst.expressions.{Row, Attribute, Expression}
-import org.apache.spark.sql.execution.{SparkPlan, UnaryNode, LeafNode}
-import org.apache.spark.{TaskContext, SerializableWritable, SparkContext}
+import org.apache.spark.sql.catalyst.expressions.{Attribute, Expression, Row}
+import org.apache.spark.sql.execution.{LeafNode, SparkPlan, UnaryNode}
+import org.apache.spark.{SerializableWritable, SparkContext, TaskContext}
 
 /**
  * Parquet table scan operator. Imports the file that backs the given
@@ -22,12 +22,12 @@ import org.apache.hadoop.conf.Configuration
 import parquet.column.ParquetProperties
 import parquet.hadoop.ParquetOutputFormat
 import parquet.hadoop.api.ReadSupport.ReadContext
-import parquet.hadoop.api.{WriteSupport, ReadSupport}
+import parquet.hadoop.api.{ReadSupport, WriteSupport}
 import parquet.io.api._
-import parquet.schema.{MessageTypeParser, MessageType}
+import parquet.schema.{MessageType, MessageTypeParser}
 
 import org.apache.spark.Logging
-import org.apache.spark.sql.catalyst.expressions.{Row, Attribute}
+import org.apache.spark.sql.catalyst.expressions.{Attribute, Row}
 import org.apache.spark.sql.catalyst.types._
 
 /**
@@ -23,7 +23,7 @@ import org.apache.hadoop.mapreduce.Job
 
 import parquet.hadoop.ParquetWriter
 import parquet.hadoop.util.ContextUtil
-import parquet.schema.{MessageTypeParser, MessageType}
+import parquet.schema.{MessageType, MessageTypeParser}
 
 import org.apache.spark.sql.catalyst.expressions.GenericRow
 import org.apache.spark.sql.catalyst.util.getTempFilePath
@@ -18,12 +18,12 @@
 package org.apache.spark.sql
 package execution
 
+import org.apache.spark.sql.QueryTest
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.plans._
-import org.apache.spark.sql.test._
 
-import TestSQLContext._
-import org.apache.spark.sql.QueryTest
+/* Implicit conversions */
+import org.apache.spark.sql.test.TestSQLContext._
 
 /**
  * This is an example TGF that uses UnresolvedAttributes 'name and 'age to access specific columns
@@ -19,18 +19,17 @@ package org.apache.spark.sql.parquet
 
 import org.scalatest.{BeforeAndAfterAll, FunSuite}
 
+import org.apache.hadoop.fs.{FileSystem, Path}
+import org.apache.hadoop.mapreduce.Job
+import parquet.hadoop.ParquetFileWriter
+import parquet.hadoop.util.ContextUtil
+import parquet.schema.MessageTypeParser
+
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.catalyst.expressions.Row
 import org.apache.spark.sql.catalyst.util.getTempFilePath
 import org.apache.spark.sql.test.TestSQLContext
 
-import org.apache.hadoop.mapreduce.Job
-import org.apache.hadoop.fs.{Path, FileSystem}
-
-import parquet.schema.MessageTypeParser
-import parquet.hadoop.ParquetFileWriter
-import parquet.hadoop.util.ContextUtil
-
 class ParquetQuerySuite extends FunSuite with BeforeAndAfterAll {
   override def beforeAll() {
     ParquetTestData.writeFile()
