diff --git a/src/main/scala/org/apache/spark/sql/TiStrategy.scala b/src/main/scala/org/apache/spark/sql/TiStrategy.scala
index 558645eaad..043dc29da1 100644
--- a/src/main/scala/org/apache/spark/sql/TiStrategy.scala
+++ b/src/main/scala/org/apache/spark/sql/TiStrategy.scala
@@ -25,7 +25,7 @@ import com.pingcap.tispark.TiUtils._
 import com.pingcap.tispark.{BasicExpression, TiConfigConst, TiDBRelation, TiUtils}
 import org.apache.spark.internal.Logging
 import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateExpression, _}
-import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeReference, AttributeSet, Cast, Divide, ExprId, Expression, IntegerLiteral, NamedExpression, SortOrder}
+import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeSet, Cast, Divide, Expression, IntegerLiteral, NamedExpression, SortOrder}
 import org.apache.spark.sql.catalyst.expressions.NamedExpression.newExprId
 import org.apache.spark.sql.catalyst.planning.{PhysicalAggregation, PhysicalOperation}
 import org.apache.spark.sql.catalyst.plans.logical
@@ -35,8 +35,6 @@ import org.apache.spark.sql.execution.datasources.LogicalRelation
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types._
 
-import scala.collection.mutable
-
 // TODO: Too many hacks here since we hijack the planning
 // but we don't have full control over planning stage
 // We cannot pass context around during planning so
@@ -203,8 +201,7 @@ class TiStrategy(context: SQLContext) extends Strategy with Logging {
     child: LogicalPlan,
     project: Seq[NamedExpression]
   ): SparkPlan = {
-    // If sortOrder is not null, limit must be greater than 0
-    if (limit < 0 || (sortOrder == null && limit == 0)) {
+    if (limit < 0 || (sortOrder.isEmpty && limit == 0)) {
      return execution.TakeOrderedAndProjectExec(limit, sortOrder, project, planLater(child))
    }
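
Note on the guard change: Catalyst hands this method a Seq[SortOrder], which is Seq.empty rather than null when the query carries no ordering, so the old `sortOrder == null` test could never be true and a zero limit with no ordering slipped past the early return to Spark's TakeOrderedAndProjectExec. Below is a minimal sketch of the patched predicate; the SortOrder case class and shouldFallBack helper are hypothetical stand-ins for illustration, not code from this patch.

object SortOrderGuardSketch extends App {
  // Hypothetical stand-in for Catalyst's SortOrder, for this sketch only.
  case class SortOrder(column: String)

  // Mirrors the patched guard: return Spark's TakeOrderedAndProjectExec
  // directly when the limit is negative, or when it is zero and there is
  // no ordering to push down.
  def shouldFallBack(limit: Int, sortOrder: Seq[SortOrder]): Boolean =
    limit < 0 || (sortOrder.isEmpty && limit == 0)

  println(shouldFallBack(0, Seq.empty))           // true: caught by isEmpty, missed by == null
  println(shouldFallBack(0, Seq(SortOrder("a")))) // false: ordered zero-limit proceeds
  println(shouldFallBack(-1, Seq.empty))          // true: negative limit always falls back
}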