Skip to content

Commit

Permalink
Initialize converters lazily so that the attributes are resolved first
Browse files — browse the repository at this point in the history
  • Loading branch information
JoshRosen committed May 29, 2015
1 parent 6ad0ebb commit 3f7b2d8
Showing 1 changed file with 3 additions and 12 deletions.
Original file line number | Diff line number | Diff line change
Expand Up @@ -17,13 +17,10 @@

package org.apache.spark.sql.catalyst.expressions

import java.io.{ObjectInputStream, IOException}

import scala.collection.Map

import org.apache.spark.sql.catalyst.{CatalystTypeConverters, trees}
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils

/**
* An expression that produces zero or more rows given a single input row.
Expand Down Expand Up @@ -85,16 +82,10 @@ case class UserDefinedGenerator(
}.asInstanceOf[(Row => Row)]
}

initializeConverters()

@throws(classOf[IOException])
// Custom Java-serialization hook: a private `readObject(ObjectInputStream)`
// is invoked by the JVM when an instance is deserialized. The converters are
// rebuilt here because they are not carried across serialization.
// NOTE(review): Utils.tryOrIOException presumably wraps non-IO failures as
// IOException to honor the declared @throws — confirm against the Utils
// helper, which is not visible in this chunk.
private def readObject(ois: ObjectInputStream): Unit = Utils.tryOrIOException {
  // Restore the default (non-transient) fields first; the converter
  // initialization below depends on those fields being populated.
  ois.defaultReadObject()
  initializeConverters()
}

override def eval(input: Row): TraversableOnce[Row] = {
  // TODO(davies): improve this
  // Lazily set up the converters on first evaluation: `inputRow` remains null
  // until initializeConverters() has run (deferring this lets the attributes
  // be resolved first — see the commit intent).
  if (inputRow == null) initializeConverters()
  // Map the incoming values to Scala types before invoking the user-supplied
  // function; the conversion is schema-driven so that UDTs are handled.
  function(convertToScala(inputRow(input)))
}
Expand Down

0 comments on commit 3f7b2d8

Please sign in to comment.