[SPARK-13030][ML] Create OneHotEncoderEstimator for OneHotEncoder as Estimator #19527

Closed
wants to merge 14 commits
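
For orientation before the diff below, a rough usage sketch of the OneHotEncoderEstimator API this PR introduces. The column names and toy DataFrame are illustrative assumptions, and the multi-column setInputCols/setOutputCols surface shown here is an assumption about the new class, not something visible in the hunks quoted in this excerpt.

import org.apache.spark.ml.feature.OneHotEncoderEstimator
import org.apache.spark.sql.SparkSession

// Sketch only: names and data below are made up for illustration.
val spark = SparkSession.builder().appName("OneHotEncoderEstimatorSketch").getOrCreate()
import spark.implicits._

// Category indices, e.g. as produced by StringIndexer.
val df = Seq(0.0, 1.0, 2.0, 1.0).toDF("categoryIndex")

val encoder = new OneHotEncoderEstimator()
  .setInputCols(Array("categoryIndex"))
  .setOutputCols(Array("categoryVec"))

// Unlike the old Transformer-only OneHotEncoder, the estimator fits a model that
// records how many categories were seen, so transform() is consistent across datasets.
val model = encoder.fit(df)
model.transform(df).show()
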
@@ -41,8 +41,12 @@ import org.apache.spark.sql.types.{DoubleType, NumericType, StructType}
* The output vectors are sparse.
*
* @see `StringIndexer` for converting categorical values into category indices
+ * @deprecated `OneHotEncoderEstimator` will be renamed `OneHotEncoder` and this `OneHotEncoder`
Member commented:
Note for the future: For 3.0, it'd be nice to do what you're describing here but also leave OneHotEncoderEstimator as a deprecated alias. That way, user code won't break but will have deprecation warnings when upgrading to 3.0.

Member Author replied:
Ok. Sounds good.

+ * will be removed in 3.0.0.
*/
@Since("1.4.0")
+ @deprecated("`OneHotEncoderEstimator` will be renamed `OneHotEncoder` and this `OneHotEncoder`" +
+ " will be removed in 3.0.0.", "2.3.0")
class OneHotEncoder @Since("1.4.0") (@Since("1.4.0") override val uid: String) extends Transformer
with HasInputCol with HasOutputCol with DefaultParamsWritable {
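
Following up on the review thread embedded above: a minimal sketch of how the old name could survive the 3.0 rename as a deprecated alias, so existing user code compiles with a warning instead of breaking. This illustrates the reviewer's suggestion only; it is not code from this PR, and it assumes org.apache.spark.ml.feature has no conflicting package object.

package org.apache.spark.ml

// Hypothetical 3.0-era sketch: OneHotEncoderEstimator has been renamed to
// OneHotEncoder (the estimator), and the old name is kept as a deprecated alias.
package object feature {
  @deprecated("OneHotEncoderEstimator is renamed to OneHotEncoder.", "3.0.0")
  type OneHotEncoderEstimator = OneHotEncoder
}

A type alias only covers the class itself; callers of static-style methods such as OneHotEncoderEstimator.load would additionally need a deprecated forwarding object.
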

@@ -78,56 +82,16 @@ class OneHotEncoder @Since("1.4.0") (@Since("1.4.0") override val uid: String) e
override def transformSchema(schema: StructType): StructType = {
val inputColName = $(inputCol)
val outputColName = $(outputCol)
- val inputFields = schema.fields

require(schema(inputColName).dataType.isInstanceOf[NumericType],
s"Input column must be of type NumericType but got ${schema(inputColName).dataType}")
+ val inputFields = schema.fields
require(!inputFields.exists(_.name == outputColName),
s"Output column $outputColName already exists.")

- val inputAttr = Attribute.fromStructField(schema(inputColName))
- val outputAttrNames: Option[Array[String]] = inputAttr match {
- case nominal: NominalAttribute =>
- if (nominal.values.isDefined) {
- nominal.values
- } else if (nominal.numValues.isDefined) {
- nominal.numValues.map(n => Array.tabulate(n)(_.toString))
- } else {
- None
- }
- case binary: BinaryAttribute =>
- if (binary.values.isDefined) {
- binary.values
- } else {
- Some(Array.tabulate(2)(_.toString))
- }
- case _: NumericAttribute =>
- throw new RuntimeException(
- s"The input column $inputColName cannot be numeric.")
- case _ =>
- None // optimistic about unknown attributes
- }
-
- val filteredOutputAttrNames = outputAttrNames.map { names =>
- if ($(dropLast)) {
- require(names.length > 1,
- s"The input column $inputColName should have at least two distinct values.")
- names.dropRight(1)
- } else {
- names
- }
- }
-
- val outputAttrGroup = if (filteredOutputAttrNames.isDefined) {
- val attrs: Array[Attribute] = filteredOutputAttrNames.get.map { name =>
- BinaryAttribute.defaultAttr.withName(name)
- }
- new AttributeGroup($(outputCol), attrs)
- } else {
- new AttributeGroup($(outputCol))
- }
-
- val outputFields = inputFields :+ outputAttrGroup.toStructField()
+ val outputField = OneHotEncoderCommon.transformOutputColumnSchema(
+ schema(inputColName), $(dropLast), outputColName)
+ val outputFields = inputFields :+ outputField
StructType(outputFields)
}
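
For readers who do not have the new OneHotEncoderCommon file open: a sketch of what transformOutputColumnSchema plausibly does, reconstructed from the transformSchema logic removed above. The real helper added by this PR may differ in naming, visibility, and parameter order; only the call site above is authoritative.

import org.apache.spark.ml.attribute._
import org.apache.spark.sql.types.StructField

// Reconstruction sketch, not the actual OneHotEncoderCommon from this PR.
def transformOutputColumnSchemaSketch(
    inputCol: StructField,
    dropLast: Boolean,
    outputColName: String): StructField = {
  // Try to read category names (or at least a count) from the input column metadata.
  val inputAttr = Attribute.fromStructField(inputCol)
  val outputAttrNames: Option[Array[String]] = inputAttr match {
    case nominal: NominalAttribute =>
      if (nominal.values.isDefined) nominal.values
      else if (nominal.numValues.isDefined) nominal.numValues.map(n => Array.tabulate(n)(_.toString))
      else None
    case binary: BinaryAttribute =>
      if (binary.values.isDefined) binary.values
      else Some(Array.tabulate(2)(_.toString))
    case _: NumericAttribute =>
      throw new RuntimeException(s"The input column ${inputCol.name} cannot be numeric.")
    case _ =>
      None // optimistic about unknown attributes
  }

  // Optionally drop the last category, then expose one binary attribute per category.
  val filtered = outputAttrNames.map { names =>
    if (dropLast) {
      require(names.length > 1,
        s"The input column ${inputCol.name} should have at least two distinct values.")
      names.dropRight(1)
    } else {
      names
    }
  }

  val outputAttrGroup = filtered match {
    case Some(names) =>
      val attrs: Array[Attribute] = names.map(name => BinaryAttribute.defaultAttr.withName(name))
      new AttributeGroup(outputColName, attrs)
    case None =>
      // Size unknown from metadata; transform() falls back to scanning the data (see below).
      new AttributeGroup(outputColName)
  }
  outputAttrGroup.toStructField()
}
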

@@ -136,30 +100,17 @@ class OneHotEncoder @Since("1.4.0") (@Since("1.4.0") override val uid: String) e
// schema transformation
val inputColName = $(inputCol)
val outputColName = $(outputCol)
- val shouldDropLast = $(dropLast)
- var outputAttrGroup = AttributeGroup.fromStructField(

+ val outputAttrGroupFromSchema = AttributeGroup.fromStructField(
transformSchema(dataset.schema)(outputColName))
- if (outputAttrGroup.size < 0) {
- // If the number of attributes is unknown, we check the values from the input column.
- val numAttrs = dataset.select(col(inputColName).cast(DoubleType)).rdd.map(_.getDouble(0))
- .treeAggregate(0.0)(
- (m, x) => {
- assert(x <= Int.MaxValue,
- s"OneHotEncoder only supports up to ${Int.MaxValue} indices, but got $x")
- assert(x >= 0.0 && x == x.toInt,
- s"Values from column $inputColName must be indices, but got $x.")
- math.max(m, x)
- },
- (m0, m1) => {
- math.max(m0, m1)
- }
- ).toInt + 1
- val outputAttrNames = Array.tabulate(numAttrs)(_.toString)
- val filtered = if (shouldDropLast) outputAttrNames.dropRight(1) else outputAttrNames
- val outputAttrs: Array[Attribute] =
- filtered.map(name => BinaryAttribute.defaultAttr.withName(name))
- outputAttrGroup = new AttributeGroup(outputColName, outputAttrs)
- }

+ val outputAttrGroup = if (outputAttrGroupFromSchema.size < 0) {
+ OneHotEncoderCommon.getOutputAttrGroupFromData(
+ dataset, $(dropLast), Seq(inputColName), Seq(outputColName))(0)
+ } else {
+ outputAttrGroupFromSchema
+ }

val metadata = outputAttrGroup.toMetadata()

// data transformation
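
For context, a simplified single-column sketch of the data scan that getOutputAttrGroupFromData presumably performs, reconstructed from the transform() logic removed above. The helper in this PR takes Seq-valued column arguments (see the call site above), presumably so several columns can be handled together; its exact behavior may differ from this reconstruction.

import org.apache.spark.ml.attribute.{Attribute, AttributeGroup, BinaryAttribute}
import org.apache.spark.sql.{functions => F, Dataset}
import org.apache.spark.sql.types.DoubleType

// Reconstruction sketch, not the actual OneHotEncoderCommon from this PR.
def outputAttrGroupFromDataSketch(
    dataset: Dataset[_],
    dropLast: Boolean,
    inputColName: String,
    outputColName: String): AttributeGroup = {
  // One pass over the column to find the largest index, validating along the way
  // that every value is a non-negative integral index.
  val maxIndex = dataset.select(F.col(inputColName).cast(DoubleType)).rdd
    .map(_.getDouble(0))
    .treeAggregate(0.0)(
      (m, x) => {
        assert(x >= 0.0 && x == x.toInt,
          s"Values from column $inputColName must be indices, but got $x.")
        math.max(m, x)
      },
      (m0, m1) => math.max(m0, m1))

  val numAttrs = maxIndex.toInt + 1
  val names = Array.tabulate(numAttrs)(_.toString)
  val filtered = if (dropLast) names.dropRight(1) else names
  val attrs: Array[Attribute] = filtered.map(name => BinaryAttribute.defaultAttr.withName(name))
  new AttributeGroup(outputColName, attrs)
}
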