From fd7eb99fb7d71e8c799f05a601f46c0001c7debd Mon Sep 17 00:00:00 2001
From: Imran Younus
Date: Tue, 2 Feb 2016 10:49:14 -0800
Subject: [PATCH] fixed indentation

---
 .../ml/regression/LinearRegression.scala | 33 +++++++------------
 1 file changed, 11 insertions(+), 22 deletions(-)

diff --git a/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala
index b82605556ed76..e253f25c0ea65 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala
@@ -74,8 +74,7 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String
   /**
    * Set the regularization parameter.
    * Default is 0.0.
-   *
-   * @group setParam
+   * @group setParam
    */
   @Since("1.3.0")
   def setRegParam(value: Double): this.type = set(regParam, value)
@@ -84,8 +83,7 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String
   /**
    * Set if we should fit the intercept
    * Default is true.
-   *
-   * @group setParam
+   * @group setParam
    */
   @Since("1.5.0")
   def setFitIntercept(value: Boolean): this.type = set(fitIntercept, value)
@@ -98,8 +96,7 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String
    * the models should be always converged to the same solution when no regularization
    * is applied. In R's GLMNET package, the default behavior is true as well.
    * Default is true.
-   *
-   * @group setParam
+   * @group setParam
    */
   @Since("1.5.0")
   def setStandardization(value: Boolean): this.type = set(standardization, value)
@@ -110,8 +107,7 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String
    * For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.
    * For 0 < alpha < 1, the penalty is a combination of L1 and L2.
    * Default is 0.0 which is an L2 penalty.
-   *
-   * @group setParam
+   * @group setParam
    */
   @Since("1.4.0")
   def setElasticNetParam(value: Double): this.type = set(elasticNetParam, value)
@@ -120,8 +116,7 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String
   /**
    * Set the maximum number of iterations.
    * Default is 100.
-   *
-   * @group setParam
+   * @group setParam
    */
   @Since("1.3.0")
   def setMaxIter(value: Int): this.type = set(maxIter, value)
@@ -131,8 +126,7 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String
    * Set the convergence tolerance of iterations.
    * Smaller value will lead to higher accuracy with the cost of more iterations.
    * Default is 1E-6.
-   *
-   * @group setParam
+   * @group setParam
    */
   @Since("1.4.0")
   def setTol(value: Double): this.type = set(tol, value)
@@ -142,8 +136,7 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String
    * Whether to over-/under-sample training instances according to the given weights in weightCol.
    * If empty, all instances are treated equally (weight 1.0).
    * Default is empty, so all instances have weight one.
-   *
-   * @group setParam
+   * @group setParam
    */
   @Since("1.6.0")
   def setWeightCol(value: String): this.type = set(weightCol, value)
@@ -157,8 +150,7 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String
    * solution to the linear regression problem.
    * The default value is "auto" which means that the solver algorithm is
    * selected automatically.
-   *
-   * @group setParam
+   * @group setParam
    */
   @Since("1.6.0")
   def setSolver(value: String): this.type = set(solver, value)
@@ -422,8 +414,7 @@ class LinearRegressionModel private[ml] (
 
   /**
    * Evaluates the model on a testset.
-   *
-   * @param dataset Test dataset to evaluate model on.
+   * @param dataset Test dataset to evaluate model on.
    */
   // TODO: decide on a good name before exposing to public API
   private[regression] def evaluate(dataset: DataFrame): LinearRegressionSummary = {
@@ -521,8 +512,7 @@ object LinearRegressionModel extends MLReadable[LinearRegressionModel] {
  * :: Experimental ::
  * Linear regression training results. Currently, the training summary ignores the
  * training coefficients except for the objective trace.
- *
- * @param predictions predictions outputted by the model's `transform` method.
+ * @param predictions predictions outputted by the model's `transform` method.
  * @param objectiveHistory objective function (scaled loss + regularization) at each iteration.
  */
 @Since("1.5.0")
@@ -546,8 +536,7 @@ class LinearRegressionTrainingSummary private[regression] (
 /**
  * :: Experimental ::
  * Linear regression results evaluated on a dataset.
- *
- * @param predictions predictions outputted by the model's `transform` method.
+ * @param predictions predictions outputted by the model's `transform` method.
  */
 @Since("1.5.0")
 @Experimental
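For reference, the setters whose scaladoc this patch touches are the builder-style configuration methods of org.apache.spark.ml.regression.LinearRegression. A minimal usage sketch against the 1.6-era DataFrame API follows; the sqlContext, the toy data, and the column names ("label", "features", "weight") are illustrative assumptions, not part of the patch.

    import org.apache.spark.ml.regression.LinearRegression
    import org.apache.spark.mllib.linalg.Vectors  // 1.6-era vector type used for the features column

    // Toy training data (assumed): label, feature vector, instance weight.
    // `sqlContext` is assumed to be in scope, e.g. in spark-shell.
    val training = sqlContext.createDataFrame(Seq(
      (1.0, Vectors.dense(0.0, 1.1), 1.0),
      (0.0, Vectors.dense(2.0, 1.0), 2.0),
      (3.0, Vectors.dense(4.0, 0.5), 1.0)
    )).toDF("label", "features", "weight")

    val lr = new LinearRegression()
      .setRegParam(0.1)            // regularization parameter (default 0.0)
      .setElasticNetParam(0.5)     // 0.0 = L2 penalty, 1.0 = L1, in between = mix
      .setFitIntercept(true)
      .setStandardization(true)
      .setMaxIter(100)
      .setTol(1e-6)
      .setWeightCol("weight")      // empty means all instances get weight 1.0
      .setSolver("auto")           // "l-bfgs", "normal", or "auto"

    val model = lr.fit(training)

    // The training summary exposes the objective trace described in the
    // LinearRegressionTrainingSummary doc above.
    model.summary.objectiveHistory.foreach(println)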