Skip to content

Commit

Permalink
fixed indentation
Browse files Browse the repository at this point in the history
  • Loading branch information
iyounus committed Feb 2, 2016
1 parent 2480dc1 commit fd7eb99
Showing 1 changed file with 11 additions and 22 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -74,8 +74,7 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String
/**
* Set the regularization parameter.
* Default is 0.0.
*
* @group setParam
* @group setParam
*/
@Since("1.3.0")
def setRegParam(value: Double): this.type = set(regParam, value)
Expand All @@ -84,8 +83,7 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String
/**
* Set if we should fit the intercept.
* Default is true.
*
* @group setParam
* @group setParam
*/
@Since("1.5.0")
def setFitIntercept(value: Boolean): this.type = set(fitIntercept, value)
Expand All @@ -98,8 +96,7 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String
* the models should be always converged to the same solution when no regularization
* is applied. In R's GLMNET package, the default behavior is true as well.
* Default is true.
*
* @group setParam
* @group setParam
*/
@Since("1.5.0")
def setStandardization(value: Boolean): this.type = set(standardization, value)
Expand All @@ -110,8 +107,7 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String
* For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.
* For 0 < alpha < 1, the penalty is a combination of L1 and L2.
* Default is 0.0 which is an L2 penalty.
*
* @group setParam
* @group setParam
*/
@Since("1.4.0")
def setElasticNetParam(value: Double): this.type = set(elasticNetParam, value)
Expand All @@ -120,8 +116,7 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String
/**
* Set the maximum number of iterations.
* Default is 100.
*
* @group setParam
* @group setParam
*/
@Since("1.3.0")
def setMaxIter(value: Int): this.type = set(maxIter, value)
Expand All @@ -131,8 +126,7 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String
* Set the convergence tolerance of iterations.
* Smaller value will lead to higher accuracy with the cost of more iterations.
* Default is 1E-6.
*
* @group setParam
* @group setParam
*/
@Since("1.4.0")
def setTol(value: Double): this.type = set(tol, value)
Expand All @@ -142,8 +136,7 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String
* Whether to over-/under-sample training instances according to the given weights in weightCol.
* If empty, all instances are treated equally (weight 1.0).
* Default is empty, so all instances have weight one.
*
* @group setParam
* @group setParam
*/
@Since("1.6.0")
def setWeightCol(value: String): this.type = set(weightCol, value)
Expand All @@ -157,8 +150,7 @@ class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String
* solution to the linear regression problem.
* The default value is "auto" which means that the solver algorithm is
* selected automatically.
*
* @group setParam
* @group setParam
*/
@Since("1.6.0")
def setSolver(value: String): this.type = set(solver, value)
Expand Down Expand Up @@ -422,8 +414,7 @@ class LinearRegressionModel private[ml] (

/**
* Evaluates the model on a test set.
*
* @param dataset Test dataset to evaluate model on.
* @param dataset Test dataset to evaluate model on.
*/
// TODO: decide on a good name before exposing to public API
private[regression] def evaluate(dataset: DataFrame): LinearRegressionSummary = {
Expand Down Expand Up @@ -521,8 +512,7 @@ object LinearRegressionModel extends MLReadable[LinearRegressionModel] {
* :: Experimental ::
* Linear regression training results. Currently, the training summary ignores the
* training coefficients except for the objective trace.
*
* @param predictions predictions outputted by the model's `transform` method.
* @param predictions predictions outputted by the model's `transform` method.
* @param objectiveHistory objective function (scaled loss + regularization) at each iteration.
*/
@Since("1.5.0")
Expand All @@ -546,8 +536,7 @@ class LinearRegressionTrainingSummary private[regression] (
/**
* :: Experimental ::
* Linear regression results evaluated on a dataset.
*
* @param predictions predictions outputted by the model's `transform` method.
* @param predictions predictions outputted by the model's `transform` method.
*/
@Since("1.5.0")
@Experimental
Expand Down

0 comments on commit fd7eb99

Please sign in to comment.