diff --git a/python-package/lightgbm/basic.py b/python-package/lightgbm/basic.py
index 3a07cc1cb8f3..0e04539a69d2 100644
--- a/python-package/lightgbm/basic.py
+++ b/python-package/lightgbm/basic.py
@@ -1784,7 +1784,7 @@ def __sample(self, seqs: List[Sequence], total_nrow: int) -> Tuple[List[np.ndarr
     def __init_from_seqs(
         self,
         seqs: List[Sequence],
-        ref_dataset: Optional[_DatasetHandle] = None
+        ref_dataset: Optional[_DatasetHandle]
     ) -> "Dataset":
         """
         Initialize data from list of Sequence objects.
@@ -4275,7 +4275,7 @@ def __inner_eval(
         self,
         data_name: str,
         data_idx: int,
-        feval: Optional[Union[_LGBM_CustomEvalFunction, List[_LGBM_CustomEvalFunction]]] = None
+        feval: Optional[Union[_LGBM_CustomEvalFunction, List[_LGBM_CustomEvalFunction]]]
     ) -> List[_LGBM_BoosterEvalMethodResultType]:
         """Evaluate training or validation data."""
         if data_idx >= self.__num_dataset:
diff --git a/python-package/lightgbm/callback.py b/python-package/lightgbm/callback.py
index 3cd77a831d8f..330bfd5c76a7 100644
--- a/python-package/lightgbm/callback.py
+++ b/python-package/lightgbm/callback.py
@@ -49,7 +49,7 @@ def __init__(self, best_iteration: int, best_score: _EvalResultTuple) -> None:
      "evaluation_result_list"])
 
 
-def _format_eval_result(value: _EvalResultTuple, show_stdv: bool = True) -> str:
+def _format_eval_result(value: _EvalResultTuple, show_stdv: bool) -> str:
     """Format metric string."""
     if len(value) == 4:
         return f"{value[0]}'s {value[1]}: {value[2]:g}"
@@ -338,7 +338,7 @@ def _init(self, env: CallbackEnv) -> None:
     def _final_iteration_check(self, env: CallbackEnv, eval_name_splitted: List[str], i: int) -> None:
         if env.iteration == env.end_iteration - 1:
             if self.verbose:
-                best_score_str = '\t'.join([_format_eval_result(x) for x in self.best_score_list[i]])
+                best_score_str = '\t'.join([_format_eval_result(x, show_stdv=True) for x in self.best_score_list[i]])
                 _log_info('Did not meet early stopping. '
                           f'Best iteration is:\n[{self.best_iter[i] + 1}]\t{best_score_str}')
                 if self.first_metric_only:
@@ -364,7 +364,7 @@ def __call__(self, env: CallbackEnv) -> None:
                 continue  # train data for lgb.cv or sklearn wrapper (underlying lgb.train)
             elif env.iteration - self.best_iter[i] >= self.stopping_rounds:
                 if self.verbose:
-                    eval_result_str = '\t'.join([_format_eval_result(x) for x in self.best_score_list[i]])
+                    eval_result_str = '\t'.join([_format_eval_result(x, show_stdv=True) for x in self.best_score_list[i]])
                     _log_info(f"Early stopping, best iteration is:\n[{self.best_iter[i] + 1}]\t{eval_result_str}")
                     if self.first_metric_only:
                         _log_info(f"Evaluated only: {eval_name_splitted[-1]}")
diff --git a/python-package/lightgbm/engine.py b/python-package/lightgbm/engine.py
index 93e7953fc6a6..c112f762e946 100644
--- a/python-package/lightgbm/engine.py
+++ b/python-package/lightgbm/engine.py
@@ -434,10 +434,10 @@ def _make_n_folds(
     nfold: int,
     params: Dict[str, Any],
     seed: int,
-    fpreproc: Optional[_LGBM_PreprocFunction] = None,
-    stratified: bool = True,
-    shuffle: bool = True,
-    eval_train_metric: bool = False
+    fpreproc: Optional[_LGBM_PreprocFunction],
+    stratified: bool,
+    shuffle: bool,
+    eval_train_metric: bool
 ) -> CVBooster:
     """Make a n-fold list of Booster from random indices."""
     full_data = full_data.construct()
@@ -685,7 +685,7 @@ def cv(
              .set_categorical_feature(categorical_feature)
 
     results = collections.defaultdict(list)
-    cvfolds = _make_n_folds(train_set, folds=folds, nfold=nfold,
+    cvfolds = _make_n_folds(full_data=train_set, folds=folds, nfold=nfold,
                             params=params, seed=seed, fpreproc=fpreproc,
                             stratified=stratified, shuffle=shuffle,
                             eval_train_metric=eval_train_metric)
diff --git a/python-package/lightgbm/plotting.py b/python-package/lightgbm/plotting.py
index 483dcddbc96a..58b26b0f5f3f 100644
--- a/python-package/lightgbm/plotting.py
+++ b/python-package/lightgbm/plotting.py
@@ -20,13 +20,13 @@
 ]
 
 
-def _check_not_tuple_of_2_elements(obj: Any, obj_name: str = 'obj') -> None:
+def _check_not_tuple_of_2_elements(obj: Any, obj_name: str) -> None:
     """Check object is not tuple or does not have 2 elements."""
     if not isinstance(obj, tuple) or len(obj) != 2:
         raise TypeError(f"{obj_name} must be a tuple of 2 elements.")
 
 
-def _float2str(value: float, precision: Optional[int] = None) -> str:
+def _float2str(value: float, precision: Optional[int]) -> str:
     return (f"{value:.{precision}f}"
             if precision is not None and not isinstance(value, str)
             else str(value))