[python-package] add type hints on Booster eval methods #5433

Merged · 3 commits · Aug 25, 2022
Changes from all commits
43 changes: 36 additions & 7 deletions python-package/lightgbm/basic.py

@@ -20,6 +20,9 @@
 from .compat import PANDAS_INSTALLED, concat, dt_DataTable, pd_CategoricalDtype, pd_DataFrame, pd_Series
 from .libpath import find_lib_path
 
+_LGBM_EvalFunctionResultType = Tuple[str, float, bool]
+_LGBM_BoosterEvalMethodResultType = Tuple[str, str, float, bool]
+
 ZERO_THRESHOLD = 1e-35
 
 
@@ -2617,6 +2620,16 @@ def _dump_text(self, filename: Union[str, Path]) -> "Dataset":
     [np.ndarray, Dataset],
     Tuple[np.ndarray, np.ndarray]
 ]
+_LGBM_CustomEvalFunction = Union[
+    Callable[
+        [np.ndarray, Dataset],
+        _LGBM_EvalFunctionResultType
+    ],
+    Callable[
+        [np.ndarray, Dataset],
+        List[_LGBM_EvalFunctionResultType]
+    ]
+]
 
 
 class Booster:
@@ -3255,7 +3268,12 @@ def lower_bound(self) -> float:
             ctypes.byref(ret)))
         return ret.value
 
-    def eval(self, data, name, feval=None):
+    def eval(
+        self,
+        data: Dataset,
+        name: str,
+        feval: Optional[Union[_LGBM_CustomEvalFunction, List[_LGBM_CustomEvalFunction]]] = None
+    ) -> List[_LGBM_BoosterEvalMethodResultType]:
         """Evaluate for data.
 
         Parameters
@@ -3286,7 +3304,7 @@ def eval(self, data, name, feval=None):
         Returns
         -------
         result : list
-            List with evaluation results.
+            List with (dataset_name, eval_name, eval_result, is_higher_better) tuples.
         """
         if not isinstance(data, Dataset):
             raise TypeError("Can only eval for Dataset instance")
@@ -3305,7 +3323,10 @@
 
         return self.__inner_eval(name, data_idx, feval)
 
-    def eval_train(self, feval=None):
+    def eval_train(
+        self,
+        feval: Optional[Union[_LGBM_CustomEvalFunction, List[_LGBM_CustomEvalFunction]]] = None
+    ) -> List[_LGBM_BoosterEvalMethodResultType]:
         """Evaluate for training data.
 
         Parameters
@@ -3332,11 +3353,14 @@
         Returns
         -------
         result : list
-            List with evaluation results.
+            List with (train_dataset_name, eval_name, eval_result, is_higher_better) tuples.
         """
         return self.__inner_eval(self._train_data_name, 0, feval)
 
-    def eval_valid(self, feval=None):
+    def eval_valid(
+        self,
+        feval: Optional[Union[_LGBM_CustomEvalFunction, List[_LGBM_CustomEvalFunction]]] = None
+    ) -> List[_LGBM_BoosterEvalMethodResultType]:
         """Evaluate for validation data.
 
         Parameters
@@ -3363,7 +3387,7 @@
         Returns
        -------
         result : list
-            List with evaluation results.
+            List with (validation_dataset_name, eval_name, eval_result, is_higher_better) tuples.
         """
         return [item for i in range(1, self.__num_dataset)
                 for item in self.__inner_eval(self.name_valid_sets[i - 1], i, feval)]
@@ -3969,7 +3993,12 @@ def add(root):
         else:
            return hist, bin_edges
 
-    def __inner_eval(self, data_name, data_idx, feval=None):
+    def __inner_eval(
+        self,
+        data_name: str,
+        data_idx: int,
+        feval: Optional[Union[_LGBM_CustomEvalFunction, List[_LGBM_CustomEvalFunction]]] = None
+    ) -> List[_LGBM_BoosterEvalMethodResultType]:
         """Evaluate training or validation data."""
         if data_idx >= self.__num_dataset:
             raise ValueError("Data_idx should be smaller than number of dataset")
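For context, a minimal sketch of how the new annotations line up at the call site. The data and the metric name here are made up for illustration; only lightgbm's public API is assumed. A custom feval receives (preds, eval_data) and returns an _LGBM_EvalFunctionResultType tuple, and the eval* methods return a list of _LGBM_BoosterEvalMethodResultType tuples:

import numpy as np
import lightgbm as lgb

# Hypothetical toy data, only for demonstration.
rng = np.random.default_rng(0)
X_train, y_train = rng.normal(size=(100, 5)), rng.normal(size=100)
X_valid, y_valid = rng.normal(size=(50, 5)), rng.normal(size=50)

train_set = lgb.Dataset(X_train, label=y_train)
valid_set = lgb.Dataset(X_valid, label=y_valid, reference=train_set)

def median_abs_error(preds: np.ndarray, eval_data: lgb.Dataset):
    # Shape of _LGBM_EvalFunctionResultType: (eval_name, eval_result, is_higher_better).
    return "median_abs_error", float(np.median(np.abs(eval_data.get_label() - preds))), False

booster = lgb.train({"objective": "regression", "verbosity": -1},
                    train_set, num_boost_round=10, valid_sets=[valid_set])

# eval_valid() yields _LGBM_BoosterEvalMethodResultType tuples:
# (dataset_name, eval_name, eval_result, is_higher_better).
for dataset_name, eval_name, eval_result, is_higher_better in booster.eval_valid(feval=median_abs_error):
    print(f"{dataset_name} {eval_name}: {eval_result:.4f} (higher_better={is_higher_better})")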
4 changes: 2 additions & 2 deletions python-package/lightgbm/callback.py

@@ -4,10 +4,10 @@
 from functools import partial
 from typing import Any, Callable, Dict, List, Tuple, Union
 
-from .basic import _ConfigAliases, _log_info, _log_warning
+from .basic import _ConfigAliases, _LGBM_BoosterEvalMethodResultType, _log_info, _log_warning
 
 _EvalResultTuple = Union[
-    List[Tuple[str, str, float, bool]],
+    List[_LGBM_BoosterEvalMethodResultType],
     List[Tuple[str, str, float, bool, float]]
 ]
 
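The second member of the _EvalResultTuple union is the five-element variant whose extra float is the metric's standard deviation across cv() folds. A sketch of a hypothetical logging callback that consumes both shapes (print_eval is a made-up name; CallbackEnv and its evaluation_result_list/iteration fields come from lightgbm.callback):

import lightgbm as lgb

def print_eval(env: lgb.callback.CallbackEnv) -> None:
    # env.evaluation_result_list holds _EvalResultTuple entries:
    # 4-tuples from train(), 5-tuples from cv().
    for item in env.evaluation_result_list:
        if len(item) == 4:
            data_name, eval_name, result, _ = item
            print(f"[{env.iteration + 1}] {data_name}'s {eval_name}: {result:g}")
        else:
            data_name, eval_name, result, _, stdv = item
            print(f"[{env.iteration + 1}] {data_name}'s {eval_name}: {result:g} + {stdv:g}")

It would be passed as callbacks=[print_eval] to lgb.train() or lgb.cv().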
11 changes: 5 additions & 6 deletions python-package/lightgbm/sklearn.py

@@ -6,16 +6,15 @@
 
 import numpy as np
 
-from .basic import Booster, Dataset, LightGBMError, _choose_param_value, _ConfigAliases, _log_warning
+from .basic import (Booster, Dataset, LightGBMError, _choose_param_value, _ConfigAliases, _LGBM_EvalFunctionResultType,
+                    _log_warning)
 from .callback import record_evaluation
 from .compat import (SKLEARN_INSTALLED, LGBMNotFittedError, _LGBMAssertAllFinite, _LGBMCheckArray,
                      _LGBMCheckClassificationTargets, _LGBMCheckSampleWeight, _LGBMCheckXY, _LGBMClassifierBase,
                      _LGBMComputeSampleWeight, _LGBMCpuCount, _LGBMLabelEncoder, _LGBMModelBase, _LGBMRegressorBase,
                      dt_DataTable, pd_DataFrame)
 from .engine import train
 
-_EvalResultType = Tuple[str, float, bool]
-
 _LGBM_ScikitCustomObjectiveFunction = Union[
     Callable[
         [np.ndarray, np.ndarray],
@@ -33,15 +32,15 @@
 _LGBM_ScikitCustomEvalFunction = Union[
     Callable[
         [np.ndarray, np.ndarray],
-        Union[_EvalResultType, List[_EvalResultType]]
+        Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]
     ],
     Callable[
         [np.ndarray, np.ndarray, np.ndarray],
-        Union[_EvalResultType, List[_EvalResultType]]
+        Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]
     ],
     Callable[
         [np.ndarray, np.ndarray, np.ndarray, np.ndarray],
-        Union[_EvalResultType, List[_EvalResultType]]
+        Union[_LGBM_EvalFunctionResultType, List[_LGBM_EvalFunctionResultType]]
     ],
 ]
 
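On the scikit-learn side, a minimal sketch of the two-argument variant of _LGBM_ScikitCustomEvalFunction (the smape metric and toy data are invented for illustration; eval_metric accepting a callable is lightgbm's documented API):

import numpy as np
from lightgbm import LGBMRegressor

def smape(y_true: np.ndarray, y_pred: np.ndarray):
    # Returns a single _LGBM_EvalFunctionResultType tuple:
    # (eval_name, eval_result, is_higher_better).
    value = float(np.mean(2.0 * np.abs(y_pred - y_true) / (np.abs(y_true) + np.abs(y_pred) + 1e-9)))
    return "smape", value, False

rng = np.random.default_rng(0)
X, y = rng.uniform(size=(200, 5)), rng.uniform(size=200)
model = LGBMRegressor(n_estimators=10).fit(X, y, eval_set=[(X, y)], eval_metric=smape)

The three- and four-argument members of the union additionally receive sample weights and group information, which is why the alias is a Union of three Callable signatures rather than a single one.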