[python] Remove silent argument #4800

Merged: 11 commits, merged on Nov 20, 2021
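In short: this PR removes the deprecated `silent` argument from `Dataset`, `Booster`, and the scikit-learn and Dask estimators; verbosity is now controlled only through the `verbose` / `verbosity` parameter. A minimal before/after sketch of the migration (synthetic data, illustrative only, not part of the diff):

```python
import numpy as np
import lightgbm as lgb

X = np.random.rand(100, 5)
y = np.random.randint(0, 2, size=100)

# Before this PR: lgb.Dataset(X, y, silent=True)
# After: pass verbosity through params instead.
dtrain = lgb.Dataset(X, y, params={"verbose": -1})
booster = lgb.train({"objective": "binary", "verbose": -1}, dtrain, num_boost_round=5)
```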
50 changes: 8 additions & 42 deletions python-package/lightgbm/basic.py
@@ -412,8 +412,6 @@ class _ConfigAliases:
"two_round": {"two_round",
"two_round_loading",
"use_two_round_loading"},
"verbosity": {"verbosity",
"verbose"},
"weight_column": {"weight_column",
"weight"}}

@@ -1173,7 +1171,7 @@ class Dataset:
"""Dataset in LightGBM."""

def __init__(self, data, label=None, reference=None,
weight=None, group=None, init_score=None, silent='warn',
weight=None, group=None, init_score=None,
feature_name='auto', categorical_feature='auto', params=None,
free_raw_data=True):
"""Initialize Dataset.
@@ -1197,8 +1195,6 @@ def __init__(self, data, label=None, reference=None,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
init_score : list, list of lists (for multi-class task), numpy array, pandas Series, pandas DataFrame (for multi-class task), or None, optional (default=None)
Init score for Dataset.
silent : bool, optional (default=False)
Whether to print messages during construction.
feature_name : list of str, or 'auto', optional (default="auto")
Feature names.
If 'auto' and data is pandas DataFrame, data columns names are used.
@@ -1223,7 +1219,6 @@ def __init__(self, data, label=None, reference=None,
self.weight = weight
self.group = group
self.init_score = init_score
self.silent = silent
self.feature_name = feature_name
self.categorical_feature = categorical_feature
self.params = deepcopy(params)
@@ -1465,8 +1460,7 @@ def _set_init_score_by_predictor(self, predictor, data, used_indices=None):

def _lazy_init(self, data, label=None, reference=None,
weight=None, group=None, init_score=None, predictor=None,
silent=False, feature_name='auto',
categorical_feature='auto', params=None):
feature_name='auto', categorical_feature='auto', params=None):
if data is None:
self.handle = None
return self
@@ -1488,14 +1482,6 @@ def _lazy_init(self, data, label=None, reference=None,
if key in args_names:
_log_warning(f'{key} keyword has been found in `params` and will be ignored.\n'
f'Please use {key} argument of the Dataset constructor to pass this parameter.')
# user can set verbose with params, it has higher priority
if silent != "warn":
_log_warning("'silent' argument is deprecated and will be removed in a future release of LightGBM. "
"Pass 'verbose' parameter via 'params' instead.")
else:
silent = False
if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
params["verbose"] = -1
# get categorical features
if categorical_feature is not None:
categorical_indices = set()
@@ -1816,7 +1802,7 @@ def construct(self):
self._lazy_init(self.data, label=self.label, reference=self.reference,
weight=self.weight, group=self.group,
init_score=self.init_score, predictor=self._predictor,
silent=self.silent, feature_name=self.feature_name, params=self.params)
feature_name=self.feature_name, params=self.params)
else:
# construct subset
used_indices = list_to_1d_numpy(self.used_indices, np.int32, name='used_indices')
@@ -1847,14 +1833,12 @@ def construct(self):
self._lazy_init(self.data, label=self.label,
weight=self.weight, group=self.group,
init_score=self.init_score, predictor=self._predictor,
silent=self.silent, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=self.params)
feature_name=self.feature_name, categorical_feature=self.categorical_feature, params=self.params)
if self.free_raw_data:
self.data = None
return self

def create_valid(self, data, label=None, weight=None, group=None,
init_score=None, silent='warn', params=None):
def create_valid(self, data, label=None, weight=None, group=None, init_score=None, params=None):
"""Create validation data align with current Dataset.

Parameters
@@ -1874,8 +1858,6 @@ def create_valid(self, data, label=None, weight=None, group=None,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
init_score : list, list of lists (for multi-class task), numpy array, pandas Series, pandas DataFrame (for multi-class task), or None, optional (default=None)
Init score for Dataset.
silent : bool, optional (default=False)
Whether to print messages during construction.
params : dict or None, optional (default=None)
Other parameters for validation Dataset.

@@ -1886,7 +1868,7 @@ def create_valid(self, data, label=None, weight=None, group=None,
"""
ret = Dataset(data, label=label, reference=self,
weight=weight, group=group, init_score=init_score,
silent=silent, params=params, free_raw_data=self.free_raw_data)
params=params, free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
return ret
@@ -2562,7 +2544,7 @@ def _dump_text(self, filename):
class Booster:
"""Booster in LightGBM."""

def __init__(self, params=None, train_set=None, model_file=None, model_str=None, silent='warn'):
def __init__(self, params=None, train_set=None, model_file=None, model_str=None):
"""Initialize the Booster.

Parameters
@@ -2575,8 +2557,6 @@ def __init__(self, params=None, train_set=None, model_file=None, model_str=None,
Path to the model file.
model_str : str or None, optional (default=None)
Model will be loaded from this string.
silent : bool, optional (default=False)
Whether to print messages during construction.
"""
self.handle = None
self.network = False
@@ -2587,14 +2567,6 @@ def __init__(self, params=None, train_set=None, model_file=None, model_str=None,
self.best_iteration = -1
self.best_score = {}
params = {} if params is None else deepcopy(params)
# user can set verbose with params, it has higher priority
if silent != 'warn':
_log_warning("'silent' argument is deprecated and will be removed in a future release of LightGBM. "
"Pass 'verbose' parameter via 'params' instead.")
else:
silent = False
if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
params["verbose"] = -1
if train_set is not None:
# Training task
if not isinstance(train_set, Dataset):
@@ -3388,12 +3360,6 @@ def model_from_string(self, model_str, verbose='warn'):
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
if verbose in {'warn', '_silent_false'}:
verbose = verbose == 'warn'
else:
_log_warning("'verbose' argument is deprecated and will be removed in a future release of LightGBM.")
if verbose:
_log_info(f'Finished loading model, total used {int(out_num_iterations.value)} iterations')
self.__num_class = out_num_class.value
self.pandas_categorical = _load_pandas_categorical(model_str=model_str)
return self
@@ -3608,7 +3574,7 @@ def refit(self, data, label, decay_rate=0.9, **kwargs):
default_value=None
)
new_params["linear_tree"] = bool(out_is_linear.value)
train_set = Dataset(data, label, silent=True, params=new_params)
train_set = Dataset(data, label, params=new_params)
new_params['refit_decay_rate'] = decay_rate
new_booster = Booster(new_params, train_set)
# Copy models
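The `basic.py` changes above drop `silent` from `Dataset.__init__`, `Dataset.create_valid`, and `Booster.__init__`. A short sketch of the resulting API (hypothetical variable names, synthetic data):

```python
import numpy as np
import lightgbm as lgb

X, y = np.random.rand(80, 5), np.random.randint(0, 2, size=80)
X_valid, y_valid = np.random.rand(20, 5), np.random.randint(0, 2, size=20)

# Dataset verbosity now comes from params, not a silent flag.
dtrain = lgb.Dataset(X, y, params={"verbose": -1})

# create_valid() no longer accepts silent=...
dvalid = dtrain.create_valid(X_valid, y_valid)

# Booster() likewise: params carry the verbosity setting.
bst = lgb.Booster(params={"objective": "binary", "verbose": -1}, train_set=dtrain)
```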
6 changes: 0 additions & 6 deletions python-package/lightgbm/dask.py
@@ -1108,7 +1108,6 @@ def __init__(
reg_lambda: float = 0.,
random_state: Optional[Union[int, np.random.RandomState]] = None,
n_jobs: int = -1,
silent: bool = "warn",
importance_type: str = 'split',
client: Optional[Client] = None,
**kwargs: Any
@@ -1134,7 +1133,6 @@ def __init__(
reg_lambda=reg_lambda,
random_state=random_state,
n_jobs=n_jobs,
silent=silent,
importance_type=importance_type,
**kwargs
)
@@ -1293,7 +1291,6 @@ def __init__(
reg_lambda: float = 0.,
random_state: Optional[Union[int, np.random.RandomState]] = None,
n_jobs: int = -1,
silent: bool = "warn",
importance_type: str = 'split',
client: Optional[Client] = None,
**kwargs: Any
@@ -1319,7 +1316,6 @@ def __init__(
reg_lambda=reg_lambda,
random_state=random_state,
n_jobs=n_jobs,
silent=silent,
importance_type=importance_type,
**kwargs
)
@@ -1458,7 +1454,6 @@ def __init__(
reg_lambda: float = 0.,
random_state: Optional[Union[int, np.random.RandomState]] = None,
n_jobs: int = -1,
silent: bool = "warn",
importance_type: str = 'split',
client: Optional[Client] = None,
**kwargs: Any
@@ -1484,7 +1479,6 @@ def __init__(
reg_lambda=reg_lambda,
random_state=random_state,
n_jobs=n_jobs,
silent=silent,
importance_type=importance_type,
**kwargs
)
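The Dask estimators only forwarded `silent` to the underlying scikit-learn estimators, so they lose the parameter as well; any verbosity setting now travels through `**kwargs` into the LightGBM params. A sketch, assuming a local Dask cluster (cluster setup and data are illustrative, not from the diff):

```python
import dask.array as da
import lightgbm as lgb
from distributed import Client, LocalCluster

if __name__ == "__main__":
    with Client(LocalCluster(n_workers=2)) as client:
        X = da.random.random((1000, 5), chunks=(500, 5))
        y = da.random.randint(0, 2, size=(1000,), chunks=(500,))

        # silent= is gone; verbose=-1 is forwarded to LightGBM params via **kwargs.
        clf = lgb.DaskLGBMClassifier(n_estimators=10, verbose=-1)
        clf.fit(X, y)
```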
15 changes: 0 additions & 15 deletions python-package/lightgbm/sklearn.py
@@ -398,7 +398,6 @@ def __init__(
reg_lambda: float = 0.,
random_state: Optional[Union[int, np.random.RandomState]] = None,
n_jobs: int = -1,
silent: Union[bool, str] = 'warn',
importance_type: str = 'split',
**kwargs
):
@@ -463,8 +462,6 @@ def __init__(
If None, default seeds in C++ code are used.
n_jobs : int, optional (default=-1)
Number of parallel threads to use for training (can be changed at prediction time).
silent : bool, optional (default=True)
Whether to print messages while running boosting.
importance_type : str, optional (default='split')
The type of feature importance to be filled into ``feature_importances_``.
If 'split', result contains numbers of times the feature is used in a model.
@@ -528,7 +525,6 @@ def __init__(
self.reg_lambda = reg_lambda
self.random_state = random_state
self.n_jobs = n_jobs
self.silent = silent
self.importance_type = importance_type
self._Booster = None
self._evals_result = None
@@ -632,17 +628,6 @@ def fit(self, X, y,
self._fobj = None
params['objective'] = self._objective

# user can set verbose with kwargs, it has higher priority
if self.silent != "warn":
_log_warning("'silent' argument is deprecated and will be removed in a future release of LightGBM. "
"Pass 'verbose' parameter via keyword arguments instead.")
silent = self.silent
else:
silent = True
if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
params['verbose'] = -1
params.pop('silent', None)

params.pop('importance_type', None)
params.pop('n_estimators', None)
params.pop('class_weight', None)
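On the scikit-learn side, `silent` disappears from the estimator constructors and from the `fit()` verbosity handling; the replacement, as the test updates below show, is to pass `verbose=-1` through `**kwargs`. A minimal sketch (synthetic data):

```python
import numpy as np
import lightgbm as lgb

X, y = np.random.rand(100, 5), np.random.randint(0, 2, size=100)

# Before: lgb.LGBMClassifier(..., silent=True)
# After: verbose=-1 is forwarded to LightGBM params via **kwargs.
clf = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1)
clf.fit(X, y)
```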
12 changes: 6 additions & 6 deletions tests/python_package_test/test_engine.py
@@ -1686,8 +1686,8 @@ def preprocess_data(dtrain, dtest, params):
def test_metrics():
X, y = load_digits(n_class=2, return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
lgb_train = lgb.Dataset(X_train, y_train, silent=True)
lgb_valid = lgb.Dataset(X_test, y_test, reference=lgb_train, silent=True)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_valid = lgb.Dataset(X_test, y_test, reference=lgb_train)

evals_result = {}
params_verbose = {'verbose': -1}
@@ -1991,7 +1991,7 @@ def train_booster(params=params_obj_verbose, **kwargs):
assert 'error' in evals_result['valid_0']

X, y = load_digits(n_class=3, return_X_y=True)
lgb_train = lgb.Dataset(X, y, silent=True)
lgb_train = lgb.Dataset(X, y)

obj_multi_aliases = ['multiclass', 'softmax', 'multiclassova', 'multiclass_ova', 'ova', 'ovr']
for obj_multi_alias in obj_multi_aliases:
@@ -2065,8 +2065,8 @@ def test_multiple_feval_train():

X_train, X_validation, y_train, y_validation = train_test_split(X, y, test_size=0.2)

train_dataset = lgb.Dataset(data=X_train, label=y_train, silent=True)
validation_dataset = lgb.Dataset(data=X_validation, label=y_validation, reference=train_dataset, silent=True)
train_dataset = lgb.Dataset(data=X_train, label=y_train)
validation_dataset = lgb.Dataset(data=X_validation, label=y_validation, reference=train_dataset)
evals_result = {}
lgb.train(
params=params,
@@ -2087,7 +2087,7 @@ def test_multiple_feval_cv():

params = {'verbose': -1, 'objective': 'binary', 'metric': 'binary_logloss'}

train_dataset = lgb.Dataset(data=X, label=y, silent=True)
train_dataset = lgb.Dataset(data=X, label=y)

cv_results = lgb.cv(
params=params,
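The engine tests follow the same pattern: datasets are built without `silent=True`, and verbosity lives in the params dict passed to `lgb.train()` / `lgb.cv()`. For instance, a sketch mirroring the test setup above (synthetic data, illustrative only):

```python
import numpy as np
import lightgbm as lgb

X, y = np.random.rand(100, 5), np.random.randint(0, 2, size=100)
params = {"verbose": -1, "objective": "binary", "metric": "binary_logloss"}

train_dataset = lgb.Dataset(data=X, label=y)  # no silent=True anymore
cv_results = lgb.cv(params=params, train_set=train_dataset, num_boost_round=5, nfold=3)
```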
12 changes: 6 additions & 6 deletions tests/python_package_test/test_plotting.py
@@ -45,7 +45,7 @@ def test_plot_importance(params, breast_cancer_split, train_data):
assert ax0.get_ylabel() == 'Features'
assert len(ax0.patches) <= 30

gbm1 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True)
gbm1 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1)
gbm1.fit(X_train, y_train)

ax1 = lgb.plot_importance(gbm1, color='r', title='t', xlabel='x', ylabel='y')
@@ -75,7 +75,7 @@ def test_plot_importance(params, breast_cancer_split, train_data):
assert ax3.get_ylabel() == 'y @importance_type@'
assert len(ax3.patches) <= 30

gbm2 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True, importance_type="gain")
gbm2 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1, importance_type="gain")
gbm2.fit(X_train, y_train)

def get_bounds_of_first_patch(axes):
@@ -107,7 +107,7 @@ def test_plot_split_value_histogram(params, breast_cancer_split, train_data):
assert ax0.get_ylabel() == 'Count'
assert len(ax0.patches) <= 2

gbm1 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True)
gbm1 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1)
gbm1.fit(X_train, y_train)

ax1 = lgb.plot_split_value_histogram(gbm1, gbm1.booster_.feature_name()[27], figsize=(10, 5),
@@ -142,7 +142,7 @@ def test_plot_split_value_histogram(params, breast_cancer_split, train_data):
reason='matplotlib or graphviz is not installed')
def test_plot_tree(breast_cancer_split):
X_train, _, y_train, _ = breast_cancer_split
gbm = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True)
gbm = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1)
gbm.fit(X_train, y_train, verbose=False)

with pytest.raises(IndexError):
@@ -160,7 +160,7 @@ def test_create_tree_digraph(breast_cancer_split):
X_train, _, y_train, _ = breast_cancer_split

constraints = [-1, 1] * int(X_train.shape[1] / 2)
gbm = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True, monotone_constraints=constraints)
gbm = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1, monotone_constraints=constraints)
gbm.fit(X_train, y_train, verbose=False)

with pytest.raises(IndexError):
@@ -264,7 +264,7 @@ def test_plot_metrics(params, breast_cancer_split, train_data):
with pytest.raises(ValueError, match="eval results cannot be empty."):
lgb.plot_metric(evals_result1)

gbm2 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True)
gbm2 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1)
gbm2.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=False)
ax4 = lgb.plot_metric(gbm2, title=None, xlabel=None, ylabel=None)
assert isinstance(ax4, matplotlib.axes.Axes)