diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py
index e568f324f9a31c..a548b019d83b6e 100644
--- a/src/transformers/modeling_utils.py
+++ b/src/transformers/modeling_utils.py
@@ -1614,7 +1614,7 @@ def _get_resized_embeddings(
                 new_num_tokens = old_embeddings.weight.shape[0]
             new_num_tokens = ((new_num_tokens + pad_to_multiple_of - 1) // pad_to_multiple_of) * pad_to_multiple_of
         else:
-            logger.warning(
+            logger.info(
                 "You are resizing the embedding layer without providing a `pad_to_multiple_of` parameter. This means that the new embedding"
                 f" dimension will be {new_num_tokens}. This might induce some performance reduction as *Tensor Cores* will not be available."
                 " For more details about this, or help on choosing the correct value for resizing, refer to this guide:"
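
For context, a minimal sketch of the user-facing code path this diff touches (the model/tokenizer choice and the added token are illustrative, not from the diff): calling `resize_token_embeddings` without `pad_to_multiple_of` hits the `else` branch above, so the Tensor Cores note is now logged at INFO rather than WARNING; passing `pad_to_multiple_of` rounds the new vocabulary size up via `((n + m - 1) // m) * m`.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Illustrative setup; any model with a resizable input embedding works the same way.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

tokenizer.add_tokens(["<custom_token>"])  # vocab grows from 50257 to 50258

# Without `pad_to_multiple_of`: takes the `else` branch in
# `_get_resized_embeddings` and emits the message above (now at INFO level).
model.resize_token_embeddings(len(tokenizer))

# With `pad_to_multiple_of`: size is rounded up to the nearest multiple,
# e.g. ((50258 + 64 - 1) // 64) * 64 == 50304, so Tensor Cores stay usable.
model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=64)
```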