Skip to content

Commit

Permalink
[keras/engine/base_layer.py,keras/engine/base_layer_utils.py,keras/en…
Browse files Browse the repository at this point in the history
…gine/base_layer_v1.py,keras/engine/base_preprocessing_layer.py,keras/engine/data_adapter.py,keras/engine/functional.py,keras/engine/input_layer.py,keras/engine/training.py,keras/engine/training_v1.py] Standardise docstring usage of "Default to"
  • Loading branch information
SamuelMarks committed Apr 13, 2023
1 parent 0f8e81f commit 9ad7371
Show file tree
Hide file tree
Showing 9 changed files with 45 additions and 42 deletions.
8 changes: 4 additions & 4 deletions keras/engine/base_layer.py
Original file line number Diff line number Diff line change
Expand Up @@ -458,7 +458,7 @@ def __init__(

# Whether the layer will track any layers that is set as attribute on
# itself as sub-layers, the weights from the sub-layers will be included
# in the parent layer's variables() as well. Default to True, which
# in the parent layer's variables() as well. Defaults to `True`, which
# means auto tracking is turned on. Certain subclasses might want to turn
# it off, like Sequential model.
self._auto_track_sub_layers = True
Expand Down Expand Up @@ -3830,9 +3830,9 @@ def __init__(
force_generator: boolean, whether to force the RandomGenerator to use
the code branch of tf.random.Generator. Defaults to `False`.
rng_type: string, the rng type that will be passed to backend
RandomGenerator. Default to `None`, which will allow RandomGenerator
to choose types by itself. Valid values are "stateful", "stateless",
"legacy_stateful".
RandomGenerator. `None` will allow RandomGenerator to choose
types by itself. Valid values are "stateful", "stateless",
"legacy_stateful". Defaults to `None`.
**kwargs: other keyword arguments that will be passed to the parent
class
"""
Expand Down
4 changes: 2 additions & 2 deletions keras/engine/base_layer_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,8 +98,8 @@ def make_variable(
or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
Note, if the current variable scope is marked as non-trainable
then this parameter is ignored and any added variables are also
marked as non-trainable. `trainable` defaults to `True` unless
`synchronization` is set to `ON_READ`.
marked as non-trainable. If `None`, `trainable` is set to `True`
unless `synchronization` is set to `ON_READ`. Defaults to `None`.
caching_device: Passed to `tf.Variable`.
validate_shape: Passed to `tf.Variable`.
constraint: Constraint instance (callable).
Expand Down
2 changes: 1 addition & 1 deletion keras/engine/base_layer_v1.py
Original file line number Diff line number Diff line change
Expand Up @@ -237,7 +237,7 @@ def __init__(

# Whether the layer will track any layers that are set as attribute on
# itself as sub-layers, the weights from the sub-layers will be included
# in the parent layer's variables() as well. Default to True, which
# in the parent layer's variables() as well. Defaults to `True`, which
# means auto tracking is turned on. Certain subclasses might want to turn
# it off, like the Sequential model.
self._auto_track_sub_layers = True
Expand Down
8 changes: 4 additions & 4 deletions keras/engine/base_preprocessing_layer.py
Original file line number Diff line number Diff line change
Expand Up @@ -140,14 +140,14 @@ def compile(self, run_eagerly=None, steps_per_execution=None):
"""Configures the layer for `adapt`.
Arguments:
run_eagerly: Bool. Defaults to `False`. If `True`, this `Model`'s
run_eagerly: Bool. If `True`, this `Model`'s
logic will not be wrapped in a `tf.function`. Recommended to leave
this as `None` unless your `Model` cannot be run inside a
`tf.function`.
steps_per_execution: Int. Defaults to 1. The number of batches to run
`tf.function`. Defaults to `False`.
steps_per_execution: Int. The number of batches to run
during each `tf.function` call. Running multiple batches inside a
single `tf.function` call can greatly improve performance on TPUs or
small models with a large Python overhead.
small models with a large Python overhead. Defaults to `1`.
"""
if steps_per_execution is None:
steps_per_execution = 1
Expand Down
4 changes: 2 additions & 2 deletions keras/engine/data_adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -268,7 +268,7 @@ def __init__(
_check_data_cardinality(inputs)

# If batch_size is not passed but steps is, calculate from the input
# data. Default to 32 for backwards compat.
# data. Defaults to `32` for backwards compatibility.
if not batch_size:
batch_size = int(math.ceil(num_samples / steps)) if steps else 32

Expand Down Expand Up @@ -645,7 +645,7 @@ def __init__(
dataset = dataset.shuffle(num_samples)

# If batch_size is not passed but steps is, calculate from the input
# data. Default to 32 for backwards compatibility.
# data. Defaults to `32` for backwards compatibility.
if not batch_size:
batch_size = int(math.ceil(num_samples / steps)) if steps else 32

Expand Down
4 changes: 2 additions & 2 deletions keras/engine/functional.py
Original file line number Diff line number Diff line change
Expand Up @@ -1647,8 +1647,8 @@ def __init__(self, module, method_name=None, **kwargs):
Args:
module: The `tf.Module` instance to be wrapped.
method_name: (Optional) str. The name of the method to use as the
forward pass of the module. If not set, defaults to '__call__' if
defined, or 'call'.
forward pass of the module. If not set, becomes '__call__' if
defined, or 'call'. Defaults to `None`.
**kwargs: Additional keyword arguments. See `tf.keras.layers.Layer`.
Raises:
Expand Down
4 changes: 2 additions & 2 deletions keras/engine/input_layer.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,12 +88,12 @@ class InputLayer(base_layer.Layer):
will use the `tf.TypeSpec` of this tensor rather
than creating a new placeholder tensor.
sparse: Boolean, whether the placeholder created is meant to be sparse.
Default to `False`.
Defaults to `False`.
ragged: Boolean, whether the placeholder created is meant to be ragged.
In this case, values of `None` in the `shape` argument represent
ragged dimensions. For more information about `tf.RaggedTensor`, see
[this guide](https://www.tensorflow.org/guide/ragged_tensor).
Default to `False`.
Defaults to `False`.
type_spec: A `tf.TypeSpec` object to create Input from. This
`tf.TypeSpec` represents the entire batch. When provided, all other
args except name must be `None`.
Expand Down
49 changes: 26 additions & 23 deletions keras/engine/training.py
Original file line number Diff line number Diff line change
Expand Up @@ -673,12 +673,13 @@ def compile(
coefficients.
weighted_metrics: List of metrics to be evaluated and weighted by
`sample_weight` or `class_weight` during training and testing.
run_eagerly: Bool. Defaults to `False`. If `True`, this `Model`'s
logic will not be wrapped in a `tf.function`. Recommended to leave
this as `None` unless your `Model` cannot be run inside a
`tf.function`. `run_eagerly=True` is not supported when using
`tf.distribute.experimental.ParameterServerStrategy`.
steps_per_execution: Int. Defaults to 1. The number of batches to
run_eagerly: Bool. If `True`, this `Model`'s logic will not be
wrapped in a `tf.function`. Recommended to leave this as `None`
unless your `Model` cannot be run inside a `tf.function`.
`run_eagerly=True` is not supported when using
`tf.distribute.experimental.ParameterServerStrategy`. Defaults to
`False`.
steps_per_execution: Int. The number of batches to
run during each `tf.function` call. Running multiple batches
inside a single `tf.function` call can greatly improve performance
on TPUs or small models with a large Python overhead. At most, one
Expand All @@ -687,7 +688,7 @@ def compile(
the size of the epoch. Note that if `steps_per_execution` is set
to `N`, `Callback.on_batch_begin` and `Callback.on_batch_end`
methods will only be called every `N` batches (i.e. before/after
each `tf.function` execution).
each `tf.function` execution). Defaults to `1`.
jit_compile: If `True`, compile the model training step with XLA.
[XLA](https://www.tensorflow.org/xla) is an optimizing compiler
for machine learning.
Expand All @@ -708,9 +709,10 @@ def compile(
not process the same data. The number of shards should be at least
the number of workers for good performance. A value of 'auto'
turns on exact evaluation and uses a heuristic for the number of
shards based on the number of workers. Defaults to 0, meaning no
shards based on the number of workers. If `0`, no visitation
guarantee is provided. NOTE: Custom implementations of
`Model.test_step` will be ignored when doing exact evaluation.
Defaults to `0`.
**kwargs: Arguments supported for backwards compatibility only.
"""
if jit_compile and not tf_utils.can_jit_compile(warn=True):
Expand Down Expand Up @@ -1457,11 +1459,11 @@ def fit(
of index `epochs` is reached.
verbose: 'auto', 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
'auto' defaults to 1 for most cases, but 2 when used with
'auto' becomes 1 for most cases, but 2 when used with
`ParameterServerStrategy`. Note that the progress bar is not
particularly useful when logged to a file, so verbose=2 is
recommended when not running interactively (eg, in a production
environment).
environment). Defaults to 'auto'.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See `tf.keras.callbacks`. Note
Expand Down Expand Up @@ -2059,11 +2061,11 @@ def evaluate(
they generate batches).
verbose: `"auto"`, 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = single line.
`"auto"` defaults to 1 for most cases, and to 2 when used with
`"auto"` becomes 1 for most cases, and 2 when used with
`ParameterServerStrategy`. Note that the progress bar is not
particularly useful when logged to a file, so `verbose=2` is
recommended when not running interactively (e.g. in a production
environment).
environment). Defaults to 'auto'.
sample_weight: Optional Numpy array of weights for the test samples,
used for weighting the loss function. You can either pass a flat
(1D) Numpy array with the same length as the input samples
Expand Down Expand Up @@ -2419,11 +2421,11 @@ def predict(
(since they generate batches).
verbose: `"auto"`, 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = single line.
`"auto"` defaults to 1 for most cases, and to 2 when used with
`"auto"` becomes 1 for most cases, and 2 when used with
`ParameterServerStrategy`. Note that the progress bar is not
particularly useful when logged to a file, so `verbose=2` is
recommended when not running interactively (e.g. in a production
environment).
environment). Defaults to 'auto'.
steps: Total number of steps (batches of samples)
before declaring the prediction round finished.
Ignored with the default value of `None`. If x is a `tf.data`
Expand Down Expand Up @@ -2958,7 +2960,7 @@ def save(self, filepath, overwrite=True, save_format=None, **kwargs):
SavedModel format arguments:
include_optimizer: Only applied to SavedModel and legacy HDF5
formats. If False, do not save the optimizer state.
Defaults to True.
Defaults to `True`.
signatures: Only applies to SavedModel format. Signatures to save
with the SavedModel. See the `signatures` argument in
`tf.saved_model.save` for details.
Expand Down Expand Up @@ -3051,7 +3053,7 @@ def save_weights(
target location, or provide the user with a manual prompt.
save_format: Either 'tf' or 'h5'. A `filepath` ending in '.h5' or
'.keras' will default to HDF5 if `save_format` is `None`.
Otherwise `None` defaults to 'tf'.
Otherwise, `None` becomes 'tf'. Defaults to `None`.
options: Optional `tf.train.CheckpointOptions` object that specifies
options for saving weights.
Expand Down Expand Up @@ -3366,17 +3368,17 @@ def summary(
(e.g. set this to adapt the display to different
terminal window sizes).
positions: Relative or absolute positions of log elements
in each line. If not provided,
defaults to `[0.3, 0.6, 0.70, 1.]`
in each line. If not provided, becomes
`[0.3, 0.6, 0.70, 1.]`. Defaults to `None`.
print_fn: Print function to use. By default, prints to `stdout`.
If `stdout` doesn't work in your environment, change to `print`.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
expand_nested: Whether to expand the nested models.
If not provided, defaults to `False`.
Defaults to `False`.
show_trainable: Whether to show if a layer is trainable.
If not provided, defaults to `False`.
Defaults to `False`.
layer_range: a list or tuple of 2 strings,
which is the starting layer name and ending layer name
(both inclusive) indicating the range of layers to be printed
Expand Down Expand Up @@ -3942,7 +3944,8 @@ def _get_compile_args(self, user_metrics=True):
Args:
user_metrics: Whether to return user-supplied metrics or `Metric`
objects. Defaults to returning the user-supplied metrics.
objects. If True, returns the user-supplied metrics.
Defaults to `True`.
Returns:
Dictionary of arguments that were used when compiling the model.
Expand Down Expand Up @@ -4186,11 +4189,11 @@ def _get_verbosity(verbose, distribute_strategy):
distribute_strategy._should_use_with_coordinator
or not io_utils.is_interactive_logging_enabled()
):
# Default to epoch-level logging for PSStrategy or using absl
# Defaults to epoch-level logging for PSStrategy or using absl
# logging.
return 2
else:
return 1 # Default to batch-level logging otherwise.
return 1 # Defaults to batch-level logging otherwise.
return verbose


Expand Down
4 changes: 2 additions & 2 deletions keras/engine/training_v1.py
Original file line number Diff line number Diff line change
Expand Up @@ -269,10 +269,10 @@ def compile(
output names (strings) to scalar coefficients.
sample_weight_mode: If you need to do timestep-wise
sample weighting (2D weights), set this to `"temporal"`.
`None` defaults to sample-wise weights (1D).
`None` means sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
dictionary or a list of modes. Defaults to `None`.
weighted_metrics: List of metrics to be evaluated and weighted
by sample_weight or class_weight during training and testing.
target_tensors: By default, Keras will create placeholders for the
Expand Down

0 comments on commit 9ad7371

Please sign in to comment.