Commit 5cf72f4: fix the rest
haifeng-jin authored May 31, 2022
1 parent 564b8d9 commit 5cf72f4
Showing 25 changed files with 44 additions and 74 deletions.
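Most of the changes below swap pylint suppression comments for their flake8 `# noqa` equivalents and reflow comments or code to fit flake8's 80-column limit. A minimal before/after sketch of the pattern (illustrative, not a line from this diff):

    # before: pylint-style suppression
    from keras.saving import *  # pylint: disable=wildcard-import
    # after: flake8-style suppression (F401 = imported but unused,
    # F403 = star import makes undefined names undetectable)
    from keras.saving import *  # noqa: F401,F403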
1 change: 1 addition & 0 deletions keras/benchmarks/layer_benchmarks/run_xprof.py
@@ -16,6 +16,7 @@
from __future__ import division as _division
from __future__ import print_function as _print_function

import os
import time
import uuid

2 changes: 1 addition & 1 deletion keras/callbacks.py
@@ -1564,7 +1564,7 @@ def _save_model(self, epoch, batch, logs):
)

self._maybe_remove_file()
except IsADirectoryError as e: # h5py 3.x
except IsADirectoryError: # h5py 3.x
raise IOError(
"Please specify a non-directory filepath for "
"ModelCheckpoint. Filepath used is an existing "
7 changes: 4 additions & 3 deletions keras/callbacks_test.py
@@ -33,8 +33,8 @@

import keras
from keras.callbacks import BackupAndRestore
from keras.callbacks import Callback
from keras.callbacks import BackupAndRestoreExperimental
from keras.callbacks import Callback
from keras.engine import sequential
from keras.layers import Activation
from keras.layers import Dense
@@ -387,7 +387,7 @@ def on_epoch_end(self, epoch, log=None):
if epoch == 5 or epoch == 12:
raise RuntimeError("Interruption")

log_dir = self.get_temp_dir()
self.get_temp_dir()

# The following asserts that the train counter is fault tolerant.
self.assertEqual(model._train_counter.numpy(), 0)
@@ -462,7 +462,8 @@ def _test_backup_and_restore_callback_at_steps(
)

class InterruptingCallback(keras.callbacks.Callback):
"""A callback to intentionally introduce interruption to training."""
"""A callback to intentionally introduce interruption to
training."""

batch_count = 0

21 changes: 11 additions & 10 deletions keras/distribute/worker_training_state.py
@@ -62,8 +62,8 @@ def __init__(self, model, checkpoint_dir, save_freq="epoch"):
backend.set_value(
self._ckpt_saved_batch, self.CKPT_SAVED_BATCH_UNUSED_VALUE
)
# _ckpt_saved_epoch and _ckpt_saved_batch gets tracked and is included in
# the checkpoint file when backing up.
# _ckpt_saved_epoch and _ckpt_saved_batch gets tracked and is included
# in the checkpoint file when backing up.
checkpoint = tf.train.Checkpoint(
model=self._model,
ckpt_saved_epoch=self._ckpt_saved_epoch,
@@ -155,8 +155,8 @@ def maybe_load_initial_counters_from_ckpt(
Returns:
If the training is recovering from previous failure under multi-worker
training setting, return the (epoch, step) the training is supposed to
continue at. Otherwise, return the `initial_epoch, initial_step` the user
passes in.
continue at. Otherwise, return the `initial_epoch, initial_step` the
user passes in.
"""

initial_step = 0
@@ -165,19 +165,20 @@
if mode == mode_keys.ModeKeys.TRAIN:
if self._save_freq == "epoch":
if epoch >= 0:
# The most recently saved epoch is one epoch prior to the epoch it
# failed at, so return the value of 'self._ckpt_saved_epoch' plus one.
# The most recently saved epoch is one epoch prior to the
# epoch it failed at, so return the value of
# 'self._ckpt_saved_epoch' plus one.
initial_epoch = epoch + 1
else:
if batch >= 0 and epoch >= 0:
# If the checkpoint was last saved at last batch of the epoch, return
# the next epoch number and batch=0
# If the checkpoint was last saved at last batch of the
# epoch, return the next epoch number and batch=0
if batch == steps_per_epoch - 1:
initial_epoch = epoch + 1
initial_step = 0
else:
# If the checkpoint was not last saved at last batch of the epoch,
# return the same epoch and next batch number
# If the checkpoint was not last saved at last batch of
# the epoch, return the same epoch and next batch number
initial_epoch = epoch
initial_step = batch + 1
return (initial_epoch, initial_step)
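The resume arithmetic documented in the comments above reduces to a small decision rule. A hedged sketch (hypothetical helper, not code from this commit):

    def resume_point(saved_epoch, saved_batch, steps_per_epoch, save_freq):
        """Map the last-saved (epoch, batch) to the (initial_epoch,
        initial_step) a restarted worker should resume from."""
        if save_freq == "epoch":
            # Checkpoints land at epoch boundaries: resume at the next epoch.
            return saved_epoch + 1, 0
        if saved_batch == steps_per_epoch - 1:
            # Saved at the final batch of an epoch: next epoch, step 0.
            return saved_epoch + 1, 0
        # Saved mid-epoch: same epoch, next batch.
        return saved_epoch, saved_batch + 1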
4 changes: 1 addition & 3 deletions keras/dtensor/lazy_variable.py
@@ -181,9 +181,7 @@ def __init__(
# TODO(scottzhu): This method and create_and_initialize might be removed if
# we decide to just use the tf.Variable to replace this class.
def initialize(self):
with ops.name_scope(
self._name, "Variable", skip_on_eager=False
) as name:
with ops.name_scope(self._name, "Variable", skip_on_eager=False):
with ops.colocate_with(self._handle), ops.name_scope("Initializer"):
if callable(self._initial_value):
initial_value = self._initial_value()
4 changes: 2 additions & 2 deletions keras/engine/base_layer.py
@@ -685,6 +685,7 @@ def add_weight(
and dtype.is_floating
):
old_getter = getter

# Wrap variable constructor to return an AutoCastVariable.
def getter(*args, **kwargs): # pylint: disable=function-redefined
variable = old_getter(*args, **kwargs)
@@ -3082,9 +3083,8 @@ def __setattr__(self, name, value):
if (
name == "_self_setattr_tracking"
or not getattr(self, "_self_setattr_tracking", True)
or
# Exclude @property.setters from tracking
hasattr(self.__class__, name)
or hasattr(self.__class__, name)
):
try:
super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(
6 changes: 2 additions & 4 deletions keras/engine/base_layer_v1.py
@@ -1279,10 +1279,9 @@ def add_update(self, updates):
if (
tf.distribute.has_strategy()
and tf.distribute.in_cross_replica_context()
and
# When saving the model, the distribution strategy context should be
# ignored, following the default path for adding updates.
not call_context.saving
and not call_context.saving
):
# Updates don't need to be run in a cross-replica context.
return
@@ -2330,9 +2329,8 @@ def __setattr__(self, name, value):
if (
name == "_self_setattr_tracking"
or not getattr(self, "_self_setattr_tracking", True)
or
# Exclude @property.setters from tracking
hasattr(self.__class__, name)
or hasattr(self.__class__, name)
):
try:
super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(
3 changes: 1 addition & 2 deletions keras/engine/functional.py
@@ -1237,9 +1237,8 @@ def _should_skip_first_node(layer):
if layer._self_tracked_trackables:
return (
isinstance(layer, Functional)
and
# Filter out Sequential models without an input shape.
isinstance(
and isinstance(
layer._self_tracked_trackables[0], input_layer_module.InputLayer
)
)
2 changes: 1 addition & 1 deletion keras/engine/saving.py
@@ -18,4 +18,4 @@
Everything has been moved to keras/saving/. This file will be deleted soon.
"""

from keras.saving import * # noqa: F401
from keras.saving import * # noqa: F401,F403
4 changes: 2 additions & 2 deletions keras/engine/sequential.py
@@ -333,7 +333,7 @@ def _build_graph_network_for_inferred_shape(
# Create Functional API connection by calling the
# current layer
layer_output = layer(layer_input)
except: # pylint:disable=bare-except
except: # noqa: E722
# Functional API calls may fail for a number of
# reasons: 1) The layer may be buggy. In this case
# it will be easier for the user to debug if we fail
@@ -367,7 +367,7 @@ def _build_graph_network_for_inferred_shape(
# not be supporting such layers.
self._init_graph_network(inputs, outputs)
self._graph_initialized = True
except: # pylint:disable=bare-except
except: # noqa: E722
self._use_legacy_deferred_behavior = True
self._inferred_input_shape = new_shape

6 changes: 3 additions & 3 deletions keras/engine/training.py
@@ -1547,7 +1547,7 @@ def fit(
(
data_handler._initial_epoch,
data_handler._initial_step,
) = self._maybe_load_initial_counters_from_ckpt( # pylint: disable=protected-access
) = self._maybe_load_initial_counters_from_ckpt(
steps_per_epoch_inferred, initial_epoch
)
logs = None
@@ -3523,8 +3523,8 @@ def _maybe_load_initial_counters_from_ckpt(
Returns:
If the training is recovering from previous failure under multi-worker
training setting, return the (epoch, step) the training is supposed to
continue at. Otherwise, return the `initial_epoch, initial_step` the user
passes in.
continue at. Otherwise, return the `initial_epoch, initial_step` the
user passes in.
"""
initial_step = 0
if self._training_state is not None:
8 changes: 3 additions & 5 deletions keras/engine/training_test.py
@@ -1723,7 +1723,7 @@ def test_mixed_precision(self):
"mse",
run_eagerly=test_utils.should_run_eagerly(),
)
history = model.fit(x, y, epochs=2)
model.fit(x, y, epochs=2)
policy.set_global_policy("float32")

@test_combinations.run_all_keras_modes
@@ -2368,10 +2368,8 @@ def test_class_weights(self):
y_train[:batch_size],
class_weight=class_weight,
)
ref_score = model.evaluate(
x_test, y_test, verbose=0
) # pylint: disable=unused-variable
score = model.evaluate( # pylint: disable=unused-variable
ref_score = model.evaluate(x_test, y_test, verbose=0) # noqa: F841
score = model.evaluate( # noqa: F841
x_test[test_ids, :], y_test[test_ids, :], verbose=0
)
# TODO(b/152990697): Fix the class weights test here.
2 changes: 1 addition & 1 deletion keras/integration_test/multi_worker_tutorial_test.py
@@ -70,7 +70,7 @@ class MultiWorkerTutorialTest(parameterized.TestCase, tf.test.TestCase):
def skip_fetch_failure_exception(self):
try:
yield
except zipfile.BadZipfile as e:
except zipfile.BadZipfile:
# There can be a race when multiple processes are downloading the
# data. Skip the test if that results in loading errors.
self.skipTest(
4 changes: 1 addition & 3 deletions keras/layers/normalization/batch_normalization.py
@@ -898,9 +898,7 @@ def _compose_transforms(scale, offset, then_scale, then_offset):
# Determine a boolean value for `training`: could be True, False, or
# None.
training_value = control_flow_util.constant_value(training)
if (
training_value == False
): # pylint: disable=singleton-comparison,g-explicit-bool-comparison
if training_value == False: # noqa: E712
mean, variance = self.moving_mean, self.moving_variance
else:
if self.adjustment:
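The `== False` comparison that E712 would normally flag is intentional here: `training_value` is tri-state (`True`, `False`, or `None` when it cannot be determined statically), so a truthiness test would wrongly send `None` down the inference path. A minimal illustration (assumed values, not code from the diff):

    training_value = None          # "could not be determined statically"
    if not training_value:         # wrong: also true when the value is None
        pass
    if training_value == False:    # noqa: E712 -- matches False only
        pass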
3 changes: 1 addition & 2 deletions keras/legacy_tf_layers/migration_utils_test.py
@@ -209,9 +209,8 @@ def test_num_rand_ops_disallow_repeated_ops_seed(self):
a_prime = tf.random.uniform(shape=(3, 1))
a_prime = a_prime * 3
error_string = "An exception should have been raised before this"
error_raised = "An exception should have been raised before this"
try:
c = tf.random.uniform(shape=(3, 1))
tf.random.uniform(shape=(3, 1))
raise RuntimeError(error_string)

except ValueError as err:
18 changes: 0 additions & 18 deletions keras/mixed_precision/loss_scale_optimizer_test.py
@@ -385,17 +385,8 @@ def testClipping(self, opt_cls, strategy_fn, use_tf_function):
self.assertEqual(self.evaluate(opt.loss_scale), 8)

# Test Inf gradients are still skipped instead of being clipped
<<<<<<< HEAD
loss = lambda: var * float("Inf")
run_fn = lambda: opt.minimize(loss, var_list=[var])
=======
def run_fn():
def loss():
return var * float("Inf")

return opt.minimize(loss, var_list=[var])

>>>>>>> 0bb24689 (fix F811)
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
self.assertAllClose(
@@ -426,17 +417,8 @@ def testDynamicUpdate(self, opt_cls, strategy_fn, use_tf_function):
self.assertEqual(4.0, self.evaluate(opt.loss_scale))

# Test optimizer with NaN gradients
<<<<<<< HEAD
loss = lambda: var * float("NaN")
run_fn = lambda: opt.minimize(loss, var_list=[var])
=======
def run_fn():
def loss():
return var * float("NaN")

return opt.minimize(loss, var_list=[var])

>>>>>>> 0bb24689 (fix F811)
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
# Variable should not change from before, due to NaN gradients.
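The two hunks above delete Git merge-conflict markers (`<<<<<<< HEAD`, `=======`, `>>>>>>> 0bb24689 (fix F811)`) that had been committed by accident, keeping the `def run_fn()` form of each test; the discarded lambda form (`run_fn = lambda: ...`) is the style flake8 flags as E731 (assignment of a lambda expression).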
3 changes: 1 addition & 2 deletions keras/saving/saved_model/load.py
@@ -713,9 +713,8 @@ def finalize_objects(self):
for node_id, (node, _) in self.loaded_nodes.items():
if (
not isinstance(node, base_layer.Layer)
or
# Don't finalize models until all layers have finished loading.
node_id in self.model_layer_dependencies
or node_id in self.model_layer_dependencies
):
continue

2 changes: 1 addition & 1 deletion keras/saving/saved_model/saved_model_test.py
@@ -1125,7 +1125,7 @@ def __call__(self, inputs):
class Model(keras.models.Model):
def __init__(self):
super().__init__()
self.layer = CustomLayer()
self.layer = CustomLayer() # noqa: F821

@tf.function(input_signature=[tf.TensorSpec([None, 1])])
def call(self, inputs):
2 changes: 1 addition & 1 deletion keras/saving/saving_utils.py
@@ -365,7 +365,7 @@ def try_build_compiled_arguments(model):
model.compiled_loss.build(model.outputs)
if not model.compiled_metrics.built:
model.compiled_metrics.build(model.outputs, model.outputs)
except: # pylint: disable=bare-except
except: # noqa: E722
logging.warning(
"Compiled the loaded model, but the compiled metrics have "
"yet to be built. `model.compile_metrics` will be empty "
4 changes: 1 addition & 3 deletions keras/saving/utils_v1/__init__.py
@@ -18,8 +18,7 @@
from __future__ import division
from __future__ import print_function

# pylint: disable=wildcard-import
from keras.saving.utils_v1.export_output import *
from keras.saving.utils_v1.export_output import * # noqa: F403
from keras.saving.utils_v1.export_utils import EXPORT_TAG_MAP
from keras.saving.utils_v1.export_utils import SIGNATURE_KEY_MAP
from keras.saving.utils_v1.export_utils import build_all_signature_defs
@@ -28,5 +27,4 @@
from keras.saving.utils_v1.export_utils import get_temp_export_dir
from keras.saving.utils_v1.export_utils import get_timestamped_export_dir

# pylint: enable=wildcard-import
# LINT.ThenChange(//tensorflow/python/saved_model/model_utils/__init__.py)
2 changes: 1 addition & 1 deletion keras/testing_infra/keras_doctest_lib_test.py
@@ -55,7 +55,7 @@ class KerasDoctestOutputCheckerTest(parameterized.TestCase):
["text1.0 text", []],
["text 1.0text", []],
["text1.0text", []],
["0x12e4", []], # not 12000
["0x12e4", []], # not 12000
["TensorBoard: http://128.0.0.1:8888", []],
# With a newline
["1.0 text\n 2.0 3.0 text", [1.0, 2.0, 3.0]],
2 changes: 1 addition & 1 deletion keras/tests/keras_doctest.py
@@ -32,7 +32,7 @@

# We put doctest after absltest so that it picks up the unittest monkeypatch.
# Otherwise doctest tests aren't runnable at all.
import doctest # pylint: disable=g-import-not-at-top,g-bad-import-order
import doctest # noqa: E402

FLAGS = flags.FLAGS

2 changes: 1 addition & 1 deletion keras/utils/data_utils.py
@@ -298,7 +298,7 @@ def __call__(self, block_num, block_size, total_size):
raise Exception(error_msg.format(origin, e.code, e.msg))
except urllib.error.URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt) as e:
except (Exception, KeyboardInterrupt):
if os.path.exists(fpath):
os.remove(fpath)
raise
2 changes: 1 addition & 1 deletion keras/utils/mode_keys.py
@@ -15,6 +15,6 @@
"""Keras model mode constants."""

# isort: off
from tensorflow.python.saved_model.model_utils.mode_keys import ( # noqa: E501
from tensorflow.python.saved_model.model_utils.mode_keys import ( # noqa: F401,E501
KerasModeKeys as ModeKeys,
)
4 changes: 1 addition & 3 deletions setup.cfg
@@ -6,7 +6,5 @@ profile=black
[flake8]
# imported but unused in __init__.py, that's ok.
per-file-ignores=**/__init__.py:F401
ignore=E203,W503
ignore=E203,W503,F632,E266,E731,E712,E741
max-line-length=80
# Only check line-too-long and ignore other errors.
select=E501
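For reference, the codes newly added to the flake8 ignore list carry their standard pycodestyle/pyflakes meanings (a hedged gloss, not text from the commit):

    # E203  whitespace before ':'        (conflicts with black formatting)
    # W503  line break before binary op  (conflicts with black formatting)
    # F632  ==/!= against str/bytes/int literals
    # E266  too many leading '#' in a block comment
    # E731  assignment of a lambda expression
    # E712  comparison to True/False with ==
    # E741  ambiguous variable name (l, O, I)

Dropping `select=E501` means flake8 now reports all enabled checks rather than line length alone.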
