Skip to content

Commit

Permalink
Remove object metadata when saving SavedModel.
Browse files Browse the repository at this point in the history
This change also fixes a few bugs when loading the metadata file, and fixes Keras tests so that they use `model.save` instead of `tf.saved_model.save`.

PiperOrigin-RevId: 376031254
  • Loading branch information
k-w-w authored and tensorflower-gardener committed Jun 11, 2021
1 parent 2d6b097 commit 1e4c9e4
Show file tree
Hide file tree
Showing 6 changed files with 73 additions and 44 deletions.
1 change: 1 addition & 0 deletions keras/engine/base_layer.py
Original file line number Diff line number Diff line change
Expand Up @@ -3022,6 +3022,7 @@ def _object_identifier(self):

@property
def _tracking_metadata(self):
"""Info about this layer to be saved into the SavedModel."""
return self._trackable_saved_model_saver.tracking_metadata

def _list_extra_dependencies_for_serialization(self, serialization_cache):
Expand Down
4 changes: 2 additions & 2 deletions keras/integration_test/tpu_strategy_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -215,8 +215,8 @@ def serve_fn(raw_features):
serving_fn = create_serving_signature(model)

saved_model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
tf.saved_model.save(
model, saved_model_dir, signatures={"serving_default": serving_fn})
model.save(saved_model_dir, save_format="tf",
signatures={"serving_default": serving_fn})

# Test the saved_model.
loaded_serving_fn = tf.keras.models.load_model(
Expand Down
15 changes: 10 additions & 5 deletions keras/saving/saved_model/load.py
Original file line number Diff line number Diff line change
Expand Up @@ -174,6 +174,12 @@ def _read_legacy_metadata(object_graph_def, metadata):
for node_id, proto in enumerate(object_graph_def.nodes):
if (proto.WhichOneof('kind') == 'user_object' and
proto.user_object.identifier in constants.KERAS_OBJECT_IDENTIFIERS):
if not proto.user_object.metadata:
raise ValueError('Unable to create a Keras model from this SavedModel. '
'This SavedModel was created with '
'`tf.saved_model.save`, and lacks the Keras metadata. '
'Please save your Keras model by calling `model.save` '
'or `tf.keras.models.save_model`.')
metadata.nodes.add(
node_id=node_id,
node_path=node_paths[node_id],
Expand Down Expand Up @@ -232,7 +238,7 @@ class KerasObjectLoader(object):
"""

def __init__(self, metadata, object_graph_def):
self._metadata = metadata
self._metadata = {x.node_id: x for x in metadata.nodes}
self._proto = object_graph_def

self._node_paths = {node_data.node_id: node_data.node_path
Expand Down Expand Up @@ -288,7 +294,7 @@ def _add_children_recreated_from_config(self, obj, proto, node_id):
self._traversed_nodes_from_config.add(node_id)
obj._maybe_initialize_trackable()
if isinstance(obj, base_layer.Layer) and not obj.built:
metadata = json_utils.decode(proto.user_object.metadata)
metadata = json_utils.decode(self._metadata[node_id].metadata)
self._try_build_layer(obj, node_id, metadata.get('build_input_shape'))

# Create list of all possible children
Expand Down Expand Up @@ -357,7 +363,7 @@ def load_layers(self, compile=True): # pylint: disable=redefined-builtin
# and layers will create the metric when initialized (this avoids wasting
# time by creating objects multiple times).
metric_list = []
for node_metadata in self._metadata.nodes:
for node_metadata in self._metadata.values():
if node_metadata.identifier == constants.METRIC_IDENTIFIER:
metric_list.append(node_metadata)
continue
Expand Down Expand Up @@ -645,8 +651,7 @@ def _reconstruct_all_models(self):

def _reconstruct_model(self, model_id, model, layers):
"""Reconstructs the network structure."""
config = json_utils.decode(
self._proto.nodes[model_id].user_object.metadata)['config']
config = json_utils.decode(self._metadata[model_id].metadata)['config']

# Set up model inputs
if model.inputs:
Expand Down
84 changes: 52 additions & 32 deletions keras/saving/saved_model/saved_model_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@

import os
import shutil
import sys

from absl.testing import parameterized
import numpy as np
Expand Down Expand Up @@ -134,7 +135,7 @@ def _train_model(self, model, use_dataset=False):

def _save_and_load(self, model):
saved_model_dir = self._save_model_dir()
tf.saved_model.save(model, saved_model_dir)
model.save(saved_model_dir, save_format='tf')
loaded = keras_load.load(saved_model_dir)
return loaded

Expand Down Expand Up @@ -177,7 +178,7 @@ def test_model_save_and_load_dataset(self):
def test_trainable_weights(self):
"""Tests that trainable status of individual weights is preserved."""
layer = keras.layers.Dense(4, name='custom_layer')
layer.build([3,])
layer.build([None, 3])
layer.add_weight(
'extra_weight', shape=[],
initializer=tf.compat.v1.constant_initializer(11),
Expand All @@ -186,12 +187,15 @@ def test_trainable_weights(self):
'extra_weight_2', shape=[],
initializer=tf.compat.v1.constant_initializer(12),
trainable=False)
model = keras.Sequential([keras.Input([3,]), layer])

saved_model_dir = self._save_model_dir()
self.evaluate(tf.compat.v1.variables_initializer(layer.variables))
tf.saved_model.save(layer, saved_model_dir)
loaded = keras_load.load(saved_model_dir)
self.evaluate(tf.compat.v1.variables_initializer(loaded.variables))
model.save(saved_model_dir, save_format='tf')
loaded_model = keras_load.load(saved_model_dir)
self.evaluate(tf.compat.v1.variables_initializer(loaded_model.variables))

loaded = loaded_model.layers[-1]

equal_attrs = ['name', '_expects_training_arg', 'trainable']
for attr in equal_attrs:
Expand Down Expand Up @@ -242,7 +246,7 @@ def test_maintains_losses(self):
previous_losses = model.losses[:]

saved_model_dir = self._save_model_dir()
tf.saved_model.save(model, saved_model_dir)
model.save(saved_model_dir, save_format='tf')

with previous_losses[0].graph.as_default():
# If we try to compare symbolic Tensors in eager mode assertAllEqual will
Expand All @@ -253,15 +257,18 @@ def test_maintains_losses(self):
# Test that eager losses are maintained.
model(input_arr) # Calls model eagerly, creating eager losses.
previous_losses = model.losses[:]
tf.saved_model.save(model, saved_model_dir)
model.save(saved_model_dir, save_format='tf')
self.assertAllEqual(previous_losses, model.losses)

def test_layer_with_learning_phase(self):
layer = LayerWithLearningPhase()
layer.build([None, None])
saved_model_dir = self._save_model_dir()
tf.saved_model.save(layer, saved_model_dir)
loaded = keras_load.load(saved_model_dir)
model = testing_utils.get_model_from_layers(
[layer], input_shape=[None], model_type='functional')
model.save(saved_model_dir, save_format='tf')
loaded_model = keras_load.load(saved_model_dir)
loaded = loaded_model.layers[-1]
input_arr = tf.ones((4, 3))

# Run the layer, and use the keras backend learning phase
Expand Down Expand Up @@ -289,7 +296,7 @@ def eager_loss():
model.predict(np.random.random((1, 3)).astype(np.float32))
saved_model_dir = self._save_model_dir()

tf.saved_model.save(model, saved_model_dir)
model.save(saved_model_dir, save_format='tf')

loaded = tf.saved_model.load(saved_model_dir)
self.evaluate(tf.compat.v1.variables_initializer(loaded.variables))
Expand Down Expand Up @@ -321,7 +328,7 @@ def test_compiled_model(self):
# Compile and save model.
model.compile('rmsprop', 'mse')
saved_model_dir = self._save_model_dir()
tf.saved_model.save(model, saved_model_dir)
model.save(saved_model_dir, save_format='tf')

loaded = keras_load.load(saved_model_dir)
actual_predict = loaded.predict(input_arr)
Expand All @@ -347,20 +354,25 @@ def __init__(self):
super(LayerWithNestedSpec, self).__init__()
self.input_spec = {
'a': keras.layers.InputSpec(max_ndim=3, axes={-1: 2}),
'b': keras.layers.InputSpec(shape=(None, 2, 3), dtype='float16')}
'b': keras.layers.InputSpec(shape=(None, 2, 3), dtype='int32')}

@property
def _use_input_spec_as_call_signature(self):
return True

layer = LayerWithNestedSpec()
saved_model_dir = self._save_model_dir()
tf.saved_model.save(layer, saved_model_dir)
loaded = keras_load.load(saved_model_dir)
model = testing_utils.get_model_from_layers(
[layer], model_type='subclass')
model({'a': tf.constant([[2, 4]]),
'b': tf.ones([1, 2, 3], dtype=tf.int32)})
model.save(saved_model_dir, save_format='tf')
loaded_model = keras_load.load(saved_model_dir)
loaded = loaded_model.layers[-1]
self.assertEqual(3, loaded.input_spec['a'].max_ndim)
self.assertEqual({-1: 2}, loaded.input_spec['a'].axes)
self.assertAllEqual([None, 2, 3], loaded.input_spec['b'].shape)
self.assertEqual('float16', loaded.input_spec['b'].dtype)
self.assertEqual('int32', loaded.input_spec['b'].dtype)

def test_must_restore_from_config_fails_if_layer_is_not_in_scope(self):

Expand All @@ -369,7 +381,9 @@ class LayerThatShouldFailIfNotAdded(keras.layers.Layer):

layer = LayerThatShouldFailIfNotAdded()
saved_model_dir = self._save_model_dir()
tf.saved_model.save(layer, saved_model_dir)
model = testing_utils.get_model_from_layers(
[layer], input_shape=[3], model_type='functional')
model.save(saved_model_dir, save_format='tf')
with self.assertRaisesRegex(RuntimeError, 'Unable to restore a layer of'):
_ = keras_load.load(saved_model_dir)

Expand All @@ -379,16 +393,20 @@ class LayerThatShouldFailIfNotAdded(keras.layers.Layer):
_must_restore_from_config = True

layer = LayerThatShouldFailIfNotAdded()
model = testing_utils.get_model_from_layers(
[layer], input_shape=[3], model_type='functional')
saved_model_dir = self._save_model_dir()
tf.saved_model.save(layer, saved_model_dir)
model.save(saved_model_dir, save_format='tf')
with generic_utils.CustomObjectScope(
{'LayerThatShouldFailIfNotAdded': LayerThatShouldFailIfNotAdded}):
_ = keras_load.load(saved_model_dir)

def test_must_restore_from_config_registration(self):
layer = GlobalLayerThatShouldFailIfNotAdded()
saved_model_dir = self._save_model_dir()
tf.saved_model.save(layer, saved_model_dir)
model = testing_utils.get_model_from_layers(
[layer], input_shape=[3], model_type='functional')
model.save(saved_model_dir, save_format='tf')
_ = keras_load.load(saved_model_dir)

def test_multi_input_model(self):
Expand Down Expand Up @@ -437,16 +455,14 @@ def testBatchNormUpdates(self):
self.evaluate(tf.compat.v1.variables_initializer(model.variables))
saved_model_dir = self._save_model_dir()

# TODO(kathywu): Re-enable this check after removing the tf.saved_model.save
# metadata warning.
# with self.captureWritesToStream(sys.stderr) as captured_logs:
model.save(saved_model_dir, save_format='tf')
loaded = keras_load.load(saved_model_dir)
with self.captureWritesToStream(sys.stderr) as captured_logs:
model.save(saved_model_dir, save_format='tf')
loaded = keras_load.load(saved_model_dir)

# Assert that saving does not log deprecation warnings
# (even if it needs to set learning phase for compat reasons)
# if context.executing_eagerly():
# self.assertNotIn('deprecated', captured_logs.contents())
if tf.executing_eagerly():
self.assertNotIn('deprecated', captured_logs.contents())

input_arr = tf.constant([[11], [12], [13]], dtype=tf.float32)
input_arr2 = tf.constant([[14], [15], [16]], dtype=tf.float32)
Expand Down Expand Up @@ -789,7 +805,7 @@ def testSaveStatefulRNN(self, unroll):
np.zeros((batch, 64)).astype('float32'))

saved_model_dir = self._save_model_dir()
tf.saved_model.save(model, saved_model_dir)
model.save(saved_model_dir, save_format='tf')

loaded = keras_load.load(saved_model_dir)
loaded_layer = loaded.layers[1]
Expand Down Expand Up @@ -817,7 +833,7 @@ def testSaveConvLSTM2D(self, stateful):
self.evaluate([v.initializer for v in model.variables])
saved_model_dir = self._save_model_dir()

tf.saved_model.save(model, saved_model_dir)
model.save(saved_model_dir, save_format='tf')
del model

loaded = keras_load.load(saved_model_dir)
Expand Down Expand Up @@ -857,7 +873,7 @@ def call(self, inputs):
model = keras.Model(f_inputs, out)
self.evaluate(tf.compat.v1.variables_initializer(model.variables))
saved_model_dir = self._save_model_dir()
tf.saved_model.save(model, saved_model_dir)
model.save(saved_model_dir, save_format='tf')

loaded = keras_load.load(saved_model_dir)
self.evaluate(tf.compat.v1.variables_initializer(loaded.variables))
Expand Down Expand Up @@ -956,7 +972,7 @@ def call(self, inputs):
inp = tf.constant([[1.0]])
model(inp)
saved_model_dir = self._save_model_dir()
tf.saved_model.save(model, saved_model_dir)
model.save(saved_model_dir, save_format='tf')

loaded = keras_load.load(saved_model_dir)
self.assertAllEqual([[1.0]], self.evaluate(loaded(inp)))
Expand Down Expand Up @@ -1128,8 +1144,12 @@ def _test_metric_save_and_load(self,
shape=(1, 5),
test_sample_weight=True):
with self.cached_session():
tf.saved_model.save(metric, save_dir)
loaded = keras_load.load(save_dir)
model = testing_utils.get_model_from_layers(
[keras.layers.Layer()], input_shape=[3], model_type='functional')
model.saved_metric = metric
model.save(save_dir, save_format='tf')
loaded_model = keras_load.load(save_dir)
loaded = loaded_model.saved_metric
self.evaluate([v.initializer for v in loaded.variables])
self.assertEqual(metric.name, loaded.name)
self.assertEqual(metric.dtype, loaded.dtype)
Expand Down Expand Up @@ -1281,7 +1301,7 @@ def zero_metric(y_true, y_pred):
metrics=[CustomMetric(), zero_metric])
model.fit(x, y)
saved_model_dir = self._save_model_dir()
tf.saved_model.save(model, saved_model_dir)
model.save(saved_model_dir, save_format='tf')

with self.assertRaisesRegex(ValueError, 'custom_objects'):
keras_load.load(saved_model_dir)
Expand Down
9 changes: 6 additions & 3 deletions keras/testing_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -565,7 +565,8 @@ def get_model_from_layers(model_layers,
input_dtype=None,
name=None,
input_ragged=None,
input_sparse=None):
input_sparse=None,
model_type=None):
"""Builds a model from a sequence of layers.
Args:
Expand All @@ -575,12 +576,14 @@ def get_model_from_layers(model_layers,
name: Name for the model.
input_ragged: Boolean, whether the input data is a ragged tensor.
input_sparse: Boolean, whether the input data is a sparse tensor.
model_type: One of "subclass", "subclass_custom_build", "sequential", or
"functional". When None, defaults to `get_model_type`.
Returns:
A Keras model.
"""

model_type = get_model_type()
if model_type is None:
model_type = get_model_type()
if model_type == 'subclass':
inputs = None
if input_ragged or input_sparse:
Expand Down
4 changes: 2 additions & 2 deletions keras/utils/kpl_test_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -163,8 +163,8 @@ def test_save_load_serving_model(self, model, feature_mapper,
label_inverse_lookup_layer)

saved_model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
tf.saved_model.save(
model, saved_model_dir, signatures={"serving_default": serving_fn})
model.save(saved_model_dir, save_format="tf",
signatures={"serving_default": serving_fn})

# Test the saved_model.
loaded_serving_fn = keras.saving.save.load_model(
Expand Down

0 comments on commit 1e4c9e4

Please sign in to comment.