diff --git a/keras/layers/preprocessing/normalization.py b/keras/layers/preprocessing/normalization.py
index be052a59ec5..ef2252d7328 100644
--- a/keras/layers/preprocessing/normalization.py
+++ b/keras/layers/preprocessing/normalization.py
@@ -61,6 +61,8 @@ class Normalization(base_preprocessing_layer.PreprocessingLayer):
       value(s) will be broadcast to the shape of the kept axes above; if the
       value(s) cannot be broadcast, an error will be raised when this layer's
       `build()` method is called.
+    invert: If True, this layer applies the inverse transformation to its
+      inputs, mapping normalized values back to the original range. Defaults
+      to False.
 
   Examples:
 
@@ -96,9 +98,34 @@ class Normalization(base_preprocessing_layer.PreprocessingLayer):
   array([[-1.4142135 ],
          [-0.70710677],
          [ 0.        ]], dtype=float32)>
+
+  Use the layer to de-normalize inputs (after adapting the layer).
+
+  >>> adapt_data = np.array([[0., 7., 4.],
+  ...                        [2., 9., 6.],
+  ...                        [0., 7., 4.],
+  ...                        [2., 9., 6.]], dtype='float32')
+  >>> input_data = np.array([[1., 2., 3.]], dtype='float32')
+  >>> layer = tf.keras.layers.Normalization(axis=-1, invert=True)
+  >>> layer.adapt(adapt_data)
+  >>> layer(input_data)
+  <tf.Tensor: shape=(1, 3), dtype=float32, numpy=
+  array([[ 2., 10.,  8.]], dtype=float32)>
+
+  Use the layer to de-normalize inputs with a directly supplied mean and
+  variance.
+
+  >>> input_data = np.array([[-1.4142135], [-0.70710677], [0.]],
+  ...                       dtype='float32')
+  >>> layer = tf.keras.layers.Normalization(mean=3., variance=2., invert=True)
+  >>> layer(input_data)
+  <tf.Tensor: shape=(3, 1), dtype=float32, numpy=
+  array([[1.],
+         [2.],
+         [3.]], dtype=float32)>
   """
 
-  def __init__(self, axis=-1, mean=None, variance=None, **kwargs):
+  def __init__(self, axis=-1, mean=None, variance=None, invert=False, **kwargs):
     super().__init__(**kwargs)
     base_preprocessing_layer.keras_kpl_gauge.get_cell('Normalization').set(True)
 
@@ -124,6 +151,7 @@ def __init__(self, axis=-1, mean=None, variance=None, **kwargs):
           'must be set. Got mean: {} and variance: {}'.format(mean, variance))
     self.input_mean = mean
     self.input_variance = variance
+    self.invert = invert
 
   def build(self, input_shape):
     super().build(input_shape)
@@ -302,7 +330,13 @@ def call(self, inputs):
     # The base layer automatically casts floating-point inputs, but we
     # explicitly cast here to also allow integer inputs to be passed
     inputs = tf.cast(inputs, self.compute_dtype)
-    return ((inputs - self.mean) /
-            tf.maximum(tf.sqrt(self.variance), backend.epsilon()))
+    if self.invert:
+      # De-normalize: x = z * max(std, eps) + mean, the exact inverse of the
+      # forward transform below. (Note: scale first, then shift — adding the
+      # mean before scaling would not invert the forward transform.)
+      return self.mean + (
+          inputs * tf.maximum(tf.sqrt(self.variance), backend.epsilon()))
+    else:
+      return ((inputs - self.mean) /
+              tf.maximum(tf.sqrt(self.variance), backend.epsilon()))
 
   def compute_output_shape(self, input_shape):
diff --git a/keras/layers/preprocessing/normalization_test.py b/keras/layers/preprocessing/normalization_test.py
index 4edf789089b..79cf334d3c8 100644
--- a/keras/layers/preprocessing/normalization_test.py
+++ b/keras/layers/preprocessing/normalization_test.py
@@ -198,6 +198,31 @@ def test_output_dtype(self):
     output = layer(input_data)
     self.assertAllEqual(output.dtype, tf.float64)
 
+  def test_invert(self):
+    # Use a variance != 1 so the round trip actually exercises the scale
+    # factor; with unit variance an incorrect order of shift/scale in the
+    # de-normalization formula would go undetected.
+    data = np.array([0., 2., 0., 2.])
+    norm = normalization.Normalization(mean=1.0, variance=4.0)
+    inv_norm = normalization.Normalization(mean=1.0, variance=4.0, invert=True)
+    output = norm(data)
+    output2 = inv_norm(output)
+    self.assertListEqual(output2.shape.as_list(), [4])
+    self.assertAllClose(output2, [0., 2., 0., 2.])
+
+  def test_invert_adapt(self):
+    # Data with non-unit variance (mean=2, variance=4) so adapt + invert
+    # must undo both the shift and the scale to recover the input.
+    input_data = [[0.], [4.], [0.], [4.]]
+    norm = keras.layers.Normalization(axis=-1)
+    norm.adapt(input_data)
+    inv_norm = keras.layers.Normalization(axis=-1, invert=True)
+    inv_norm.adapt(input_data)
+    output = norm(input_data)
+    output2 = inv_norm(output)
+    self.assertAllClose(input_data, output2)
+
 
 @test_combinations.run_all_keras_modes(always_skip_v1=True)
 class NormalizationAdaptTest(test_combinations.TestCase,