Denormalization layer #16350

Merged · 5 commits · Apr 8, 2022
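This PR adds an invert argument to tf.keras.layers.Normalization: when set to True, the layer applies the inverse of the normalization transform, mapping already-normalized values back to the original scale using the adapted (or directly supplied) mean and variance.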
33 changes: 31 additions & 2 deletions keras/layers/preprocessing/normalization.py
@@ -61,6 +61,7 @@ class Normalization(base_preprocessing_layer.PreprocessingLayer):
value(s) will be broadcast to the shape of the kept axes above; if the
value(s) cannot be broadcast, an error will be raised when this layer's
`build()` method is called.
invert: If True, this layer will apply the inverse transformation to its inputs, turning a normalized input back into its original form. Defaults to False.

Examples:

@@ -96,9 +97,32 @@ class Normalization(base_preprocessing_layer.PreprocessingLayer):
array([[-1.4142135 ],
[-0.70710677],
[ 0. ]], dtype=float32)>

Using the layer in inverted mode to denormalize inputs, calculating a mean and variance for each index on the last axis:

>>> adapt_data = np.array([[0., 7., 4.],
... [2., 9., 6.],
... [0., 7., 4.],
... [2., 9., 6.]], dtype='float32')
>>> input_data = np.array([[1., 2., 3.]], dtype='float32')
>>> layer = tf.keras.layers.Normalization(axis=-1, invert=True)
>>> layer.adapt(adapt_data)
>>> layer(input_data)
<tf.Tensor: shape=(1, 3), dtype=float32, numpy=
array([[ 2., 10.,  8.]], dtype=float32)>

Using the layer in inverted mode to denormalize inputs, passing the mean and variance directly:

>>> input_data = np.array([[-1.4142135], [-0.70710677], [0.]], dtype='float32')
>>> layer = tf.keras.layers.Normalization(mean=3., variance=2., invert=True)
>>> layer(input_data)
<tf.Tensor: shape=(3, 1), dtype=float32, numpy=
array([[1. ],
[2. ],
[3. ]], dtype=float32)>
"""

def __init__(self, axis=-1, mean=None, variance=None, invert=False, **kwargs):
super().__init__(**kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell('Normalization').set(True)

@@ -124,6 +148,7 @@ def __init__(self, axis=-1, mean=None, variance=None, **kwargs):
'must be set. Got mean: {} and variance: {}'.format(mean, variance))
self.input_mean = mean
self.input_variance = variance
self.invert = invert

def build(self, input_shape):
super().build(input_shape)
@@ -302,7 +327,11 @@ def call(self, inputs):
# The base layer automatically casts floating-point inputs, but we
# explicitly cast here to also allow integer inputs to be passed
inputs = tf.cast(inputs, self.compute_dtype)
if self.invert:
  return self.mean + (
      inputs * tf.maximum(tf.sqrt(self.variance), backend.epsilon()))
else:
  return ((inputs - self.mean) /
          tf.maximum(tf.sqrt(self.variance), backend.epsilon()))
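A quick arithmetic check on the inverted branch (a minimal NumPy sketch, not part of the diff): the forward path computes (x - mean) / sqrt(variance), so the inverse must be x * sqrt(variance) + mean for the two to compose to the identity. The constants mirror the mean/variance docstring example above.

    import numpy as np

    mean, variance = 3.0, 2.0
    x = np.array([1.0, 2.0, 3.0], dtype="float32")

    normalized = (x - mean) / np.sqrt(variance)             # forward transform
    denormalized = normalized * np.sqrt(variance) + mean    # inverted transform
    np.testing.assert_allclose(denormalized, x, rtol=1e-5)  # round-trip identity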

def compute_output_shape(self, input_shape):
18 changes: 18 additions & 0 deletions keras/layers/preprocessing/normalization_test.py
@@ -198,6 +198,24 @@ def test_output_dtype(self):
output = layer(input_data)
self.assertAllEqual(output.dtype, tf.float64)

def test_invert(self):
data = np.array([0., 2., 0., 2.])
norm = normalization.Normalization(mean=1.0, variance=1.0)
inv_norm = normalization.Normalization(mean=1.0, variance=1.0, invert=True)
output = norm(data)
output2 = inv_norm(output)
self.assertListEqual(output2.shape.as_list(), [4])
self.assertAllClose(output2, [0., 2., 0., 2.])

def test_invert_adapt(self):
input_data = [[0.], [2.], [0.], [2.]]
norm = keras.layers.Normalization(axis=-1)
norm.adapt(input_data)
inv_norm = keras.layers.Normalization(axis=-1, invert=True)
inv_norm.adapt(input_data)
output = norm(input_data)
output2 = inv_norm(output)
self.assertAllClose(input_data, output2)
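The intended workflow behind these tests is a forward/inverted pair adapted on the same data. A hedged end-to-end sketch, assuming the corrected call() branch above (the data and variable names are illustrative, not from the PR):

    import numpy as np
    import tensorflow as tf

    targets = np.array([[10.], [20.], [30.], [40.]], dtype="float32")

    # Forward layer standardizes the targets, e.g. for training a regressor.
    norm = tf.keras.layers.Normalization(axis=-1)
    norm.adapt(targets)

    # Inverted twin, adapted on the same data, undoes that scaling.
    denorm = tf.keras.layers.Normalization(axis=-1, invert=True)
    denorm.adapt(targets)

    scaled = norm(targets)       # ~zero mean, unit variance
    recovered = denorm(scaled)   # back to the original units
    np.testing.assert_allclose(recovered.numpy(), targets, rtol=1e-5)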

@test_combinations.run_all_keras_modes(always_skip_v1=True)
class NormalizationAdaptTest(test_combinations.TestCase,