Skip to content

Commit

Permalink
Merge pull request #16350 from markub3327:master2
Browse files Browse the repository at this point in the history
PiperOrigin-RevId: 440421972
  • Loading branch information
tensorflower-gardener committed Apr 8, 2022
2 parents a954f96 + 3a3d7b6 commit 4c87dc9
Show file tree
Hide file tree
Showing 6 changed files with 48 additions and 7 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,7 @@ tf_class {
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'axis\', \'mean\', \'variance\'], varargs=None, keywords=kwargs, defaults=[\'-1\', \'None\', \'None\'], "
argspec: "args=[\'self\', \'axis\', \'mean\', \'variance\', \'invert\'], varargs=None, keywords=kwargs, defaults=[\'-1\', \'None\', \'None\', \'False\'], "
}
member_method {
name: "adapt"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,7 @@ tf_class {
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'axis\', \'mean\', \'variance\'], varargs=None, keywords=kwargs, defaults=[\'-1\', \'None\', \'None\'], "
argspec: "args=[\'self\', \'axis\', \'mean\', \'variance\', \'invert\'], varargs=None, keywords=kwargs, defaults=[\'-1\', \'None\', \'None\', \'False\'], "
}
member_method {
name: "adapt"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,7 @@ tf_class {
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'axis\', \'mean\', \'variance\'], varargs=None, keywords=kwargs, defaults=[\'-1\', \'None\', \'None\'], "
argspec: "args=[\'self\', \'axis\', \'mean\', \'variance\', \'invert\'], varargs=None, keywords=kwargs, defaults=[\'-1\', \'None\', \'None\', \'False\'], "
}
member_method {
name: "adapt"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,7 @@ tf_class {
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'axis\', \'mean\', \'variance\'], varargs=None, keywords=kwargs, defaults=[\'-1\', \'None\', \'None\'], "
argspec: "args=[\'self\', \'axis\', \'mean\', \'variance\', \'invert\'], varargs=None, keywords=kwargs, defaults=[\'-1\', \'None\', \'None\', \'False\'], "
}
member_method {
name: "adapt"
Expand Down
27 changes: 24 additions & 3 deletions keras/layers/preprocessing/normalization.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,9 @@ class Normalization(base_preprocessing_layer.PreprocessingLayer):
value(s) will be broadcast to the shape of the kept axes above; if the
value(s) cannot be broadcast, an error will be raised when this layer's
`build()` method is called.
invert: If True, this layer applies the inverse transformation
to its inputs, turning a normalized input back into its
original form.
Examples:
Expand Down Expand Up @@ -96,9 +99,22 @@ class Normalization(base_preprocessing_layer.PreprocessingLayer):
array([[-1.4142135 ],
[-0.70710677],
[ 0. ]], dtype=float32)>
Use the layer to de-normalize inputs (after adapting the layer).
>>> adapt_data = np.array([[0., 7., 4.],
... [2., 9., 6.],
... [0., 7., 4.],
... [2., 9., 6.]], dtype='float32')
>>> input_data = np.array([[1., 2., 3.]], dtype='float32')
>>> layer = tf.keras.layers.Normalization(axis=-1, invert=True)
>>> layer.adapt(adapt_data)
>>> layer(input_data)
<tf.Tensor: shape=(1, 3), dtype=float32, numpy=
array([[ 2., 10.,  8.]], dtype=float32)>
"""

def __init__(self, axis=-1, mean=None, variance=None, **kwargs):
def __init__(self, axis=-1, mean=None, variance=None, invert=False, **kwargs):
super().__init__(**kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell('Normalization').set(True)

Expand All @@ -124,6 +140,7 @@ def __init__(self, axis=-1, mean=None, variance=None, **kwargs):
'must be set. Got mean: {} and variance: {}'.format(mean, variance))
self.input_mean = mean
self.input_variance = variance
self.invert = invert

def build(self, input_shape):
super().build(input_shape)
def call(self, inputs):
  """Applies (or inverts) mean/variance normalization to `inputs`.

  Forward: `(inputs - mean) / sqrt(variance)`.
  Inverse (`self.invert=True`): `inputs * sqrt(variance) + mean`, which is
  the exact algebraic inverse of the forward transform.

  Args:
    inputs: Tensor (or castable array) of values to (de)normalize.

  Returns:
    A tensor of the same shape as `inputs`, cast to `self.compute_dtype`.
  """
  # The base layer automatically casts floating-point inputs, but we
  # explicitly cast here to also allow integer inputs to be passed
  inputs = tf.cast(inputs, self.compute_dtype)
  if self.invert:
    # Inverse of `(x - mean) / std` is `x * std + mean`. Note that
    # `(x + mean) * std` would be wrong: it expands to
    # `x*std + mean*std`, which only coincides when std == 1.
    return (inputs * tf.maximum(tf.sqrt(self.variance), backend.epsilon())
            + self.mean)
  else:
    return ((inputs - self.mean) /
            tf.maximum(tf.sqrt(self.variance), backend.epsilon()))

def compute_output_shape(self, input_shape):
"""Normalization is elementwise, so the output shape equals the input shape."""
return input_shape
Expand Down
20 changes: 20 additions & 0 deletions keras/layers/preprocessing/normalization_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -198,6 +198,26 @@ def test_output_dtype(self):
output = layer(input_data)
self.assertAllEqual(output.dtype, tf.float64)

def test_invert(self):
"""Round-trip check: normalize then de-normalize recovers the input."""
# NOTE(review): with variance=1.0 the std is exactly 1, so this round
# trip cannot distinguish `x * std + mean` from `(x + mean) * std` in
# the invert path -- add a case with variance != 1 to pin the inverse
# formula.
data = np.array([0., 2., 0., 2.])
norm = normalization.Normalization(mean=1.0, variance=1.0)
inv_norm = normalization.Normalization(mean=1.0, variance=1.0, invert=True)
output = norm(data)
output2 = inv_norm(output)
self.assertListEqual(output2.shape.as_list(), [4])
self.assertAllClose(output2, [0., 2., 0., 2.])

@test_utils.run_v2_only
def test_invert_adapt(self):
"""Round-trip check using adapted statistics instead of constructor args."""
# NOTE(review): adapting on [[0.], [2.], [0.], [2.]] yields mean=1 and
# variance=1 (std == 1), so this test also cannot detect a wrong invert
# formula such as `(x + mean) * std` -- use data with variance != 1.
input_data = [[0.], [2.], [0.], [2.]]
norm = keras.layers.Normalization(axis=-1)
norm.adapt(input_data)
inv_norm = keras.layers.Normalization(axis=-1, invert=True)
inv_norm.adapt(input_data)
output = norm(input_data)
output2 = inv_norm(output)
self.assertAllClose(input_data, output2)


@test_combinations.run_all_keras_modes(always_skip_v1=True)
class NormalizationAdaptTest(test_combinations.TestCase,
Expand Down

0 comments on commit 4c87dc9

Please sign in to comment.