From 398a694f86b8b1b117b2bc85a9f6dba2a98eae36 Mon Sep 17 00:00:00 2001
From: Daniel Rasmussen
Date: Mon, 20 Jan 2020 15:12:44 -0400
Subject: [PATCH] Add support for LeakyReLU layers to Converter

---
 CHANGES.rst                      |  1 +
 nengo_dl/converter.py            | 31 ++++++++++++++++++++++++++-----
 nengo_dl/tests/test_converter.py |  8 ++++++++
 3 files changed, 35 insertions(+), 5 deletions(-)

diff --git a/CHANGES.rst b/CHANGES.rst
index 688a1e884..4d6666863 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -33,6 +33,7 @@ Release history
   (not the training behaviour). (`#119`_)
 - Added ``nengo_dl.LeakyReLU`` and ``nengo_dl.SpikingLeakyReLU`` neuron models.
   (`#126`_)
+- Added support for leaky ReLU Keras layers to ``nengo_dl.Converter``. (`#126`_)

 **Changed**

diff --git a/nengo_dl/converter.py b/nengo_dl/converter.py
index 897ac8611..23d0d690b 100644
--- a/nengo_dl/converter.py
+++ b/nengo_dl/converter.py
@@ -11,8 +11,10 @@
 from tensorflow.python.util import nest

 from nengo_dl.config import configure_settings
-from nengo_dl.tensor_node import Layer, TensorNode
+from nengo_dl.neurons import LeakyReLU
 from nengo_dl.simulator import Simulator
+from nengo_dl.tensor_node import Layer, TensorNode
+

 logger = logging.getLogger(__name__)

@@ -1121,8 +1123,8 @@ def convert(self, node_id):
         broadcast_bias = np.zeros(self.output_shape(node_id))
         for i in range(idxs.shape[axis]):
             slices[axis] = i
-            broadcast_scale[slices] = scale[i]
-            broadcast_bias[slices] = bias[i]
+            broadcast_scale[tuple(slices)] = scale[i]
+            broadcast_bias[tuple(slices)] = bias[i]

         broadcast_scale = np.ravel(broadcast_scale)
         broadcast_bias = np.ravel(broadcast_bias)
@@ -1408,10 +1410,29 @@ class ConvertReLU(LayerConverter):
     """Convert ``tf.keras.layers.ReLU`` to Nengo objects."""

-    unsupported_args = [("negative_slope", 0), "max_value", ("threshold", 0)]
+    unsupported_args = ["max_value", ("threshold", 0)]

     def convert(self, node_id):
-        output = self.add_nengo_obj(node_id, biases=None, activation=tf.nn.relu)
+        if self.layer.negative_slope == 0:
+            activation = tf.nn.relu
+        else:
+            activation = LeakyReLU(negative_slope=self.layer.negative_slope)
+
+        output = self.add_nengo_obj(node_id, biases=None, activation=activation)

         self.add_connection(node_id, output)

         return output
+
+
+@Converter.register(tf.keras.layers.LeakyReLU)
+class ConvertLeakyReLU(LayerConverter):
+    """Convert ``tf.keras.layers.LeakyReLU`` to Nengo objects."""
+
+    def convert(self, node_id):
+        output = self.add_nengo_obj(
+            node_id, biases=None, activation=LeakyReLU(negative_slope=self.layer.alpha)
+        )
+
+        self.add_connection(node_id, output)
+
+        return output
diff --git a/nengo_dl/tests/test_converter.py b/nengo_dl/tests/test_converter.py
index 0c1dac3e0..6e3fe1d6a 100644
--- a/nengo_dl/tests/test_converter.py
+++ b/nengo_dl/tests/test_converter.py
@@ -514,3 +514,11 @@ def test_nested_input():
     x = tf.keras.layers.Concatenate()([x, x_0])

     _test_convert(inputs, x)
+
+
+def test_leaky_relu(rng):
+    inp = x = tf.keras.Input(shape=(4,))
+    x = tf.keras.layers.ReLU(negative_slope=0.1)(x)
+    x = tf.keras.layers.LeakyReLU(alpha=2)(x)
+
+    _test_convert(inp, x, inp_vals=[rng.uniform(-1, 1, size=(32, 4))])
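
Note for reviewers: a minimal end-to-end sketch of how the new conversion path can be exercised. This is not part of the patch; the model and parameter values are illustrative, and it assumes the nengo-dl 3.x Converter API (``nengo_dl.Converter``, ``converter.net``, ``converter.inputs``, and the ``allow_fallback`` argument).

    import numpy as np
    import tensorflow as tf

    import nengo_dl

    # Keras model using both spellings of a leaky rectifier: a ReLU layer
    # with a nonzero negative_slope, and a dedicated LeakyReLU layer.
    inp = tf.keras.Input(shape=(4,))
    x = tf.keras.layers.ReLU(negative_slope=0.1)(inp)
    out = tf.keras.layers.LeakyReLU(alpha=0.3)(x)
    model = tf.keras.Model(inp, out)

    # With this patch, both layers map to nengo_dl.LeakyReLU neurons.
    # allow_fallback=False makes the conversion fail loudly rather than
    # silently wrapping unsupported layers in TensorNodes.
    converter = nengo_dl.Converter(model, allow_fallback=False)

    with nengo_dl.Simulator(converter.net, minibatch_size=32) as sim:
        # Input shape is (batch, n_steps, dimensions).
        vals = np.random.uniform(-1, 1, size=(32, 1, 4))
        outputs = sim.predict({converter.inputs[inp]: vals})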