From 7e674e537a316fc7d0bbe4c7a07d2a17e5945bdd Mon Sep 17 00:00:00 2001
From: tom Koren
Date: Thu, 4 Jul 2024 09:39:59 +0300
Subject: [PATCH] replace keras_name with new naming

---
 onnx2kerastl/activation_layers.py    |  2 +-
 onnx2kerastl/linear_layers.py        |  2 +-
 onnx2kerastl/ltsm_layers.py          | 16 +++++++++-------
 onnx2kerastl/normalization_layers.py |  8 ++++----
 onnx2kerastl/operation_layers.py     |  8 ++++----
 onnx2kerastl/padding_layers.py       | 10 +++++-----
 onnx2kerastl/pooling_layers.py       | 25 ++++++++++++++-----------
 onnx2kerastl/reshape_layers.py       |  2 +-
 8 files changed, 39 insertions(+), 34 deletions(-)

diff --git a/onnx2kerastl/activation_layers.py b/onnx2kerastl/activation_layers.py
index 808649df..b2cfbe30 100644
--- a/onnx2kerastl/activation_layers.py
+++ b/onnx2kerastl/activation_layers.py
@@ -43,7 +43,7 @@ def convert_elu(node, params, layers, lambda_func, node_name, keras_name):
     input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)

     alpha = params.get('alpha', keras.layers.ELU.__init__.__defaults__[0])
-    elu = keras.layers.ELU(alpha=alpha, name=keras_name)
+    elu = keras.layers.ELU(alpha=alpha, name=f"{params['cleaned_name']}_elu")
     layers[node_name] = elu(input_0)


diff --git a/onnx2kerastl/linear_layers.py b/onnx2kerastl/linear_layers.py
index 56d96b8a..7a685625 100644
--- a/onnx2kerastl/linear_layers.py
+++ b/onnx2kerastl/linear_layers.py
@@ -44,7 +44,7 @@ def convert_gemm(node, params, layers, lambda_func, node_name, keras_name):
     if is_numpy(keras_weights[0]):
         dense = keras.layers.Dense(
             output_channels,
-            weights=keras_weights, name=keras_name, use_bias=has_bias
+            weights=keras_weights, name=f"{params['cleaned_name']}_gemm_dense", use_bias=has_bias
         )

         # The first input - always X
diff --git a/onnx2kerastl/ltsm_layers.py b/onnx2kerastl/ltsm_layers.py
index 8e9ac634..97f54944 100644
--- a/onnx2kerastl/ltsm_layers.py
+++ b/onnx2kerastl/ltsm_layers.py
@@ -31,23 +31,25 @@ def convert_lstm(node, params, layers, lambda_func, node_name, keras_name):
     if direction != 'forward':
         raise UnsupportedLayer(f"LSTM with {direction} direction")
     should_return_state = len(node.output) == 3
-    input_tensor = tf.transpose(ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name[0]), perm=[1, 0, 2])
+    input_tensor = tf_transpose(ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name[0]),
+                                perm=[1, 0, 2],
+                                tf_name=f"{params['cleaned_name']}_lstm_first_transpose")
     weights_w = layers[node.input[1]][0]
     weights_r = layers[node.input[2]][0]
     weights_b = layers[node.input[3]][0]

     initial_h_state = tf_cast(tf_squeeze(ensure_tf_type(layers[node.input[5]]),
                                          axis=0,
-                                         tf_name=f"{params['cleaned_name']}_squeeze_h"
+                                         tf_name=f"{params['cleaned_name']}_lstm_squeeze_h"
                                          ),
                               input_tensor.dtype,
-                              tf_name=f"{params['cleaned_name']}_cast_h")
+                              tf_name=f"{params['cleaned_name']}_lstm_cast_h")
     initial_c_state = tf_cast(
         tf_squeeze(
             ensure_tf_type(layers[node.input[6]]),
             axis=0,
-            tf_name=f"{params['cleaned_name']}_squeeze_c"), input_tensor.dtype,
-        tf_name=f"{params['cleaned_name']}_cast_c")
+            tf_name=f"{params['cleaned_name']}_lstm_squeeze_c"), input_tensor.dtype,
+        tf_name=f"{params['cleaned_name']}_lstm_cast_c")

     tf.keras.backend.set_image_data_format("channels_last")
     hidden_size = params['hidden_size']
@@ -87,9 +89,9 @@ def convert_lstm(node, params, layers, lambda_func, node_name, keras_name):
         layers[node.output[2]] = c_out
     else:
         lstm_tensor = res
-    lstm_tensor_in_onnx_order = tf_transpose(lstm_tensor, perm=[1, 0, 2], tf_name=f"{params['cleaned_name']}_transpose")
+    lstm_tensor_in_onnx_order = tf_transpose(lstm_tensor, perm=[1, 0, 2], tf_name=f"{params['cleaned_name']}_lstm_transpose")
     lstm_tensor_in_onnx_order = tf_expand_dims(lstm_tensor_in_onnx_order, axis=1,
-                                               tf_name=f"{params['cleaned_name']}_expand_dims")
+                                               tf_name=f"{params['cleaned_name']}_lstm_expand_dims")
     layers[node_name] = lstm_tensor_in_onnx_order


 def convert_gru(node, params, layers, lambda_func, node_name, keras_name):
diff --git a/onnx2kerastl/normalization_layers.py b/onnx2kerastl/normalization_layers.py
index 8cba5c63..417e5a3b 100644
--- a/onnx2kerastl/normalization_layers.py
+++ b/onnx2kerastl/normalization_layers.py
@@ -49,13 +49,13 @@ def convert_batchnorm(node, params, layers, lambda_func, node_name, keras_name):
             axis=1, momentum=momentum, epsilon=eps,
             center=False, scale=False,
             weights=weights,
-            name=keras_name
+            name=f"{params['cleaned_name']}_bn"
         )
     else:
         bn = keras.layers.BatchNormalization(
             axis=1, momentum=momentum, epsilon=eps,
             weights=weights,
-            name=keras_name
+            name=f"{params['cleaned_name']}_bn"
         )

     layers[node_name] = bn(input_0)
@@ -115,7 +115,7 @@ def convert_dropout(node, params, layers, lambda_func, node_name, keras_name):
     input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)

     ratio = params['ratio'] if 'ratio' in params else 0.0
-    lambda_layer = keras.layers.Dropout(ratio, name=keras_name)
+    lambda_layer = keras.layers.Dropout(ratio, name=f"{params['cleaned_name']}_dropout")
     layers[node_name] = lambda_layer(input_0)


@@ -152,6 +152,6 @@ def target_layer(x, depth_radius=params['size'], bias=params['bias'], alpha=para
         return layer

-    lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
+    lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_lrn")
     layers[node_name] = lambda_layer(input_0)
     lambda_func[keras_name] = target_layer
diff --git a/onnx2kerastl/operation_layers.py b/onnx2kerastl/operation_layers.py
index 179e86d1..8e20e814 100644
--- a/onnx2kerastl/operation_layers.py
+++ b/onnx2kerastl/operation_layers.py
@@ -82,7 +82,7 @@ def target_layer(x):
             import keras.backend as K
             return K.log(x)

-        lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
+        lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_log")
         layers[node_name] = lambda_layer(input_0)
         lambda_func[keras_name] = target_layer

@@ -124,7 +124,7 @@ def target_layer(x):
             import keras.backend as K
             return K.exp(x)

-        lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
+        lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_exp")
         layers[node_name] = lambda_layer(input_0)
         lambda_func[keras_name] = target_layer

@@ -158,7 +158,7 @@ def target_layer(x, axis=axis, keep_dims=keep_dims):
             import keras.backend as K
             return K.sum(x, keepdims=keep_dims, axis=axis)

-        lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
+        lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_reduce_sum")
         layers[node_name] = lambda_layer(input_0)
         layers[node_name].set_shape(layers[node_name].shape)
         lambda_func[keras_name] = target_layer
@@ -205,7 +205,7 @@ def target_layer(x, axis=params.get('axes'), keepdims=params['keepdims']):
             import keras.backend as K
             return K.max(x, keepdims=(keepdims == 1), axis=axis)

-        lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
+        lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_reduce_max")
         layers[node_name] = lambda_layer(input_0)
         layers[node_name].set_shape(layers[node_name].shape)
         lambda_func[keras_name] = target_layer
diff --git a/onnx2kerastl/padding_layers.py b/onnx2kerastl/padding_layers.py
index 6e056e8c..778a51c2 100644
--- a/onnx2kerastl/padding_layers.py
+++ b/onnx2kerastl/padding_layers.py
@@ -46,7 +46,7 @@ def convert_padding(node, params, layers, lambda_func, node_name, keras_name):
             if pads.shape[0] == 8:
                 padding_layer = keras.layers.ZeroPadding2D(
                     padding=((pads[2], pads[6]), (pads[3], pads[7])),
-                    name=keras_name
+                    name=f"{params['cleaned_name']}_pad_0"
                 )
             elif pads.shape[0] == 12:  # Check for rank 6 input
                 padding_layer = keras.layers.Lambda(
@@ -62,14 +62,14 @@ def convert_padding(node, params, layers, lambda_func, node_name, keras_name):
                         ],
                         mode='CONSTANT'
                     ),
-                    name=keras_name
+                    name=f"{params['cleaned_name']}_pad_1"
                 )
                 layers[node_name] = padding_layer(input_0)
             else:
                 logger.warning("Caution - no test yet")
                 padding_layer = keras.layers.ZeroPadding3D(
                     padding=((pads[2], pads[7]), (pads[3], pads[8]), (pads[4], pads[9])),
-                    name=keras_name
+                    name=f"{params['cleaned_name']}_pad_2"
                 )
                 layers[node_name] = padding_layer(input_0)
     elif params['mode'] == 'reflect':
@@ -82,7 +82,7 @@ def target_layer(x, pads=pads):
                 layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[7]], [pads[3], pads[8]], [pads[4], pads[9]]], 'REFLECT')
             return layer

-        lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
+        lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_pad_reflect")
         layers[node_name] = lambda_layer(input_0)
         lambda_func[keras_name] = target_layer
     elif params['mode'] == 'edge':
@@ -96,7 +96,7 @@ def target_layer(x, pads=pads):
                 layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[7]], [pads[3], pads[8]], [pads[4], pads[9]]], 'SYMMETRIC')
             return layer

-        lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
+        lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_pad_edge")
         layers[node_name] = lambda_layer(input_0)
         lambda_func[keras_name] = target_layer

diff --git a/onnx2kerastl/pooling_layers.py b/onnx2kerastl/pooling_layers.py
index 9027a27b..4d64887d 100644
--- a/onnx2kerastl/pooling_layers.py
+++ b/onnx2kerastl/pooling_layers.py
@@ -38,7 +38,7 @@ def convert_maxpool(node, params, layers, lambda_func, node_name, keras_name):
             logger.debug('Use `same` padding parameters.')
         else:
             logger.warning('Unable to use `same` padding. Add ZeroPadding2D layer to fix shapes.')
-            padding_name = keras_name + '_pad'
+            padding_name = f"{params['cleaned_name']}_maxpool" + '_pad'
             if len(kernel_shape) == 2:
                 padding = None

@@ -64,7 +64,7 @@ def convert_maxpool(node, params, layers, lambda_func, node_name, keras_name):
             pool_size=kernel_shape,
             strides=stride_shape,
             padding=pad,
-            name=keras_name,
+            name=f"{params['cleaned_name']}_maxpool",
             data_format='channels_first'
         )
     else:
@@ -72,7 +72,7 @@ def convert_maxpool(node, params, layers, lambda_func, node_name, keras_name):
             pool_size=kernel_shape,
             strides=stride_shape,
             padding=pad,
-            name=keras_name,
+            name=f"{params['cleaned_name']}_maxpool",
             data_format='channels_first'
         )
     ceil_mode = params.get('ceil_mode', False)
@@ -126,7 +126,7 @@ def convert_avgpool(node, params, layers, lambda_func, node_name, keras_name):
         else:
             pad = 'valid'
             logger.warning('Unable to use `same` padding. Add ZeroPadding2D layer to fix shapes.')
-            padding_name = keras_name + '_pad'
+            padding_name = f"{params['cleaned_name']}_avgpool" + '_pad'
             if len(kernel_shape) == 2:
                 padding_layer = keras.layers.ZeroPadding2D(
                     padding=pads[:len(stride_shape)],
@@ -144,7 +144,7 @@ def convert_avgpool(node, params, layers, lambda_func, node_name, keras_name):
             pool_size=kernel_shape,
             strides=stride_shape,
             padding=pad,
-            name=keras_name,
+            name=f"{params['cleaned_name']}_avgpool",
             data_format='channels_first'
         )
     elif len(kernel_shape) == 1:
@@ -152,7 +152,7 @@ def convert_avgpool(node, params, layers, lambda_func, node_name, keras_name):
             pool_size=kernel_shape,
             strides=stride_shape,
             padding=pad,
-            name=keras_name,
+            name=f"{params['cleaned_name']}_avgpool",
             data_format='channels_first'
         )
     else:
@@ -160,7 +160,7 @@ def convert_avgpool(node, params, layers, lambda_func, node_name, keras_name):
             pool_size=kernel_shape,
             strides=stride_shape,
             padding=pad,
-            name=keras_name,
+            name=f"{params['cleaned_name']}_avgpool",
             data_format='channels_first'
         )
     layers[node_name] = pooling(input_0)
@@ -180,18 +180,21 @@ def convert_global_avg_pool(node, params, layers, lambda_func, node_name, keras_
     input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)
     tensor_dim = len(input_0.shape)
     if tensor_dim == 3:
-        global_pool = keras.layers.GlobalAveragePooling1D(data_format='channels_first', name=keras_name)
+        global_pool = keras.layers.GlobalAveragePooling1D(data_format='channels_first',
+                                                          name=f"{params['cleaned_name']}_global_avg_pool_3")
     elif tensor_dim == 4:
-        global_pool = keras.layers.GlobalAveragePooling2D(data_format='channels_first', name=keras_name)
+        global_pool = keras.layers.GlobalAveragePooling2D(data_format='channels_first',
+                                                          name=f"{params['cleaned_name']}_global_avg_pool_4")
     elif tensor_dim == 5:
-        global_pool = keras.layers.GlobalAveragePooling3D(data_format='channels_first', name=keras_name)
+        global_pool = keras.layers.GlobalAveragePooling3D(data_format='channels_first',
+                                                          name=f"{params['cleaned_name']}_global_avg_pool_5")
     else:
         raise NotImplementedError("Global average pooling of dims < 3 or dims > 5 is not supported")
     input_0 = global_pool(input_0)
     new_shape = input_0.shape.as_list()
     new_shape = new_shape[1:]
     new_shape.extend([1] * (tensor_dim - 2))
-    reshape_layer = keras.layers.Reshape(new_shape, name=f"{params['cleaned_name']}_reshape")
+    reshape_layer = keras.layers.Reshape(new_shape, name=f"{params['cleaned_name']}_global_avg_pool_reshape")
     input_0 = reshape_layer(input_0)
     layers[node_name] = input_0

diff --git a/onnx2kerastl/reshape_layers.py b/onnx2kerastl/reshape_layers.py
index 23904d34..c0315b68 100644
--- a/onnx2kerastl/reshape_layers.py
+++ b/onnx2kerastl/reshape_layers.py
@@ -35,7 +35,7 @@ def convert_transpose(node, params, layers, lambda_func, node_name, keras_name):
         layers[node_name] = tf_transpose(layers[input_name], perm=params['perm'],
                                          tf_name=f"{params['cleaned_name']}_transpose")
     else:
-        permute = keras.layers.Permute(params['perm'][1:], name=keras_name)
+        permute = keras.layers.Permute(params['perm'][1:], name=f"{params['cleaned_name']}_transpose")
         layers[node_name] = permute(layers[input_name])
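
Every rename in this patch follows the same pattern: the bare `keras_name` (taken directly from the ONNX node name) is replaced by `f"{params['cleaned_name']}_<suffix>"`, where `cleaned_name` is the sanitized node name carried in `params` and the suffix identifies the operation (`_elu`, `_bn`, `_maxpool`, `_pad_reflect`, ...). Per-call-site suffixes matter because one ONNX node can expand into several Keras/TF layers (the LSTM path alone emits `_lstm_squeeze_h`, `_lstm_cast_h`, and `_lstm_first_transpose`), and Keras raises a ValueError when two layers in a model share a name. A minimal sketch of the convention; `make_layer_name` is a hypothetical helper for illustration, while the patch itself inlines the f-strings at each call site:

```python
import keras


def make_layer_name(params: dict, suffix: str) -> str:
    # params['cleaned_name'] holds the sanitized ONNX node name; the
    # op-specific suffix keeps each generated Keras layer name unique.
    return f"{params['cleaned_name']}_{suffix}"


# Two layers derived from the same ONNX node get distinct, stable names.
params = {'cleaned_name': 'features_0'}
elu = keras.layers.ELU(alpha=1.0, name=make_layer_name(params, 'elu'))
dropout = keras.layers.Dropout(0.5, name=make_layer_name(params, 'dropout'))
print(elu.name, dropout.name)  # features_0_elu features_0_dropout
```

With this scheme the generated layer names are deterministic functions of the ONNX graph, so layers produced from the same node no longer collide the way a reused `keras_name` could.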