replace keras_name with new naming
tomkoren21 committed Jul 9, 2024
1 parent 91f5d5b commit 7e674e5
Showing 8 changed files with 39 additions and 34 deletions.
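
Every change follows the same pattern: layer names previously taken verbatim from keras_name are now built from params['cleaned_name'] plus an op-specific suffix. A minimal sketch of the new scheme (the node name below is hypothetical, not code from this repository):

    from tensorflow import keras

    # Assumed: params['cleaned_name'] holds a sanitized, unique base name
    # for the ONNX node being converted.
    params = {'cleaned_name': 'node_12'}

    # Old scheme: name=keras_name. New scheme: base name + op suffix.
    elu = keras.layers.ELU(alpha=1.0, name=f"{params['cleaned_name']}_elu")
    print(elu.name)  # node_12_elu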
2 changes: 1 addition & 1 deletion onnx2kerastl/activation_layers.py
@@ -43,7 +43,7 @@ def convert_elu(node, params, layers, lambda_func, node_name, keras_name):
 
     input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)
     alpha = params.get('alpha', keras.layers.ELU.__init__.__defaults__[0])
-    elu = keras.layers.ELU(alpha=alpha, name=keras_name)
+    elu = keras.layers.ELU(alpha=alpha, name=f"{params['cleaned_name']}_elu")
     layers[node_name] = elu(input_0)
2 changes: 1 addition & 1 deletion onnx2kerastl/linear_layers.py
@@ -44,7 +44,7 @@ def convert_gemm(node, params, layers, lambda_func, node_name, keras_name):
     if is_numpy(keras_weights[0]):
         dense = keras.layers.Dense(
             output_channels,
-            weights=keras_weights, name=keras_name, use_bias=has_bias
+            weights=keras_weights, name=f"{params['cleaned_name']}_gemm_dense", use_bias=has_bias
         )
 
         # The first input - always X
16 changes: 9 additions & 7 deletions onnx2kerastl/ltsm_layers.py
@@ -31,23 +31,25 @@ def convert_lstm(node, params, layers, lambda_func, node_name, keras_name):
     if direction != 'forward':
         raise UnsupportedLayer(f"LSTM with {direction} direction")
     should_return_state = len(node.output) == 3
-    input_tensor = tf.transpose(ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name[0]), perm=[1, 0, 2])
+    input_tensor = tf_transpose(ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name[0]),
+                                perm=[1, 0, 2],
+                                tf_name=f"{params['cleaned_name']}_lstm_first_transpose")
     weights_w = layers[node.input[1]][0]
     weights_r = layers[node.input[2]][0]
     weights_b = layers[node.input[3]][0]
 
     initial_h_state = tf_cast(tf_squeeze(ensure_tf_type(layers[node.input[5]]),
                                          axis=0,
-                                         tf_name=f"{params['cleaned_name']}_squeeze_h"
+                                         tf_name=f"{params['cleaned_name']}_lstm_squeeze_h"
                                          ),
                               input_tensor.dtype,
-                              tf_name=f"{params['cleaned_name']}_cast_h")
+                              tf_name=f"{params['cleaned_name']}_lstm_cast_h")
     initial_c_state = tf_cast(
         tf_squeeze(
             ensure_tf_type(layers[node.input[6]]),
             axis=0,
-            tf_name=f"{params['cleaned_name']}_squeeze_c"), input_tensor.dtype,
-        tf_name=f"{params['cleaned_name']}_cast_c")
+            tf_name=f"{params['cleaned_name']}_lstm_squeeze_c"), input_tensor.dtype,
+        tf_name=f"{params['cleaned_name']}_lstm_cast_c")
 
     tf.keras.backend.set_image_data_format("channels_last")
     hidden_size = params['hidden_size']
@@ -87,9 +89,9 @@ def convert_lstm(node, params, layers, lambda_func, node_name, keras_name):
         layers[node.output[2]] = c_out
     else:
         lstm_tensor = res
-    lstm_tensor_in_onnx_order = tf_transpose(lstm_tensor, perm=[1, 0, 2], tf_name=f"{params['cleaned_name']}_transpose")
+    lstm_tensor_in_onnx_order = tf_transpose(lstm_tensor, perm=[1, 0, 2], tf_name=f"{params['cleaned_name']}_lstm_transpose")
     lstm_tensor_in_onnx_order = tf_expand_dims(lstm_tensor_in_onnx_order, axis=1,
-                                               tf_name=f"{params['cleaned_name']}_expand_dims")
+                                               tf_name=f"{params['cleaned_name']}_lstm_expand_dims")
     layers[node_name] = lstm_tensor_in_onnx_order
 
 def convert_gru(node, params, layers, lambda_func, node_name, keras_name):
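
The LSTM path routes tensor ops through helpers such as tf_transpose, tf_cast, and tf_squeeze that take an explicit tf_name. Their implementation is not part of this diff; a plausible minimal sketch, assuming each helper simply wraps the TF op in a named Lambda layer:

    import tensorflow as tf
    from tensorflow import keras

    def tf_transpose(tensor, perm, tf_name):
        # Assumed behavior: run tf.transpose inside a uniquely named Lambda
        # layer so the op shows up as a named node in the converted graph.
        return keras.layers.Lambda(lambda t: tf.transpose(t, perm=perm),
                                   name=tf_name)(tensor)

    inp = keras.layers.Input(shape=(10, 3))  # (batch, seq, features)
    out = tf_transpose(inp, perm=[1, 0, 2], tf_name="node_0_lstm_first_transpose")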
8 changes: 4 additions & 4 deletions onnx2kerastl/normalization_layers.py
@@ -49,13 +49,13 @@ def convert_batchnorm(node, params, layers, lambda_func, node_name, keras_name):
             axis=1, momentum=momentum, epsilon=eps,
             center=False, scale=False,
             weights=weights,
-            name=keras_name
+            name=f"{params['cleaned_name']}_bn"
         )
     else:
         bn = keras.layers.BatchNormalization(
             axis=1, momentum=momentum, epsilon=eps,
             weights=weights,
-            name=keras_name
+            name=f"{params['cleaned_name']}_bn"
         )
 
     layers[node_name] = bn(input_0)
@@ -115,7 +115,7 @@ def convert_dropout(node, params, layers, lambda_func, node_name, keras_name):
     input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)
 
     ratio = params['ratio'] if 'ratio' in params else 0.0
-    lambda_layer = keras.layers.Dropout(ratio, name=keras_name)
+    lambda_layer = keras.layers.Dropout(ratio, name=f"{params['cleaned_name']}_dropout")
     layers[node_name] = lambda_layer(input_0)
@@ -152,6 +152,6 @@ def target_layer(x, depth_radius=params['size'], bias=params['bias'], alpha=para
 
         return layer
 
-    lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
+    lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_lrn")
     layers[node_name] = lambda_layer(input_0)
     lambda_func[keras_name] = target_layer
8 changes: 4 additions & 4 deletions onnx2kerastl/operation_layers.py
@@ -82,7 +82,7 @@ def target_layer(x):
         import keras.backend as K
         return K.log(x)
 
-    lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
+    lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_log")
     layers[node_name] = lambda_layer(input_0)
     lambda_func[keras_name] = target_layer
@@ -124,7 +124,7 @@ def target_layer(x):
         import keras.backend as K
         return K.exp(x)
 
-    lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
+    lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_exp")
     layers[node_name] = lambda_layer(input_0)
     lambda_func[keras_name] = target_layer
@@ -158,7 +158,7 @@ def target_layer(x, axis=axis, keep_dims=keep_dims):
         import keras.backend as K
         return K.sum(x, keepdims=keep_dims, axis=axis)
 
-    lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
+    lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_reduce_sum")
     layers[node_name] = lambda_layer(input_0)
     layers[node_name].set_shape(layers[node_name].shape)
     lambda_func[keras_name] = target_layer
@@ -205,7 +205,7 @@ def target_layer(x, axis=params.get('axes'), keepdims=params['keepdims']):
         import keras.backend as K
         return K.max(x, keepdims=(keepdims == 1), axis=axis)
 
-    lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
+    lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_reduce_max")
    layers[node_name] = lambda_layer(input_0)
     layers[node_name].set_shape(layers[node_name].shape)
     lambda_func[keras_name] = target_layer
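
All four conversions in this file wrap a backend op in keras.layers.Lambda, so each needs a distinct name: Keras refuses to build a model containing two layers with the same name. A standalone demonstration of why the op suffix matters (layer names here are hypothetical):

    import tensorflow as tf
    from tensorflow import keras

    inp = keras.layers.Input(shape=(4,))
    x = keras.layers.Lambda(lambda t: tf.math.log(t), name="node_0_log")(inp)
    x = keras.layers.Lambda(lambda t: tf.math.exp(t), name="node_0_exp")(x)
    model = keras.Model(inp, x)  # distinct suffixes keep every layer name unique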
10 changes: 5 additions & 5 deletions onnx2kerastl/padding_layers.py
@@ -46,7 +46,7 @@ def convert_padding(node, params, layers, lambda_func, node_name, keras_name):
         if pads.shape[0] == 8:
             padding_layer = keras.layers.ZeroPadding2D(
                 padding=((pads[2], pads[6]), (pads[3], pads[7])),
-                name=keras_name
+                name=f"{params['cleaned_name']}_pad_0"
             )
         elif pads.shape[0] == 12:  # Check for rank 6 input
             padding_layer = keras.layers.Lambda(
@@ -62,14 +62,14 @@ def convert_padding(node, params, layers, lambda_func, node_name, keras_name):
                     ],
                     mode='CONSTANT'
                 ),
-                name=keras_name
+                name=f"{params['cleaned_name']}_pad_1"
             )
             layers[node_name] = padding_layer(input_0)
         else:
             logger.warning("Caution - no test yet")
             padding_layer = keras.layers.ZeroPadding3D(
                 padding=((pads[2], pads[7]), (pads[3], pads[8]), (pads[4], pads[9])),
-                name=keras_name
+                name=f"{params['cleaned_name']}_pad_2"
             )
             layers[node_name] = padding_layer(input_0)
     elif params['mode'] == 'reflect':
@@ -82,7 +82,7 @@ def target_layer(x, pads=pads):
            layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[7]], [pads[3], pads[8]], [pads[4], pads[9]]], 'REFLECT')
            return layer
 
-        lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
+        lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_pad_reflect")
         layers[node_name] = lambda_layer(input_0)
         lambda_func[keras_name] = target_layer
     elif params['mode'] == 'edge':
@@ -96,7 +96,7 @@ def target_layer(x, pads=pads):
            layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[7]], [pads[3], pads[8]], [pads[4], pads[9]]], 'SYMMETRIC')
            return layer
 
-        lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
+        lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_pad_edge")
         layers[node_name] = lambda_layer(input_0)
         lambda_func[keras_name] = target_layer
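
The reflect and edge branches map ONNX pad modes onto tf.pad modes ('REFLECT' and 'SYMMETRIC' respectively). A standalone check of that mapping, using a small channels-first tensor rather than the converter's pads layout:

    import tensorflow as tf

    x = tf.reshape(tf.range(9, dtype=tf.float32), (1, 1, 3, 3))
    print(tf.pad(x, [[0, 0], [0, 0], [1, 1], [1, 1]], 'REFLECT'))    # ONNX mode='reflect'
    print(tf.pad(x, [[0, 0], [0, 0], [1, 1], [1, 1]], 'SYMMETRIC'))  # ONNX mode='edge'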
25 changes: 14 additions & 11 deletions onnx2kerastl/pooling_layers.py
@@ -38,7 +38,7 @@ def convert_maxpool(node, params, layers, lambda_func, node_name, keras_name):
             logger.debug('Use `same` padding parameters.')
         else:
             logger.warning('Unable to use `same` padding. Add ZeroPadding2D layer to fix shapes.')
-            padding_name = keras_name + '_pad'
+            padding_name = f"{params['cleaned_name']}_maxpool" + '_pad'
             if len(kernel_shape) == 2:
                 padding = None
 
@@ -64,15 +64,15 @@ def convert_maxpool(node, params, layers, lambda_func, node_name, keras_name):
             pool_size=kernel_shape,
             strides=stride_shape,
             padding=pad,
-            name=keras_name,
+            name=f"{params['cleaned_name']}_maxpool",
             data_format='channels_first'
         )
     else:
         pooling = keras.layers.MaxPooling3D(
             pool_size=kernel_shape,
             strides=stride_shape,
             padding=pad,
-            name=keras_name,
+            name=f"{params['cleaned_name']}_maxpool",
             data_format='channels_first'
         )
     ceil_mode = params.get('ceil_mode', False)
@@ -126,7 +126,7 @@ def convert_avgpool(node, params, layers, lambda_func, node_name, keras_name):
         else:
             pad = 'valid'
             logger.warning('Unable to use `same` padding. Add ZeroPadding2D layer to fix shapes.')
-            padding_name = keras_name + '_pad'
+            padding_name = f"{params['cleaned_name']}_avgpool" + '_pad'
             if len(kernel_shape) == 2:
                 padding_layer = keras.layers.ZeroPadding2D(
                     padding=pads[:len(stride_shape)],
@@ -144,23 +144,23 @@ def convert_avgpool(node, params, layers, lambda_func, node_name, keras_name):
             pool_size=kernel_shape,
             strides=stride_shape,
             padding=pad,
-            name=keras_name,
+            name=f"{params['cleaned_name']}_avgpool",
             data_format='channels_first'
         )
     elif len(kernel_shape) == 1:
         pooling = keras.layers.AveragePooling1D(
             pool_size=kernel_shape,
             strides=stride_shape,
             padding=pad,
-            name=keras_name,
+            name=f"{params['cleaned_name']}_avgpool",
             data_format='channels_first'
         )
     else:
         pooling = keras.layers.AveragePooling3D(
             pool_size=kernel_shape,
             strides=stride_shape,
             padding=pad,
-            name=keras_name,
+            name=f"{params['cleaned_name']}_avgpool",
             data_format='channels_first'
         )
     layers[node_name] = pooling(input_0)
@@ -180,18 +180,21 @@ def convert_global_avg_pool(node, params, layers, lambda_func, node_name, keras_
     input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)
     tensor_dim = len(input_0.shape)
     if tensor_dim == 3:
-        global_pool = keras.layers.GlobalAveragePooling1D(data_format='channels_first', name=keras_name)
+        global_pool = keras.layers.GlobalAveragePooling1D(data_format='channels_first',
+                                                          name=f"{params['cleaned_name']}_global_avg_pool_3")
     elif tensor_dim == 4:
-        global_pool = keras.layers.GlobalAveragePooling2D(data_format='channels_first', name=keras_name)
+        global_pool = keras.layers.GlobalAveragePooling2D(data_format='channels_first',
+                                                          name=f"{params['cleaned_name']}_global_avg_pool_4")
     elif tensor_dim == 5:
-        global_pool = keras.layers.GlobalAveragePooling3D(data_format='channels_first', name=keras_name)
+        global_pool = keras.layers.GlobalAveragePooling3D(data_format='channels_first',
+                                                          name=f"{params['cleaned_name']}_global_avg_pool_5")
     else:
         raise NotImplementedError("Global average pooling of dims < 3 or dims > 5 is not supported")
     input_0 = global_pool(input_0)
     new_shape = input_0.shape.as_list()
     new_shape = new_shape[1:]
     new_shape.extend([1] * (tensor_dim - 2))
-    reshape_layer = keras.layers.Reshape(new_shape, name=f"{params['cleaned_name']}_reshape")
+    reshape_layer = keras.layers.Reshape(new_shape, name=f"{params['cleaned_name']}_global_avg_pool_reshape")
     input_0 = reshape_layer(input_0)
 
     layers[node_name] = input_0
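
For reference, the reshape in convert_global_avg_pool restores the spatial rank that global pooling collapses: new_shape keeps the channel dim and pads with 1s back to tensor_dim - 1 entries. A worked 4-D example (channels-first shapes; layer names here are hypothetical):

    from tensorflow import keras

    x = keras.layers.Input(shape=(8, 5, 5))  # (N, C=8, H=5, W=5)
    pooled = keras.layers.GlobalAveragePooling2D(
        data_format='channels_first', name="node_0_global_avg_pool_4")(x)  # (N, 8)
    # [8] + [1] * (4 - 2) == [8, 1, 1], matching ONNX GlobalAveragePool output rank
    restored = keras.layers.Reshape((8, 1, 1),
                                    name="node_0_global_avg_pool_reshape")(pooled)
    print(restored.shape)  # (None, 8, 1, 1)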
2 changes: 1 addition & 1 deletion onnx2kerastl/reshape_layers.py
@@ -35,7 +35,7 @@ def convert_transpose(node, params, layers, lambda_func, node_name, keras_name):
         layers[node_name] = tf_transpose(layers[input_name], perm=params['perm'],
                                          tf_name=f"{params['cleaned_name']}_transpose")
     else:
-        permute = keras.layers.Permute(params['perm'][1:], name=keras_name)
+        permute = keras.layers.Permute(params['perm'][1:], name=f"{params['cleaned_name']}_transpose")
         layers[node_name] = permute(layers[input_name])
