Commit

replace keras_name with new naming
tomkoren21 committed Jul 16, 2024
1 parent c249a7f commit 84401fc
Showing 10 changed files with 50 additions and 43 deletions.
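
The pattern is the same across all ten files: layer names that were previously taken verbatim from the keras_name argument are now built from params['cleaned_name'] plus an operation-specific suffix, so each generated Keras layer gets a unique, readable name. The sketch below only illustrates that before/after pattern; make_cleaned_name is a hypothetical stand-in, not the sanitizer the repository actually uses to produce params['cleaned_name'].

# Illustrative sketch of the naming change in this commit, not repository code.
# make_cleaned_name is a hypothetical sanitizer standing in for however
# params['cleaned_name'] is actually produced upstream in the converter.
import re
from tensorflow import keras

def make_cleaned_name(node_name: str) -> str:
    # Keep only characters that are safe in a Keras layer name.
    return re.sub(r"[^0-9a-zA-Z_.\-]", "_", node_name)

params = {"cleaned_name": make_cleaned_name("features/3/Relu")}

# Old style (before this commit): the raw keras_name was passed straight through.
# relu = keras.layers.Activation("relu", name=keras_name)

# New style (this commit): cleaned name plus an op-specific suffix.
relu = keras.layers.Activation("relu", name=f"{params['cleaned_name']}_relu")
print(relu.name)  # features_3_Relu_relu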
12 changes: 6 additions & 6 deletions onnx2kerastl/activation_layers.py
@@ -23,7 +23,7 @@ def convert_relu(node, params, layers, lambda_func, node_name, keras_name):

input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)

relu = keras.layers.Activation('relu', name=keras_name)
relu = keras.layers.Activation('relu', name=f"{params['cleaned_name']}_relu")
layers[node_name] = relu(input_0)


@@ -43,7 +43,7 @@ def convert_elu(node, params, layers, lambda_func, node_name, keras_name):

input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)
alpha = params.get('alpha', keras.layers.ELU.__init__.__defaults__[0])
elu = keras.layers.ELU(alpha=alpha, name=keras_name)
elu = keras.layers.ELU(alpha=alpha, name=f"{params['cleaned_name']}_elu")
layers[node_name] = elu(input_0)


@@ -64,7 +64,7 @@ def convert_lrelu(node, params, layers, lambda_func, node_name, keras_name):
input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)

alpha = params.get('alpha', keras.layers.LeakyReLU.__init__.__defaults__[0])
leakyrelu = keras.layers.LeakyReLU(alpha=alpha, name=keras_name)
leakyrelu = keras.layers.LeakyReLU(alpha=alpha, name=f"{params['cleaned_name']}_leakyrelu")
layers[node_name] = leakyrelu(input_0)


@@ -84,7 +84,7 @@ def convert_sigmoid(node, params, layers, lambda_func, node_name, keras_name):

input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)

sigmoid = keras.layers.Activation('sigmoid', name=keras_name)
sigmoid = keras.layers.Activation('sigmoid', name=f"{params['cleaned_name']}_sigmoid")
layers[node_name] = sigmoid(input_0)


@@ -104,7 +104,7 @@ def convert_tanh(node, params, layers, lambda_func, node_name, keras_name):

input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)

tanh = keras.layers.Activation('tanh', name=keras_name)
tanh = keras.layers.Activation('tanh', name=f"{params['cleaned_name']}_tanh")
layers[node_name] = tanh(input_0)


@@ -124,7 +124,7 @@ def convert_selu(node, params, layers, lambda_func, node_name, keras_name):

input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)

selu = keras.layers.Activation('selu', name=keras_name)
selu = keras.layers.Activation('selu', name=f"{params['cleaned_name']}_selu")
layers[node_name] = selu(input_0)


2 changes: 1 addition & 1 deletion onnx2kerastl/elementwise_layers.py
@@ -85,7 +85,7 @@ def convert_elementwise_add(node, params, layers, lambda_func, node_name, keras_
to_add = tf_repeat(tf_expand_dims(input_1, axis=-1, tf_name=f"{params['cleaned_name']}_expand"),
input_0.shape[-1], axis=-1, tf_name=f"{params['cleaned_name']}_repeat")

layers[node_name] = keras.layers.Add(name=keras_name)([input_0, to_add])
layers[node_name] = keras.layers.Add(name=f"{params['cleaned_name']}_add")([input_0, to_add])
else:
raise ValueError('Operands are different.')
except (IndexError, ValueError):
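
For context on the hunk above: the tf_expand_dims/tf_repeat pair implements a manual broadcast. When one addend is missing the last axis, it is expanded and tiled to match before keras.layers.Add is applied. A minimal stand-alone sketch with plain TensorFlow ops (the repository's tf_* helpers appear to be thin, name-adding wrappers around these; that is an assumption here):

# Stand-alone sketch of the broadcast-then-Add pattern used in convert_elementwise_add:
# the lower-rank operand gains a trailing axis and is repeated to match, then added.
import tensorflow as tf
from tensorflow import keras

input_0 = tf.random.normal((2, 4, 8))   # full-rank operand
input_1 = tf.random.normal((2, 4))      # operand missing the last axis

to_add = tf.repeat(tf.expand_dims(input_1, axis=-1), input_0.shape[-1], axis=-1)
added = keras.layers.Add(name="example_add")([input_0, to_add])
print(added.shape)  # (2, 4, 8)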
2 changes: 1 addition & 1 deletion onnx2kerastl/linear_layers.py
@@ -44,7 +44,7 @@ def convert_gemm(node, params, layers, lambda_func, node_name, keras_name):
if is_numpy(keras_weights[0]):
dense = keras.layers.Dense(
output_channels,
weights=keras_weights, name=keras_name, use_bias=has_bias
weights=keras_weights, name=f"{params['cleaned_name']}_gemm_dense", use_bias=has_bias
)

# The first input - always X
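
As background for the Gemm hunk above, convert_gemm hands the ONNX weights to a Keras Dense layer through the weights= constructor argument. A hedged sketch of that mapping with invented shapes and names (the kernel is assumed to be an (in_features, out_features) matrix, as Dense expects):

# Sketch of mapping Gemm-style weights onto keras.layers.Dense via weights=,
# mirroring the pattern in convert_gemm; shapes and names here are invented.
import numpy as np
from tensorflow import keras

in_features, out_features = 4, 3
kernel = np.random.rand(in_features, out_features).astype(np.float32)
bias = np.zeros(out_features, dtype=np.float32)

dense = keras.layers.Dense(out_features, weights=[kernel, bias],
                           name="example_gemm_dense", use_bias=True)
x = np.random.rand(2, in_features).astype(np.float32)
print(dense(x).shape)  # (2, 3)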
16 changes: 9 additions & 7 deletions onnx2kerastl/ltsm_layers.py
@@ -31,23 +31,25 @@ def convert_lstm(node, params, layers, lambda_func, node_name, keras_name):
if direction != 'forward':
raise UnsupportedLayer(f"LSTM with {direction} direction")
should_return_state = len(node.output) == 3
input_tensor = tf.transpose(ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name[0]), perm=[1, 0, 2])
input_tensor = tf_transpose(ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name[0]),
perm=[1, 0, 2],
tf_name=f"{params['cleaned_name']}_lstm_first_transpose")
weights_w = layers[node.input[1]][0]
weights_r = layers[node.input[2]][0]
weights_b = layers[node.input[3]][0]

initial_h_state = tf_cast(tf_squeeze(ensure_tf_type(layers[node.input[5]]),
axis=0,
tf_name=f"{params['cleaned_name']}_squeeze_h"
tf_name=f"{params['cleaned_name']}_lstm_squeeze_h"
),
input_tensor.dtype,
tf_name=f"{params['cleaned_name']}_cast_h")
tf_name=f"{params['cleaned_name']}_lstm_cast_h")
initial_c_state = tf_cast(
tf_squeeze(
ensure_tf_type(layers[node.input[6]]),
axis=0,
tf_name=f"{params['cleaned_name']}_squeeze_c"), input_tensor.dtype,
tf_name=f"{params['cleaned_name']}_cast_c")
tf_name=f"{params['cleaned_name']}_lstm_squeeze_c"), input_tensor.dtype,
tf_name=f"{params['cleaned_name']}_lstm_cast_c")

tf.keras.backend.set_image_data_format("channels_last")
hidden_size = params['hidden_size']
@@ -87,9 +89,9 @@ def convert_lstm(node, params, layers, lambda_func, node_name, keras_name):
layers[node.output[2]] = c_out
else:
lstm_tensor = res
lstm_tensor_in_onnx_order = tf_transpose(lstm_tensor, perm=[1, 0, 2], tf_name=f"{params['cleaned_name']}_transpose")
lstm_tensor_in_onnx_order = tf_transpose(lstm_tensor, perm=[1, 0, 2], tf_name=f"{params['cleaned_name']}_lstm_transpose")
lstm_tensor_in_onnx_order = tf_expand_dims(lstm_tensor_in_onnx_order, axis=1,
tf_name=f"{params['cleaned_name']}_expand_dims")
tf_name=f"{params['cleaned_name']}_lstm_expand_dims")
layers[node_name] = lstm_tensor_in_onnx_order

def convert_gru(node, params, layers, lambda_func, node_name, keras_name):
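
The transposes renamed in this file exist because ONNX LSTM tensors are laid out as (seq_length, batch, features), with an extra num_directions axis on the outputs, while Keras recurrent layers work with (batch, timesteps, features). A short stand-alone illustration of that layout round trip, using plain tf ops rather than the repository's tf_transpose/tf_expand_dims wrappers:

# Layout round trip assumed by convert_lstm: ONNX orders axes (seq, batch, feat),
# Keras LSTM consumes and produces (batch, timesteps, feat).
import tensorflow as tf

seq_len, batch, feat, hidden = 5, 2, 3, 4
onnx_style = tf.random.normal((seq_len, batch, feat))     # ONNX input ordering

keras_style = tf.transpose(onnx_style, perm=[1, 0, 2])    # -> (batch, seq, feat)
out = tf.keras.layers.LSTM(hidden, return_sequences=True)(keras_style)

back = tf.transpose(out, perm=[1, 0, 2])                  # -> (seq, batch, hidden)
y_onnx = tf.expand_dims(back, axis=1)                     # reinstate num_directions
print(y_onnx.shape)  # (5, 1, 2, 4)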
8 changes: 4 additions & 4 deletions onnx2kerastl/normalization_layers.py
@@ -49,13 +49,13 @@ def convert_batchnorm(node, params, layers, lambda_func, node_name, keras_name):
axis=1, momentum=momentum, epsilon=eps,
center=False, scale=False,
weights=weights,
name=keras_name
name=f"{params['cleaned_name']}_bn"
)
else:
bn = keras.layers.BatchNormalization(
axis=1, momentum=momentum, epsilon=eps,
weights=weights,
name=keras_name
name=f"{params['cleaned_name']}_bn"
)

layers[node_name] = bn(input_0)
@@ -115,7 +115,7 @@ def convert_dropout(node, params, layers, lambda_func, node_name, keras_name):
input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)

ratio = params['ratio'] if 'ratio' in params else 0.0
lambda_layer = keras.layers.Dropout(ratio, name=keras_name)
lambda_layer = keras.layers.Dropout(ratio, name=f"{params['cleaned_name']}_dropout")
layers[node_name] = lambda_layer(input_0)


@@ -152,6 +152,6 @@ def target_layer(x, depth_radius=params['size'], bias=params['bias'], alpha=para

return layer

lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_lrn")
layers[node_name] = lambda_layer(input_0)
lambda_func[keras_name] = target_layer
8 changes: 4 additions & 4 deletions onnx2kerastl/operation_layers.py
@@ -82,7 +82,7 @@ def target_layer(x):
import keras.backend as K
return K.log(x)

lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_log")
layers[node_name] = lambda_layer(input_0)
lambda_func[keras_name] = target_layer

@@ -124,7 +124,7 @@ def target_layer(x):
import keras.backend as K
return K.exp(x)

lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_exp")
layers[node_name] = lambda_layer(input_0)
lambda_func[keras_name] = target_layer

@@ -158,7 +158,7 @@ def target_layer(x, axis=axis, keep_dims=keep_dims):
import keras.backend as K
return K.sum(x, keepdims=keep_dims, axis=axis)

lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_reduce_sum")
layers[node_name] = lambda_layer(input_0)
layers[node_name].set_shape(layers[node_name].shape)
lambda_func[keras_name] = target_layer
@@ -205,7 +205,7 @@ def target_layer(x, axis=params.get('axes'), keepdims=params['keepdims']):
import keras.backend as K
return K.max(x, keepdims=(keepdims == 1), axis=axis)

lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_reduce_max")
layers[node_name] = lambda_layer(input_0)
layers[node_name].set_shape(layers[node_name].shape)
lambda_func[keras_name] = target_layer
10 changes: 5 additions & 5 deletions onnx2kerastl/padding_layers.py
@@ -46,7 +46,7 @@ def convert_padding(node, params, layers, lambda_func, node_name, keras_name):
if pads.shape[0] == 8:
padding_layer = keras.layers.ZeroPadding2D(
padding=((pads[2], pads[6]), (pads[3], pads[7])),
name=keras_name
name=f"{params['cleaned_name']}_pad_0"
)
elif pads.shape[0] == 12: # Check for rank 6 input
padding_layer = keras.layers.Lambda(
@@ -62,14 +62,14 @@
],
mode='CONSTANT'
),
name=keras_name
name=f"{params['cleaned_name']}_pad_1"
)
layers[node_name] = padding_layer(input_0)
else:
logger.warning("Caution - no test yet")
padding_layer = keras.layers.ZeroPadding3D(
padding=((pads[2], pads[7]), (pads[3], pads[8]), (pads[4], pads[9])),
name=keras_name
name=f"{params['cleaned_name']}_pad_2"
)
layers[node_name] = padding_layer(input_0)
elif params['mode'] == 'reflect':
@@ -82,7 +82,7 @@ def target_layer(x, pads=pads):
layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[7]], [pads[3], pads[8]], [pads[4], pads[9]]], 'REFLECT')
return layer

lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_pad_reflect")
layers[node_name] = lambda_layer(input_0)
lambda_func[keras_name] = target_layer
elif params['mode'] == 'edge':
@@ -96,7 +96,7 @@ def target_layer(x, pads=pads):
layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[7]], [pads[3], pads[8]], [pads[4], pads[9]]], 'SYMMETRIC')
return layer

lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_pad_edge")
layers[node_name] = lambda_layer(input_0)
lambda_func[keras_name] = target_layer

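
The hard-coded indices in convert_padding follow ONNX's pads layout: all begin values first, then all end values, i.e. [x1_begin, x2_begin, ..., xN_begin, x1_end, ..., xN_end]. For a rank-4 NCHW input (pads of length 8) the height and width padding is therefore (pads[2], pads[6]) and (pads[3], pads[7]). A small sketch of that mapping with example values (not from the repository):

# Mapping an ONNX 'pads' attribute for a rank-4 NCHW input onto ZeroPadding2D.
# ONNX layout: [n_begin, c_begin, h_begin, w_begin, n_end, c_end, h_end, w_end]
import numpy as np
from tensorflow import keras

pads = np.array([0, 0, 1, 2, 0, 0, 3, 4])  # example values only

pad_layer = keras.layers.ZeroPadding2D(
    padding=((int(pads[2]), int(pads[6])), (int(pads[3]), int(pads[7]))),
    data_format="channels_first",  # pad the H and W axes of an NCHW tensor
)
x = np.zeros((1, 3, 10, 10), dtype=np.float32)
print(pad_layer(x).shape)  # (1, 3, 14, 16)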
25 changes: 14 additions & 11 deletions onnx2kerastl/pooling_layers.py
@@ -39,7 +39,7 @@ def convert_maxpool(node, params, layers, lambda_func, node_name, keras_name):
logger.debug('Use `same` padding parameters.')
else:
logger.warning('Unable to use `same` padding. Add ZeroPadding2D layer to fix shapes.')
padding_name = keras_name + '_pad'
padding_name = f"{params['cleaned_name']}_maxpool" + '_pad'
if len(kernel_shape) == 2:
padding = None

@@ -65,15 +65,15 @@
pool_size=kernel_shape,
strides=stride_shape,
padding=pad,
name=keras_name,
name=f"{params['cleaned_name']}_maxpool",
data_format='channels_first'
)
else:
pooling = keras.layers.MaxPooling3D(
pool_size=kernel_shape,
strides=stride_shape,
padding=pad,
name=keras_name,
name=f"{params['cleaned_name']}_maxpool",
data_format='channels_first'
)
ceil_mode = params.get('ceil_mode', False)
@@ -127,7 +127,7 @@ def convert_avgpool(node, params, layers, lambda_func, node_name, keras_name):
else:
pad = 'valid'
logger.warning('Unable to use `same` padding. Add ZeroPadding2D layer to fix shapes.')
padding_name = keras_name + '_pad'
padding_name = f"{params['cleaned_name']}_avgpool" + '_pad'
if len(kernel_shape) == 2:
padding_layer = keras.layers.ZeroPadding2D(
padding=pads[:len(stride_shape)],
@@ -145,23 +145,23 @@
pool_size=kernel_shape,
strides=stride_shape,
padding=pad,
name=keras_name,
name=f"{params['cleaned_name']}_avgpool",
data_format='channels_first'
)
elif len(kernel_shape) == 1:
pooling = keras.layers.AveragePooling1D(
pool_size=kernel_shape,
strides=stride_shape,
padding=pad,
name=keras_name,
name=f"{params['cleaned_name']}_avgpool",
data_format='channels_first'
)
else:
pooling = keras.layers.AveragePooling3D(
pool_size=kernel_shape,
strides=stride_shape,
padding=pad,
name=keras_name,
name=f"{params['cleaned_name']}_avgpool",
data_format='channels_first'
)
layers[node_name] = pooling(input_0)
@@ -181,18 +181,21 @@ def convert_global_avg_pool(node, params, layers, lambda_func, node_name, keras_
input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)
tensor_dim = len(input_0.shape)
if tensor_dim == 3:
global_pool = keras.layers.GlobalAveragePooling1D(data_format='channels_first', name=keras_name)
global_pool = keras.layers.GlobalAveragePooling1D(data_format='channels_first',
name=f"{params['cleaned_name']}_global_avg_pool_3")
elif tensor_dim == 4:
global_pool = keras.layers.GlobalAveragePooling2D(data_format='channels_first', name=keras_name)
global_pool = keras.layers.GlobalAveragePooling2D(data_format='channels_first',
name=f"{params['cleaned_name']}_global_avg_pool_4")
elif tensor_dim == 5:
global_pool = keras.layers.GlobalAveragePooling3D(data_format='channels_first', name=keras_name)
global_pool = keras.layers.GlobalAveragePooling3D(data_format='channels_first',
name=f"{params['cleaned_name']}_global_avg_pool_5")
else:
raise NotImplementedError("Global average pooling of dims < 3 or dims > 5 is not supported")
input_0 = global_pool(input_0)
new_shape = input_0.shape.as_list()
new_shape = new_shape[1:]
new_shape.extend([1] * (tensor_dim - 2))
reshape_layer = keras.layers.Reshape(new_shape, name=f"{params['cleaned_name']}_reshape")
reshape_layer = keras.layers.Reshape(new_shape, name=f"{params['cleaned_name']}_global_avg_pool_reshape")
input_0 = reshape_layer(input_0)

layers[node_name] = input_0
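
The Reshape renamed at the end of convert_global_avg_pool is there because ONNX's GlobalAveragePool keeps the reduced spatial axes as size-1 dimensions (NCHW in, NC11 out), while the Keras GlobalAveragePooling layers drop them. A short sketch of that rank restoration for the rank-4 case, with example shapes:

# GlobalAveragePooling2D collapses (N, C, H, W) to (N, C); ONNX expects (N, C, 1, 1),
# so trailing singleton axes are added back with a Reshape, as in the hunk above.
import numpy as np
from tensorflow import keras

x = np.random.rand(2, 8, 5, 5).astype(np.float32)  # example NCHW input
tensor_dim = x.ndim                                  # 4

pooled = keras.layers.GlobalAveragePooling2D(data_format="channels_first")(x)
print(pooled.shape)                                  # (2, 8)

new_shape = list(pooled.shape)[1:] + [1] * (tensor_dim - 2)
restored = keras.layers.Reshape(new_shape)(pooled)
print(restored.shape)                                # (2, 8, 1, 1)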
7 changes: 4 additions & 3 deletions onnx2kerastl/reshape_layers.py
@@ -36,7 +36,7 @@ def convert_transpose(node, params, layers, lambda_func, node_name, keras_name):
layers[node_name] = tf_transpose(layers[input_name], perm=params['perm'],
tf_name=f"{params['cleaned_name']}_transpose")
else:
permute = keras.layers.Permute(params['perm'][1:], name=keras_name)
permute = keras.layers.Permute(params['perm'][1:], name=f"{params['cleaned_name']}_transpose")
layers[node_name] = permute(layers[input_name])


@@ -228,7 +228,7 @@ def convert_concat(node, params, layers, lambda_func, node_name, keras_name):
layer_input = unsqueeze_tensors_of_rank_one(layer_input, axis=params['axis'], name=params['cleaned_name'])
layers[node_name] = keras.layers.concatenate(inputs=layer_input,
axis=params['axis'],
name=keras_name)
name=f"{params['cleaned_name']}_concat_2")
else:
layers[node_name] = layer_input[0]

@@ -290,7 +290,8 @@ def target_layer(x):
x = tf.transpose(x, [0, 3, 1, 2])
return x

lambda_layer = keras.layers.Lambda(target_layer, name="%s_CHW" % keras_name)
lambda_layer = keras.layers.Lambda(target_layer,
name="%s_CHW" % f"{params['cleaned_name']}_reshape_lambda")
layers[node_name] = lambda_layer(input_0)
lambda_func[keras_name] = target_layer
else:
3 changes: 2 additions & 1 deletion onnx2kerastl/upsampling_layers.py
@@ -38,6 +38,7 @@ def convert_upsample(node, params, layers, lambda_func, node_name, keras_name):
logger.error(f'Cannot convert upsampling. interpolation mode: {interpolation_mode} is not supported')
raise AssertionError(f'Cannot convert upsampling. interpolation mode: {interpolation_mode} is not supported')

upsampling = keras.layers.UpSampling2D(size=scale, name=keras_name, interpolation=interpolation)
upsampling = keras.layers.UpSampling2D(size=scale, name=f"{params['cleaned_name']}_upsample_2d",
interpolation=interpolation)

layers[node_name] = upsampling(layers[node.input[0]])
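
Taken together, the practical effect of the commit is that every converter now requires params to carry a cleaned_name entry in addition to the keras_name argument it still receives. A hypothetical invocation sketch for one converter; the stub node, names, and shapes below are invented, and the real framework builds node, params, and layers from the parsed ONNX graph:

# Hypothetical call shape only; illustrates that params must now include
# 'cleaned_name' alongside the keras_name argument that is still passed.
from types import SimpleNamespace
from tensorflow import keras
from onnx2kerastl.activation_layers import convert_relu

layers = {"x": keras.Input(shape=(3, 32, 32), name="x")}
node = SimpleNamespace(input=["x"])           # minimal stand-in for an ONNX NodeProto
params = {"cleaned_name": "block1_relu"}      # entry introduced by this commit

convert_relu(node, params, layers, lambda_func={}, node_name="relu_out",
             keras_name="block1_Relu_0")
print(layers["relu_out"].shape)               # (None, 3, 32, 32)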
