[KERAS] conv3d frontend operator support (#5080)
* [KERAS]Conv3d support added

* Keras conv3d testcase added
siju-samuel authored Mar 21, 2020
1 parent 36a83c7 commit 050f2bd
Showing 2 changed files with 106 additions and 6 deletions.
89 changes: 83 additions & 6 deletions python/tvm/relay/frontend/keras.py
@@ -320,6 +320,75 @@ def _convert_convolution(inexpr, keras_layer, etab):
        out = _convert_activation(out, act_type, etab)
    return out

def _convert_convolution3d(inexpr, keras_layer, etab):
    _check_data_format(keras_layer)
    weightList = keras_layer.get_weights()
    weight = weightList[0]

    # Only the channels-last NDHWC layout is supported for Conv3D.
    if etab.data_layout == 'NDHWC':
        kernel_layout = 'DHWIO'
    else:
        msg = 'Kernel layout with {} is not supported for operator Convolution3D ' \
              'in frontend Keras.'
        raise tvm.error.OpAttributeUnImplemented(msg.format(etab.data_layout))

    dilation_rate = keras_layer.dilation_rate
    if isinstance(dilation_rate, (list, tuple)):
        dilation = [dilation_rate[0], dilation_rate[1], dilation_rate[2]]
    else:
        dilation = [dilation_rate, dilation_rate, dilation_rate]

    kernel_d1 = weight.shape[0]
    kernel_d2 = weight.shape[1]
    kernel_d3 = weight.shape[2]
    # in_channels = weight.shape[3]
    n_filters = weight.shape[4]

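    # Effective kernel extent after dilation is (k - 1) * d + 1 per axis;
    # e.g. a kernel of 3 with dilation 2 spans 5 input positions.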
    dilated_kernel_d1 = (kernel_d1 - 1) * dilation[0] + 1
    dilated_kernel_d2 = (kernel_d2 - 1) * dilation[1] + 1
    dilated_kernel_d3 = (kernel_d3 - 1) * dilation[2] + 1
    stride_d1, stride_d2, stride_d3 = keras_layer.strides
    params = {'weight': etab.new_const(weight),
              'kernel_size': [kernel_d1, kernel_d2, kernel_d3],
              'strides': [stride_d1, stride_d2, stride_d3],
              'dilation': dilation,
              'padding': [0, 0, 0],
              'data_layout': etab.data_layout,
              'kernel_layout': kernel_layout}
    params['channels'] = n_filters

    if keras_layer.padding == 'valid':
        pass
    elif keras_layer.padding == 'same':
        # Calculate the padding values: 'same' keeps the output size at
        # ceil(input / stride), and _get_pad_pair splits the total padding
        # into (before, after) along each spatial axis.
        # E.g. input 32, kernel 3, stride 2: output 16, total pad 1 -> (0, 1).
        in_d1 = keras_layer.input_shape[1]
        in_d2 = keras_layer.input_shape[2]
        in_d3 = keras_layer.input_shape[3]
        pad_d1 = _get_pad_pair(in_d1, dilated_kernel_d1, stride_d1)
        pad_d2 = _get_pad_pair(in_d2, dilated_kernel_d2, stride_d2)
        pad_d3 = _get_pad_pair(in_d3, dilated_kernel_d3, stride_d3)
        params['padding'] = [pad_d1[0], pad_d2[0], pad_d3[0], pad_d1[1], pad_d2[1], pad_d3[1]]
    else:
        msg = 'Padding with {} is not supported for operator Convolution3D ' \
              'in frontend Keras.'
        raise tvm.error.OpAttributeUnImplemented(msg.format(keras_layer.padding))
    out = _op.nn.conv3d(data=inexpr, **params)

    channel_axis = -1 if etab.data_layout == "NDHWC" else 1
    if keras_layer.use_bias:
        bias = etab.new_const(weightList[1])
        out = _op.nn.bias_add(out, bias, channel_axis)

    # defuse activation
    if sys.version_info.major < 3:
        act_type = keras_layer.activation.func_name
    else:
        act_type = keras_layer.activation.__name__
    if act_type != 'linear':
        out = _convert_activation(out, act_type, etab)

    return out

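For reference, the 'same' branch above relies on the frontend's existing _get_pad_pair helper. A behavior-equivalent sketch (not part of this diff; the in-tree implementation may differ in detail):

def _get_pad_pair(input1d, kernel1d, stride1d):
    # Keras 'same' padding targets an output length of ceil(input / stride).
    out1d = (input1d + stride1d - 1) // stride1d
    # Total padding needed for a kernel of (dilated) extent kernel1d; clamp at 0.
    pad = max((out1d - 1) * stride1d + kernel1d - input1d, 0)
    # Split asymmetrically, putting the extra unit after (TensorFlow convention).
    pad_before = pad // 2
    pad_after = pad - pad_before
    return [pad_before, pad_after]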
def _convert_separable_convolution(inexpr, keras_layer, etab):
_check_data_format(keras_layer)
@@ -743,19 +812,27 @@ def _default_skip(inexpr, keras_layer, _):  # pylint: disable=unused-argument
    # 'GlobalMaxPooling1D' : _convert_pooling,
    # 'Cropping1D' : _convert_cropping,
    # 'UpSampling1D' : _convert_upsample,
    # 'Conv1D' : _convert_convolution1d,

    'Conv3D' : _convert_convolution3d,
    # 'Conv3DTranspose' : _convert_convolution3d,
    # 'SeparableConv3D' : _convert_convolution3d,
    # 'MaxPooling3D' : _convert_pooling3d,
    # 'AveragePooling3D' : _convert_pooling3d,
    # 'GlobalMaxPooling3D' : _convert_pooling3d,
    # 'GlobalAveragePooling3D' : _convert_pooling3d,
    # 'UpSampling3D' : _convert_upsample3d,

    'SimpleRNN' : _convert_simple_rnn,
    'LSTM' : _convert_lstm,
    'GRU' : _convert_gru,
    # 'Bidirectional' : _convert_bidirectional,
    # 'TimeDistributed' : _default_skip,

    'Average' : _convert_merge,
    'Maximum' : _convert_merge,
    'Dot' : _convert_merge,
    'Permute' : _convert_permute,
    # 'Embedding' : _convert_embedding,
    # 'RepeatVector' : _convert_repeat_vector,

Expand Down Expand Up @@ -867,7 +944,7 @@ def _convert_input_layer(keras_layer):

    etab = ExprTable()
    # Set global data format.
    assert layout in ['NCHW', 'NHWC', 'NDHWC'], "Layout must be one of 'NCHW', 'NHWC' or 'NDHWC'"
    etab.data_layout = layout
    for keras_layer in model.layers:
        if isinstance(keras_layer, input_layer_class):
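With the layout check extended, a Keras Conv3D model can now be imported end to end. A minimal usage sketch (illustrative only, not part of this commit; the executor pattern and the default input name 'input_1' are assumptions that may need adjusting):

import numpy as np
import keras
import tvm
from tvm import relay

# Small channels-last (NDHWC) model built around Conv3D.
data = keras.layers.Input(shape=(32, 32, 32, 3))
out = keras.layers.Conv3D(filters=10, kernel_size=(3, 3, 3),
                          strides=(2, 2, 2), padding='same')(data)
model = keras.models.Model(data, out)

x = np.random.uniform(size=(1, 32, 32, 32, 3)).astype('float32')
# 'input_1' is the name Keras gives the first Input layer by default.
mod, params = relay.frontend.from_keras(model, {'input_1': x.shape},
                                        layout='NDHWC')
func = relay.create_executor('graph', mod=mod, target='llvm').evaluate()
tvm_out = func(tvm.nd.array(x), **params)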
23 changes: 23 additions & 0 deletions tests/python/frontend/keras/test_forward.py
@@ -398,6 +398,28 @@ def test_forward_mobilenet(self, keras, layout='NCHW'):
            input_shape=(224, 224, 3), classes=1000)
        verify_keras_frontend(keras_model, layout=layout)

    def test_forward_conv3d(self, keras):
        data = keras.layers.Input(shape=(32, 32, 32, 3))
        conv_funcs = [keras.layers.Conv3D(filters=10,
                                          kernel_size=(3, 3, 3),
                                          strides=(2, 2, 2),
                                          padding='same'),
                      keras.layers.Conv3D(filters=10,
                                          kernel_size=(3, 3, 3),
                                          dilation_rate=(2, 2, 2),
                                          padding='same'),
                      keras.layers.Conv3D(filters=1,
                                          kernel_size=(3, 3, 3),
                                          padding='valid',
                                          use_bias=False),
                      keras.layers.Conv3D(filters=10,
                                          kernel_size=(2, 2, 2),
                                          padding='valid'),
                      ]
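        # Each layer above exercises a different converter branch: strided
        # 'same' padding, dilated 'same' padding, 'valid' without bias, and
        # plain 'valid' with bias.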
        for conv_func in conv_funcs:
            x = conv_func(data)
            keras_model = keras.models.Model(data, x)
            verify_keras_frontend(keras_model, layout='NDHWC')

if __name__ == '__main__':
    for k in [keras, tf_keras]:
@@ -426,3 +448,4 @@ def test_forward_mobilenet(self, keras, layout='NCHW'):
        sut.test_forward_resnet50(keras=k, layout='NHWC')
        sut.test_forward_mobilenet(keras=k)
        sut.test_forward_mobilenet(keras=k, layout='NHWC')
        sut.test_forward_conv3d(keras=k)
