[TFLite] Implemented PADV2 Operator for TFLite and added support for constant values in PAD. (#6167)
jainris authored Aug 4, 2020
1 parent d9c4f82 commit 90bde33
Showing 2 changed files with 122 additions and 7 deletions.
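
For orientation, a minimal sketch of how a TFLite model containing a PADV2 operator would be imported through this frontend once the change is in; the file name, input name, shape, and dtype below are placeholders for illustration, not part of the commit.

from tvm import relay

# Placeholder flatbuffer assumed to contain a PADV2 op.
with open("padv2_model.tflite", "rb") as f:
    tflite_model_buf = f.read()

# The generated flatbuffer API differs between tflite package versions.
try:
    import tflite
    tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
except AttributeError:
    import tflite.Model
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)

# Input name, shape, and dtype are assumptions for this example.
mod, params = relay.frontend.from_tflite(
    tflite_model, shape_dict={"in": (2, 1, 1, 3)}, dtype_dict={"in": "float32"}
)
print(mod)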
38 changes: 31 additions & 7 deletions python/tvm/relay/frontend/tflite.py
@@ -116,6 +116,7 @@ def __init__(self, model, subgraph, exp_tab):
'NOT_EQUAL': self.convert_not_equal,
'PACK': self.convert_pack,
'PAD': self.convert_pad,
'PADV2': self.convert_pad,
'POW': self.convert_pow,
'PRELU': self.convert_prelu,
'RANGE': self.convert_range,
@@ -2306,35 +2307,58 @@ def convert_pool2d(self, op, pool_type):
return out

def convert_pad(self, op):
"""Convert TFLite PAD"""
"""Convert TFLite PAD/PADV2 \
TFLite treats PAD and PADV2 operators identically"""

input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"

# TFLite PAD only support CONSTANT mode and does not support constant_values parameter.
# tensor
# TFLite PAD/PADV2 only supports CONSTANT mode
assert (len(input_tensors) == 2 or len(input_tensors) == 3), \
"input tensor's length should be 2 for PAD and 3 for PADV2"

if len(input_tensors) == 3:
assert input_tensors[0].tensor.Type() == input_tensors[2].tensor.Type(), \
"constant_values tensor must be of same type as input tensor"

input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)

# paddings
pad_list = self.get_tensor_value(input_tensors[1])

# convert list of lists to tuple of tuples
paddings = tuple(tuple(l) for l in pad_list)

# Set the pad value
# Set the pad value, by default 0, unless constant_values parameter is provided
pad_value = 0

if input_tensor.qnn_params:
# Check that input and output tensor have same qnn params.
output_tensors = self.get_output_tensors(op)
output_tensor = output_tensors[0]
assert self.has_same_qnn_params(input_tensor, output_tensor), \
"TFLite reshape requires input and output scale and zero points to be equal"
"TFLite PADV2 requires input and output scale and zero points to be equal"

# The pad value for quantized pad is the input zero point.
# The pad value for quantized pad is the input zero point by default.
pad_value = float(input_tensor.qnn_params['zero_point'].data.asnumpy())

if len(input_tensors) == 3:
pad_value = self.get_tensor_value(input_tensors[2])
if isinstance(pad_value, np.ndarray):
pad_value = pad_value.tolist()
if isinstance(pad_value, list):
assert len(pad_value) == 1, "Only one constant value is expected."
pad_value = pad_value[0]
if input_tensor.qnn_params:
# Check that input tensor and constant_values have same qnn params.
assert self.has_same_qnn_params(input_tensor, input_tensors[2]), \
"TFLite PADV2 requires input and constant_values tensors' \
scale and zero points to be equal"

out = _op.nn.pad(in_expr, pad_width=paddings, pad_value=pad_value)
return out


def convert_floor_div(self, op):
"""Convert TFLite FLOOR_DIV"""
if self.is_quantized(op):
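
As a rough illustration of what convert_pad now emits for the three-input (PADV2) case, the final _op.nn.pad call corresponds to a Relay expression like the one below; the shape, paddings, and constant value mirror one of the float32 test cases rather than anything fixed by the converter itself.

from tvm import relay

# A (2, 3) float32 input padded by 1 row on each side and 2 columns on each
# side, filled with -1.0 (cf. the test case that passes
# np.array([-1], dtype=np.float32) as constant_values).
x = relay.var("x", shape=(2, 3), dtype="float32")
y = relay.nn.pad(x, pad_width=((1, 1), (2, 2)), pad_value=-1.0)
func = relay.Function([x], y)
print(func)  # padded output shape: (4, 7)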
91 changes: 91 additions & 0 deletions tests/python/frontend/tflite/test_forward.py
@@ -1924,6 +1924,97 @@ def test_forward_pad():
np.array([[1, 1], [2, 2]], dtype=np.int32)], quantized=True)


#######################################################################
# PADV2
# -----

def _test_padv2(data, mode="CONSTANT", quantized=False):
""" One iteration of PADV2 """

assert (len(data) == 2 or len(data) == 3)

with_constant_values = len(data) == 3

# Test with tensor and constant
with tf.Graph().as_default():
in_data = [array_ops.placeholder(shape=data[0].shape, dtype='float32', name='in')]

if quantized:
# fake_quant will keep the tensors in float32 until the conversion in the session
input_range = {'inq_0': (-100, 100)}
inq_data = [tf.quantization.fake_quant_with_min_max_args(in_data[0],
min=-100,
max=100,
name="inq_0")]
if with_constant_values:
in_constant_values = constant_op.constant(data[2], shape=data[2].shape, dtype='float32', name='in_constant_values')
inq_constant_values = tf.quantization.fake_quant_with_min_max_args(in_constant_values,
min=-100,
max=100,
name='inq_constant_values')
out = array_ops.pad_v2(inq_data[0],
ops.convert_to_tensor(data[1], dtype=data[1].dtype),
constant_values=inq_constant_values,
mode=mode)
out = tf.quantization.fake_quant_with_min_max_args(out, min=-100, max=100, name="out")
else:
out = array_ops.pad_v2(inq_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode)
compare_tflite_with_tvm([data[0]], ['inq_0:0'], inq_data, [out], quantized=True, input_range=input_range)
else:
if with_constant_values:
out = array_ops.pad_v2(in_data[0],
ops.convert_to_tensor(data[1], dtype=data[1].dtype),
constant_values= ops.convert_to_tensor(data[2], dtype=data[2].dtype),
mode=mode)
else:
out = array_ops.pad_v2(in_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode)
compare_tflite_with_tvm([data[0]], ['in:0'], in_data, [out])


def test_forward_padv2():
""" PADV2 """
# Tests without Constant_values
_test_padv2([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),
np.array([[1, 1], [2, 2], [1, 1], [2, 2]], dtype=np.int32)])
_test_padv2([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)),
np.array([[2, 2], [1, 1], [1, 1]], dtype=np.int32)])
_test_padv2([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
np.array([[1, 1], [2, 2]], dtype=np.int32)])
_test_padv2([np.arange(1.0, 4.0, dtype=np.float32).reshape((1, 3)),
np.array([[1, 1], [2, 2]], dtype=np.int32)])
_test_padv2([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
np.array([[1, 1], [2, 2]], dtype=np.int32)], mode="REFLECT")
_test_padv2([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
np.array([[1, 1], [2, 2]], dtype=np.int32)], mode="SYMMETRIC")
_test_padv2([np.arange(0, 256, dtype=np.uint8).reshape((1, 256)),
np.array([[1, 1], [2, 2]], dtype=np.int32)], quantized=True)

# Tests with Constant_values
_test_padv2([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),
np.array([[1, 1], [2, 2], [1, 1], [2, 2]], dtype=np.int32),
np.array([2], dtype=np.float32)])
_test_padv2([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)),
np.array([[2, 2], [1, 1], [1, 1]], dtype=np.int32),
np.array([1], dtype=np.float32)])
_test_padv2([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
np.array([[1, 1], [2, 2]], dtype=np.int32),
np.array([-1], dtype=np.float32)])
_test_padv2([np.arange(1.0, 4.0, dtype=np.float32).reshape((1, 3)),
np.array([[1, 1], [2, 2]], dtype=np.int32),
np.array([2], dtype=np.float32)])
_test_padv2([np.arange(0, 256, dtype=np.uint8).reshape((1, 256)),
np.array([[1, 1], [2, 2]], dtype=np.int32),
np.array([2], dtype=np.uint8)], quantized=True)

# Constant Values input can be scalar
_test_padv2([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),
np.array([[1, 1], [2, 2], [1, 1], [2, 2]], dtype=np.int32),
np.float32(2)])
_test_padv2([np.arange(0, 256, dtype=np.uint8).reshape((1, 256)),
np.array([[1, 1], [2, 2]], dtype=np.int32),
np.uint8(10)], quantized=True)


#######################################################################
# Pack
# ----
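
A closing note on the quantized test cases: in the converter change above, the default pad value for a quantized input is the input's zero point, which is just the quantized encoding of real-valued 0.0. A small numpy check of that identity, using an illustrative scale and zero point (the exact values produced by fake_quant depend on the chosen range):

import numpy as np

# Affine dequantization: real = scale * (q - zero_point).
# Illustrative 8-bit parameters, roughly matching a (-100, 100) range.
scale = 200.0 / 255.0
zero_point = 128

q_pad = np.uint8(zero_point)                    # pad with the zero point...
real_pad = scale * (int(q_pad) - zero_point)    # ...and dequantize it
assert real_pad == 0.0                          # exactly real-valued zero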