From aafb334d775fe025b884501e9c37963f9ec09e03 Mon Sep 17 00:00:00 2001 From: Animesh Jain Date: Wed, 5 Feb 2020 19:24:16 +0000 Subject: [PATCH] Support quantized Pad op. --- python/tvm/relay/frontend/tflite.py | 15 ++++++++++++-- tests/python/frontend/tflite/test_forward.py | 21 ++++++++++++++++---- 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/python/tvm/relay/frontend/tflite.py b/python/tvm/relay/frontend/tflite.py index a45b1d84f3869..2080bdf081e1e 100644 --- a/python/tvm/relay/frontend/tflite.py +++ b/python/tvm/relay/frontend/tflite.py @@ -1426,8 +1426,19 @@ def convert_pad(self, op): # convert list of lists to tuple of tuples paddings = tuple(tuple(l) for l in pad_list) - # Use default pad_value 0 because TFLite does not support constant_values parameter - out = _op.nn.pad(in_expr, paddings) + # Set the pad value + pad_value = 0 + if input_tensor.qnn_params: + # Check that input and output tensor have same qnn params. + output_tensors = self.get_output_tensors(op) + output_tensor = output_tensors[0] + assert self.has_same_qnn_params(input_tensor, output_tensor), \ + "TFLite pad requires input and output scale and zero points to be equal" + + # The pad value for quantized pad is the input zero point. 
+ pad_value = float(input_tensor.qnn_params['zero_point'].data.asnumpy()) + + out = _op.nn.pad(in_expr, pad_width=paddings, pad_value=pad_value) return out def convert_pack(self, op): diff --git a/tests/python/frontend/tflite/test_forward.py b/tests/python/frontend/tflite/test_forward.py index eaaa035cc6bbc..4620e738bf035 100644 --- a/tests/python/frontend/tflite/test_forward.py +++ b/tests/python/frontend/tflite/test_forward.py @@ -1117,16 +1117,27 @@ def test_forward_squeeze(): # Pad # --- -def _test_pad(data): +def _test_pad(data, quantized=False): """ One iteration of PAD """ assert len(data) == 2 # Test with tensor and constant with tf.Graph().as_default(): - in_data = [array_ops.placeholder(shape=data[0].shape, dtype=data[0].dtype, name='in')] - out = array_ops.pad(in_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype)) - compare_tflite_with_tvm([data[0]], ['in:0'], in_data, [out]) + in_data = [array_ops.placeholder(shape=data[0].shape, dtype='float32', name='in')] + + if quantized: + min_value, max_value = -100, 100 + # fake_quant will keep the tensors in float32 until the conversion in the session + inq_data = [tf.quantization.fake_quant_with_min_max_args(in_data[0], + min=-100, + max=100, + name="inq_0")] + out = array_ops.pad(inq_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype)) + compare_tflite_with_tvm([data[0]], ['inq_0:0'], inq_data, [out], quantized=True) + else: + out = array_ops.pad(in_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype)) + compare_tflite_with_tvm([data[0]], ['in:0'], in_data, [out]) def test_forward_pad(): @@ -1139,6 +1150,8 @@ def test_forward_pad(): np.array([[1, 1], [2, 2]], dtype=np.int32)]) _test_pad([np.arange(1.0, 4.0, dtype=np.float32).reshape((1, 3)), np.array([[1, 1], [2, 2]], dtype=np.int32)]) + _test_pad([np.arange(0, 256, dtype=np.uint8).reshape((1, 256)), + np.array([[1, 1], [2, 2]], dtype=np.int32)], quantized=True) #######################################################################