[Frontend][TFlite] Add parser support for relu6, leaky_relu, relu_n1_to_1, log_softmax #4805

Merged · 3 commits · Jun 16, 2020
134 changes: 134 additions & 0 deletions python/tvm/relay/frontend/tflite.py
@@ -94,10 +94,12 @@ def __init__(self, model, subgraph, exp_tab):
'HARD_SWISH': self.convert_hard_swish,
'L2_NORMALIZATION': self.convert_l2_normalization,
'L2_POOL_2D': self.convert_l2_pool2d,
'LEAKY_RELU': self.convert_leaky_relu,
'LESS_EQUAL': self.convert_less_equal,
'LESS': self.convert_less,
'LOCAL_RESPONSE_NORMALIZATION': self.convert_lrn,
'LOG': self.convert_log,
'LOG_SOFTMAX': self.convert_log_softmax,
'LOGICAL_AND': self.convert_logical_and,
'LOGICAL_NOT': self.convert_logical_not,
'LOGICAL_OR': self.convert_logical_or,
@@ -121,6 +123,8 @@ def __init__(self, model, subgraph, exp_tab):
'REDUCE_MIN': self.convert_reduce_min,
'REDUCE_PROD': self.convert_reduce_prod,
'RELU':self.convert_relu,
'RELU6': self.convert_relu6,
'RELU_N1_TO_1': self.convert_relu_n1_to_1,
'RESHAPE': self.convert_reshape,
'RESIZE_BILINEAR': self.convert_resize_bilinear,
'RESIZE_NEAREST_NEIGHBOR': self.convert_resize_nearest_neighbor,
@@ -685,6 +689,136 @@ def _hard_swish(data):

return out

def convert_relu6(self, op):
"""Convert TFLite ReLU6"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)

output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]

if input_tensor.qnn_params:
# Quantize a float value to a quantized integer value
scale_val = get_scalar_from_constant(input_tensor.qnn_params['scale'])
zero_point_val = get_scalar_from_constant(input_tensor.qnn_params['zero_point'])
quantize = lambda x: float(int(round(x / scale_val)) + zero_point_val)

# Get min/max of the input dtype. This is used to ensure that the clip
# a_min/a_max do not go beyond the dtype range.
input_tensor_type_str = self.get_tensor_type_str(input_tensor.tensor.Type())
qmin = float(tvm.tir.op.min_value(input_tensor_type_str).value)
qmax = float(tvm.tir.op.max_value(input_tensor_type_str).value)

out = _op.clip(in_expr,
a_min=max(qmin, quantize(0)),
a_max=min(qmax, quantize(6.0)))
else:
out = _op.clip(in_expr, a_min=0, a_max=6)

if output_tensor.qnn_params:
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
out = _qnn.op.requantize(out,
input_scale=input_tensor.qnn_params['scale'],
input_zero_point=input_tensor.qnn_params['zero_point'],
output_scale=output_tensor.qnn_params['scale'],
output_zero_point=output_tensor.qnn_params['zero_point'],
out_dtype=output_tensor_type_str)

return out
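
As a worked illustration of the quantized clipping above (not part of the PR), here is a minimal standalone sketch, assuming a hypothetical uint8 tensor with scale 0.1 and zero point 128:

# Sketch of the quantized clip-bound computation used in convert_relu6.
# The scale/zero-point values are hypothetical, not taken from this PR.
scale_val = 0.1
zero_point_val = 128
qmin, qmax = 0.0, 255.0  # uint8 range

quantize = lambda x: float(int(round(x / scale_val)) + zero_point_val)

a_min = max(qmin, quantize(0.0))  # -> 128.0
a_max = min(qmax, quantize(6.0))  # -> 188.0
print(a_min, a_max)  # 128.0 188.0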

def convert_leaky_relu(self, op):
"""Convert TFLite LEAKY_RELU"""
Contributor review comment: Not important, I think it should be """Convert TFLite Leaky_ReLU""" to align with """One iteration of Leaky_ReLU""".

try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.LeakyReluOptions import LeakyReluOptions
except ImportError:
raise ImportError("The tflite package must be installed")

input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)

assert op.BuiltinOptionsType() == BuiltinOptions.LeakyReluOptions
op_options = op.BuiltinOptions()
leaky_relu_options = LeakyReluOptions()
leaky_relu_options.Init(op_options.Bytes, op_options.Pos)
alpha_tensor = leaky_relu_options.Alpha()

output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]

if input_tensor.qnn_params:
in_expr = self.dequantize(in_expr, input_tensor)
out = _op.nn.leaky_relu(in_expr, alpha_tensor)
if output_tensor.qnn_params:
out = self.quantize(out, output_tensor)

return out
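
For reference (not part of the PR), the float semantics of the LEAKY_RELU op mapped above are y = x for x >= 0 and y = alpha * x otherwise; a minimal NumPy sketch with hypothetical data:

import numpy as np

# Hypothetical data illustrating what leaky ReLU computes in float.
x = np.array([-2.0, -0.5, 0.0, 1.5], dtype=np.float32)
alpha = 0.2
y = np.where(x >= 0, x, alpha * x)
print(y)  # [-0.4 -0.1  0.   1.5]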

def convert_relu_n1_to_1(self, op):
"""Convert TFLite RELU_N1_TO_1"""
Contributor review comment: Not important, I think it should be """Convert TFLite ReLU_n1_to_1""" to align with """One iteration of ReLU_n1_to_1""".

input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)

output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]

if input_tensor.qnn_params:
# Quantize a float value to a quantized integer value
scale_val = get_scalar_from_constant(input_tensor.qnn_params['scale'])
zero_point_val = get_scalar_from_constant(input_tensor.qnn_params['zero_point'])
quantize = lambda x: float(int(round(x / scale_val)) + zero_point_val)

# Get min/max of the input dtype. This is used to ensure that the clip
# a_min/a_max do not go beyond the dtype range.
input_tensor_type_str = self.get_tensor_type_str(input_tensor.tensor.Type())
qmin = float(tvm.tir.op.min_value(input_tensor_type_str).value)
qmax = float(tvm.tir.op.max_value(input_tensor_type_str).value)

out = _op.clip(in_expr,
a_min=max(qmin, quantize(-1.0)),
a_max=min(qmax, quantize(1.0)))
else:
out = _op.clip(in_expr, a_min=-1, a_max=1)

if output_tensor.qnn_params:
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
out = _qnn.op.requantize(out,
input_scale=input_tensor.qnn_params['scale'],
input_zero_point=input_tensor.qnn_params['zero_point'],
output_scale=output_tensor.qnn_params['scale'],
output_zero_point=output_tensor.qnn_params['zero_point'],
out_dtype=output_tensor_type_str)

return out

def convert_log_softmax(self, op):
"""Convert TFLite LOG_SOFTMAX"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)

output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]

if input_tensor.qnn_params:
in_expr = self.dequantize(in_expr, input_tensor)
out = _op.nn.log_softmax(in_expr)
if output_tensor.qnn_params:
out = self.quantize(out, output_tensor)

return out
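
The quantized path above dequantizes, applies the float op, and re-quantizes. For reference, log_softmax in float is x - log(sum(exp(x))) along the last axis; a minimal NumPy sketch with hypothetical data (not part of the PR):

import numpy as np

# Float semantics of log_softmax along the last axis; hypothetical data.
x = np.array([[1.0, 2.0, 3.0]], dtype=np.float32)
out = x - np.log(np.sum(np.exp(x), axis=-1, keepdims=True))
print(out)  # approx [[-2.4076 -1.4076 -0.4076]]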

def convert_concatenation(self, op):
"""Convert TFLite concatenation"""
try:
109 changes: 109 additions & 0 deletions tests/python/frontend/tflite/test_forward.py
@@ -1900,6 +1900,32 @@ def test_forward_softmax():
""" Softmax """
_test_softmax(np.arange(6.0, dtype=np.float32).reshape((1, 6)))

######################################################################
# Log_softmax
# -----------

def _test_log_softmax(data, quantized=False):
""" One iteration of log_softmax """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype='float32', name='in_0')

if quantized:
inq_data = tf.quantization.fake_quant_with_min_max_args(in_data, min=-10, max=10, name="inq_0")
input_range = {'inq_0': (-10, 10)}
# tflite log_softmax supports only the case when axis is not specified
out = nn_ops.log_softmax(inq_data)
out = tf.quantization.fake_quant_with_min_max_args(out, min=-20, max=0, name="out")
compare_tflite_with_tvm(data, 'inq_0:0', [inq_data], [out], quantized=True, input_range=input_range)
else:
out = nn_ops.log_softmax(in_data)
compare_tflite_with_tvm(data, 'in_0:0', [in_data], [out])


def test_forward_log_softmax():
""" Log_softmax """
_test_log_softmax(np.random.uniform(-10, 10, size=(3, 6)).astype(np.float32))
_test_log_softmax(np.random.uniform(0, 255, (3, 6)).astype(np.uint8), quantized=True)

#######################################################################
# Tanh
# ----
@@ -1930,6 +1956,85 @@ def test_forward_relu():
""" ReLU """
_test_relu(np.arange(6.0, dtype=np.float32).reshape((1, 6)))

#######################################################################
# ReLU6
# -----

def _test_relu6(data, quantized=False):
""" One iteration of ReLU6 """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype='float32', name='in_0')

if quantized:
inq_data = tf.quantization.fake_quant_with_min_max_args(in_data, min=-10, max=10, name="inq_0")
input_range = {'inq_0': (-10, 10)}
out = nn_ops.relu6(inq_data)
out = tf.quantization.fake_quant_with_min_max_args(out, min=0, max=6, name="out")
compare_tflite_with_tvm(data, 'inq_0:0', [inq_data], [out], quantized=True, input_range=input_range)
else:
out = nn_ops.relu6(in_data)
compare_tflite_with_tvm(data, 'in_0:0', [in_data], [out])

def test_forward_relu6():
""" ReLU6 """
_test_relu6(np.random.uniform(-10, 10, size=(3, 6)).astype(np.float32))
_test_relu6(np.random.uniform(0, 255, (3, 6)).astype(np.uint8), quantized=True)

#######################################################################
# Leaky_ReLU
# ----------

def _test_leaky_relu(data, alpha, quantized=False):
""" One iteration of Leaky_ReLU """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype='float32', name='in_0')

if quantized:
inq_data = tf.quantization.fake_quant_with_min_max_args(in_data, min=-3, max=2, name="inq_0")
input_range = {'inq_0': (-3, 2)}
out = nn_ops.leaky_relu(inq_data, alpha)
out = tf.quantization.fake_quant_with_min_max_args(out, min=-3, max=2, name="out")
compare_tflite_with_tvm(data, 'inq_0:0', [inq_data], [out], quantized=True, input_range=input_range)
else:
out = nn_ops.leaky_relu(in_data, alpha)
compare_tflite_with_tvm(data, 'in_0:0', [in_data], [out])

def test_forward_leaky_relu():
""" Leaky_ReLU """
_test_leaky_relu(np.random.uniform(-5, 5, (1, 6)).astype(np.float32), alpha=0.2)
if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):
_test_leaky_relu(np.random.uniform(0, 255, (2, 3)).astype(np.uint8), alpha=0.3, quantized=True)

#######################################################################
# ReLU_n1_to_1
# ------------

def _test_relu_n1_to_1(data, quantized=False):
""" One iteration of ReLU_n1_to_1 """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype='float32', name='in_0')

if quantized:
inq_data = tf.quantization.fake_quant_with_min_max_args(in_data, min=-3, max=3, name="inq_0")
input_range = {'inq_0': (-3, 3)}
# There is no such TF operation; the TFLite converter replaces this specific pattern with RELU_N1_TO_1
out = math_ops.maximum(-1.0, math_ops.minimum(inq_data, 1.0))
out = tf.quantization.fake_quant_with_min_max_args(out, min=-1, max=1, name="out")
compare_tflite_with_tvm(data, 'inq_0:0', [inq_data], [out], quantized=True, input_range=input_range)
else:
out = math_ops.maximum(-1.0, math_ops.minimum(in_data, 1.0))
compare_tflite_with_tvm(data, 'in_0:0', [in_data], [out])

def test_forward_relu_n1_to_1():
""" ReLU_n1_to_1 """
_test_relu_n1_to_1(np.random.uniform(-3, 3, (1, 6)).astype(np.float32))
if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):
_test_relu_n1_to_1(np.random.uniform(0, 255, (3, 6)).astype(np.uint8), quantized=True)
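
As noted in the comment inside _test_relu_n1_to_1, there is no dedicated TF op here; the maximum/minimum pattern is simply a clamp to [-1, 1] that the TFLite converter fuses into RELU_N1_TO_1. A minimal NumPy sketch of that equivalence with hypothetical data (not part of the PR):

import numpy as np

# maximum(-1, minimum(x, 1)) is equivalent to clipping to [-1, 1]; hypothetical data.
x = np.array([-2.5, -0.3, 0.7, 4.0], dtype=np.float32)
ref = np.maximum(-1.0, np.minimum(x, 1.0))
assert np.allclose(ref, np.clip(x, -1.0, 1.0))
print(ref)  # [-1.  -0.3  0.7  1. ]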

#######################################################################
# PReLU
# -----

def _test_prelu(data, alpha):
""" One iteration of PReLU """
with tf.Graph().as_default():
@@ -2511,6 +2616,10 @@ def test_forward_mediapipe_hand_landmark():
test_forward_softmax()
test_forward_tanh()
test_forward_relu()
test_forward_relu6()
test_forward_leaky_relu()
test_forward_relu_n1_to_1()
test_forward_log_softmax()
test_forward_prelu()
test_forward_fully_connected()
test_forward_l2_normalization()