Commit

Added tests for relu, conv, quantize. Address comments.
anijain2305 committed Jun 25, 2020
1 parent 63f4a60 commit 05fad24
Showing 2 changed files with 232 additions and 53 deletions.
68 changes: 51 additions & 17 deletions python/tvm/relay/frontend/tflite.py
@@ -264,7 +264,7 @@ def get_tensors(self, tensors_idx_list):
scale = tflite_scale
# Ensure that all zero points are zeros
zero_point = tflite_zero_point
- if not all(x == 0 for x in zero_point):
+ if not np.all(zero_point == 0):
raise tvm.error.OpAttributeInvalid(\
"TFLite per-axis quantization restricts all zero points to be"
+ " 0, but a non-zero value is observed")
@@ -276,8 +276,9 @@ def get_tensors(self, tensors_idx_list):
zero_point = int(tflite_zero_point[0])

else:
- raise NotImplementedError("Quantized type {} not supported"
- .format(type(tflite_scale)))
+ raise NotImplementedError(\
+ "Quantized type {} (scale) and {} (zero point) not supported"
+ .format(type(tflite_scale), type(tflite_zero_point)))
elif tflite_scale == 0 and tflite_zero_point == 0:
# Handle corner case for ops like quantized reshape whose second operand (shape)
# has zero scale and zero zero point. This is not used.
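For reference, the per-axis zero-point check above can be exercised in isolation. The snippet below is a minimal sketch with made-up per-channel parameters, using a plain `ValueError` in place of `tvm.error.OpAttributeInvalid` so it runs without TVM:

```python
import numpy as np

# Hypothetical per-axis quantization params (one scale per output channel);
# the values are illustrative only.
tflite_scale = np.array([0.02, 0.015, 0.031], dtype=np.float32)
tflite_zero_point = np.array([0, 0, 0], dtype=np.int64)

# Vectorized check used by the new code: np.all operates on the whole numpy
# array at once instead of iterating element by element.
if not np.all(tflite_zero_point == 0):
    raise ValueError("TFLite per-axis quantization restricts all zero points to be 0")

scale = tflite_scale            # per-axis scales are kept as an array
zero_point = tflite_zero_point  # all zeros once the check above passes
```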
@@ -309,8 +310,8 @@ def get_tensor_value(self, tensor_wrapper):
shape = tensor_wrapper.tensor.ShapeAsNumpy()

# When TFLite buffer is of size 1 (scalar), then TFLite tensor shape is set to 0.
- # Therefore, we set the shape to 1 for numpy reshape to work.
- # Set shape to 1 if the data is a scalar type
+ # Therefore, we set the shape to 1 for numpy reshape to work. Set shape to 1 if the data is
+ # a scalar type
if data.size == 1 and isinstance(shape, int) and shape == 0:
shape = (1,)
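The scalar corner case handled here is easy to reproduce with numpy alone. In the sketch below the one-byte buffer and its value are invented; the point is that a reported shape of 0 must be promoted to `(1,)` before `np.reshape` will accept it:

```python
import numpy as np

data = np.frombuffer(b"\x2a", dtype=np.uint8)  # one-element buffer (value 42)
shape = 0  # what a scalar TFLite tensor reports for its shape

# Promote the scalar's shape so the numpy reshape below has a valid target.
if data.size == 1 and isinstance(shape, int) and shape == 0:
    shape = (1,)

value = np.reshape(data, shape)  # array([42], dtype=uint8)
```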

@@ -699,12 +700,43 @@ def convert_shape(self, op):

def convert_relu(self, op):
"""Convert TFLite ReLU"""
+ try:
+ from tflite.ActivationFunctionType import ActivationFunctionType
+ except ImportError:
+ raise ImportError("The tflite package must be installed")

input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"

input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
- out = _op.nn.relu(in_expr)

+ output_tensors = self.get_output_tensors(op)
+ assert len(output_tensors) == 1, "output tensors length should be 1"
+ output_tensor = output_tensors[0]
+
+ if input_tensor.qnn_params:
+ # Quantize a float value to a quantized integer value
+ scale_val = get_scalar_from_constant(input_tensor.qnn_params['scale'])
+ zero_point_val = get_scalar_from_constant(input_tensor.qnn_params['zero_point'])
+
+ output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
+ out = self.convert_qnn_fused_activation_function(\
+ expr=in_expr,
+ fused_activation_fn=ActivationFunctionType.RELU,
+ scale=scale_val,
+ zero_point=zero_point_val,
+ dtype=output_tensor_type_str)
+ else:
+ out = _op.nn.relu(in_expr)
+
+ if output_tensor.qnn_params:
+ output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
+ out = _qnn.op.requantize(out,
+ input_scale=input_tensor.qnn_params['scale'],
+ input_zero_point=input_tensor.qnn_params['zero_point'],
+ output_scale=output_tensor.qnn_params['scale'],
+ output_zero_point=output_tensor.qnn_params['zero_point'],
+ out_dtype=output_tensor_type_str)

return out
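The quantized branch above delegates the activation to `convert_qnn_fused_activation_function`. The underlying arithmetic can be sketched independently: with the affine scheme real = scale * (q - zero_point), the condition real >= 0 is equivalent to q >= zero_point, so a quantized ReLU reduces to a clip at the zero point, bounded by the dtype range. The helper below is only an illustration with made-up parameters, not the TVM implementation:

```python
import numpy as np

def quantized_relu_sketch(q_data, zero_point, qmin=0, qmax=255):
    # real = scale * (q - zero_point) >= 0  <=>  q >= zero_point,
    # so ReLU on quantized uint8 data is a clip at the zero point.
    return np.clip(q_data, max(qmin, zero_point), qmax)

q = np.array([0, 100, 128, 200, 255], dtype=np.uint8)
print(quantized_relu_sketch(q, zero_point=128))
# [128 128 128 200 255] -> every value below the zero point maps to real 0.0
```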

@@ -740,6 +772,11 @@ def _hard_swish(data):

def convert_relu6(self, op):
"""Convert TFLite ReLU6"""
+ try:
+ from tflite.ActivationFunctionType import ActivationFunctionType
+ except ImportError:
+ raise ImportError("The tflite package must be installed")

input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
@@ -753,17 +790,14 @@ def convert_relu6(self, op):
# Quantize a float value to a quantized integer value
scale_val = get_scalar_from_constant(input_tensor.qnn_params['scale'])
zero_point_val = get_scalar_from_constant(input_tensor.qnn_params['zero_point'])
- quantize = lambda x: float(int(round(x / scale_val)) + zero_point_val)

- # Get min/max of the input dtype. This will be used to ensure that
- # clip a_min/a_max are not beyond the dtype range.
- input_tensor_type_str = self.get_tensor_type_str(input_tensor.tensor.Type())
- qmin = float(tvm.tir.op.min_value(input_tensor_type_str).value)
- qmax = float(tvm.tir.op.max_value(input_tensor_type_str).value)

- out = _op.clip(in_expr,
- a_min=max(qmin, quantize(0)),
- a_max=min(qmax, quantize(6.0)))
+ output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
+ out = self.convert_qnn_fused_activation_function(\
+ expr=in_expr,
+ fused_activation_fn=ActivationFunctionType.RELU6,
+ scale=scale_val,
+ zero_point=zero_point_val,
+ dtype=output_tensor_type_str)
else:
out = _op.clip(in_expr, a_min=0, a_max=6)
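For context, the inline path removed above computed its clip bounds as follows. The quantization parameters are illustrative, chosen so that 6.0 lands exactly on top of the uint8 range (scale ≈ 6/255, the common TFLite ReLU6 scheme); the new code delegates this computation to the shared `convert_qnn_fused_activation_function` helper:

```python
scale_val = 6.0 / 255.0   # illustrative quantization parameters
zero_point_val = 0
qmin, qmax = 0.0, 255.0   # uint8 value range

quantize = lambda x: float(int(round(x / scale_val)) + zero_point_val)

a_min = max(qmin, quantize(0.0))  # 0.0
a_max = min(qmax, quantize(6.0))  # round(6.0 / (6/255)) = 255 -> 255.0
print(a_min, a_max)               # 0.0 255.0
```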

