Fix floating value quantization for RELU6 and RELU1
inadob committed Jun 16, 2020
1 parent 7f0599c commit 9345405
Showing 1 changed file with 6 additions and 4 deletions.
python/tvm/relay/frontend/tflite.py (6 additions, 4 deletions)
@@ -702,8 +702,9 @@ def convert_relu6(self, op):
 
         if input_tensor.qnn_params:
             # Quantize a float value to a quantized integer value
-            quantize = lambda x: float(int(round(x / input_tensor.qnn_params['scale'])) + \
-                                       input_tensor.qnn_params['zero_point'])
+            scale_val = get_scalar_from_constant(input_tensor.qnn_params['scale'])
+            zero_point_val = get_scalar_from_constant(input_tensor.qnn_params['zero_point'])
+            quantize = lambda x: float(int(round(x / scale_val)) + zero_point_val)
 
         # Get min/max of the input dtype. This will be used to ensure that
         # clip a_min/a_max are not beyond the dtype range.
@@ -772,8 +773,9 @@ def convert_relu_n1_to_1(self, op):
 
         if input_tensor.qnn_params:
             # Quantize a float value to a quantized integer value
-            quantize = lambda x: float(int(round(x / input_tensor.qnn_params['scale'])) + \
-                                       input_tensor.qnn_params['zero_point'])
+            scale_val = get_scalar_from_constant(input_tensor.qnn_params['scale'])
+            zero_point_val = get_scalar_from_constant(input_tensor.qnn_params['zero_point'])
+            quantize = lambda x: float(int(round(x / scale_val)) + zero_point_val)
 
         # Get min/max of the input dtype. This will be used to ensure that
         # clip a_min/a_max are not beyond the dtype range.
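For context, input_tensor.qnn_params['scale'] and input_tensor.qnn_params['zero_point'] hold Relay constant expressions rather than plain Python numbers, so the old lambda combined a float with a Relay expression instead of doing scalar arithmetic; the fix extracts the scalar values first via get_scalar_from_constant. Below is a minimal standalone sketch of the fixed behavior, with get_scalar_from_constant stubbed out using NumPy and example quantization parameters that are purely illustrative, not taken from the commit:

# Standalone sketch (not TVM code): the helper stub and the example
# scale/zero-point values are assumptions for illustration only.
import numpy as np

def get_scalar_from_constant(const_value):
    # Stand-in for the TVM frontend helper, which reads the single
    # scalar value out of a Relay constant node.
    return np.asarray(const_value).item()

scale_val = get_scalar_from_constant(np.float32(0.05))   # example scale
zero_point_val = get_scalar_from_constant(np.int32(0))   # example zero point

# The lambda introduced by the commit: map a float into the quantized domain.
quantize = lambda x: float(int(round(x / scale_val)) + zero_point_val)

# RELU6 clips activations to [0.0, 6.0]; quantized, those bounds become:
print(quantize(0.0), quantize(6.0))   # -> 0.0 120.0 with the values above

# As the surrounding comments note, the clip bounds are then kept within
# the input dtype's range, e.g. for uint8:
qmin, qmax = np.iinfo('uint8').min, np.iinfo('uint8').max
a_min = max(qmin, quantize(0.0))
a_max = min(qmax, quantize(6.0))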
