From a0e7be2d872f8607375f413d60c9fcdc3c8dfe25 Mon Sep 17 00:00:00 2001
From: Animesh Jain
Date: Thu, 5 Sep 2019 18:45:57 +0000
Subject: [PATCH] [QNN][TFLite] Parsing TFLite quantized models.

---
 python/tvm/relay/frontend/tflite.py          | 159 +++++++++++++++++--
 tests/python/frontend/tflite/test_forward.py |  22 +++
 2 files changed, 164 insertions(+), 17 deletions(-)

diff --git a/python/tvm/relay/frontend/tflite.py b/python/tvm/relay/frontend/tflite.py
index e0f9775c5895..85367ca58aa9 100644
--- a/python/tvm/relay/frontend/tflite.py
+++ b/python/tvm/relay/frontend/tflite.py
@@ -24,6 +24,7 @@
 from .. import expr as _expr
 from .. import module as _module
 from .. import op as _op
+from .. import qnn as _qnn
 from ... import nd as _nd
 from .common import ExprTable

@@ -31,10 +32,11 @@ class TensorWrapper(object):
     """Tensor wrapper for TFLite Tensor"""

-    def __init__(self, tensor_idx, tensor, buffer):
+    def __init__(self, tensor_idx, tensor, buffer, qnn_params=None):
         self.tensor_idx = tensor_idx
         self.tensor = tensor
         self.buffer = buffer
+        self.qnn_params = qnn_params


 class OperatorConverter(object):
     """Operator Converted for converting TFLite ops to Relay ops"""
@@ -147,13 +149,23 @@ def get_tensors(self, tensors_idx_list):
         return_list = list()
         for tensor_idx in tensors_idx_list:
             if tensor_idx < 0:
-                return_list.append(TensorWrapper(tensor_idx, 0, 0))
+                return_list.append(TensorWrapper(tensor_idx, 0, 0, None))
                 continue

             tensor = self.subgraph.Tensors(tensor_idx)
             buffer_idx = tensor.Buffer()
             buffer = self.model.Buffers(buffer_idx)
-            return_list.append(TensorWrapper(tensor_idx, tensor, buffer))
+
+            # Check if the tensors are quantized. Parse if yes.
+            qnn_params = None
+            tflite_qnn_params = tensor.Quantization()
+            if tflite_qnn_params is not None:
+                qnn_params = dict()
+                qnn_params['scale'] = float(tflite_qnn_params.ScaleAsNumpy())
+                qnn_params['zero_point'] = int(tflite_qnn_params.ZeroPointAsNumpy())
+                if qnn_params['scale'] == 0 and qnn_params['zero_point'] == 0:
+                    qnn_params = None
+            return_list.append(TensorWrapper(tensor_idx, tensor, buffer, qnn_params))
         return return_list

     def get_tensor_value(self, tensor_wrapper):
@@ -193,6 +205,10 @@ def get_tensor_type_str(self, tensor_type):
         raise NotImplementedError("Tensor type {} is currently not supported"
                                   .format(str(tensor_type)))

+    def has_same_qnn_params(self, tensor_a, tensor_b):
+        return tensor_a.qnn_params['scale'] == tensor_b.qnn_params['scale'] and \
+               tensor_a.qnn_params['zero_point'] == tensor_b.qnn_params['zero_point']
+
     def convert_conv2d(self, op):
         """Convert TFLite conv2d"""
         return self.convert_conv(op, "conv2d")
@@ -231,8 +247,15 @@ def convert_reshape(self, op):
             target_shape = reshape_options.NewShapeAsNumpy()

         in_expr = self.get_expr(input_tensor_idx)
-        out = _op.reshape(in_expr, newshape=tuple(target_shape))

+        # If the tensors are quantized, ensure that the input and output qnn params are the same.
+        if input_tensor.qnn_params is not None:
+            output_tensors = self.get_output_tensors(op)
+            assert len(output_tensors) == 1, "output tensors should be 1"
+            output_tensor = output_tensors[0]
+            assert self.has_same_qnn_params(input_tensor, output_tensor), \
+                    "qnn.op.reshape requires input and output qnn params to be the same"
+        out = _op.reshape(in_expr, newshape=tuple(target_shape))
         return out

     def _convert_resize(self, method, op):
@@ -319,8 +342,14 @@ def convert_softmax(self, op):
         input_tensor_idx = input_tensor.tensor_idx
         params = {'axis': 1}  # 1 is channel
         in_expr = self.get_expr(input_tensor_idx)
-        out = _op.nn.softmax(in_expr, **params)

+        # Softmax does not work well with int8, so dequantize back to FP32 first.
+        if input_tensor.qnn_params is not None:
+            in_expr = _qnn.op.dequantize(data=in_expr,
+                                         input_scale=input_tensor.qnn_params['scale'],
+                                         input_zero_point=input_tensor.qnn_params['zero_point'])
+
+        out = _op.nn.softmax(in_expr, **params)
         return out

     def convert_concatenation(self, op):
@@ -340,6 +369,7 @@ def convert_concatenation(self, op):

         output_tensors = self.get_output_tensors(op)
         assert len(output_tensors) == 1, "output tensors should be 1"
+        output_tensor = output_tensors[0]

         assert op.BuiltinOptionsType() == BuiltinOptions.ConcatenationOptions
         op_options = op.BuiltinOptions()
@@ -348,12 +378,26 @@ def convert_concatenation(self, op):
         concatenation_axis = concatenation_options.Axis()
         fused_activation_fn = concatenation_options.FusedActivationFunction()

-        # with axis in N H W C
-        out = _op.concatenate(in_exprs, axis=concatenation_axis)
+        if input_tensors[0].qnn_params is None:
+            out = _op.concatenate(in_exprs, axis=concatenation_axis)
+        else:
+            input_scales = [input_tensor.qnn_params['scale'] for input_tensor in input_tensors]
+            input_zero_points = [input_tensor.qnn_params['zero_point'] for input_tensor in input_tensors]
+            out = _qnn.op.concatenate(in_exprs,
+                                      input_scales=input_scales,
+                                      input_zero_points=input_zero_points,
+                                      output_scale=output_tensor.qnn_params['scale'],
+                                      output_zero_point=output_tensor.qnn_params['zero_point'],
+                                      axis=concatenation_axis)

         # if we have activation fn
         if fused_activation_fn != ActivationFunctionType.NONE:
-            out = self.convert_fused_activation_function(out, fused_activation_fn)
+            if output_tensor.qnn_params is None:
+                out = self.convert_fused_activation_function(out, fused_activation_fn)
+            else:
+                raise tvm.error.OpNotImplemented(
+                    'Operator {} with fused activation is not supported yet.'
+                    .format('qnn.op.concatenate'))
         return out

     def _convert_elemwise(self, relay_op, op):
@@ -496,6 +540,12 @@ def convert_fully_connected(self, op):
         input_tensor_idx = input_tensor.tensor_idx
         weight_tensor = input_tensors[1]

+        output_tensors = self.get_output_tensors(op)
+        assert len(output_tensors) == 1, "output tensors length should be 1"
+        output_tensor = output_tensors[0]
+        output_tensor_type = output_tensor.tensor.Type()
+        output_tensor_type_str = self.get_tensor_type_str(output_tensor_type)
+
         input_tensor_shape = input_tensor.tensor.ShapeAsNumpy()
         weight_tensor_shape = weight_tensor.tensor.ShapeAsNumpy()

@@ -523,7 +573,15 @@ def convert_fully_connected(self, op):
             weight_value = self.get_tensor_value(weight_tensor)

         weight_expr = self.exp_tab.new_const(weight_value, dtype=weight_tensor_type_str)
-        out = _op.nn.dense(in_expr, weight_expr)

+        # Check if the inputs are quantized. If not, call regular dense; else call qnn dense.
+        if input_tensor.qnn_params is None:
+            out = _op.nn.dense(in_expr, weight_expr)
+        else:
+            out = _qnn.op.quantized_dense(in_expr, weight_expr,
+                                          input_zero_point=input_tensor.qnn_params['zero_point'],
+                                          kernel_zero_point=weight_tensor.qnn_params['zero_point'],
+                                          out_dtype='int32')
+
         assert bias_tensor_type is TensorType.INT32
         # if we have bias
         if len(input_tensors) == 3:
@@ -538,7 +596,22 @@ def convert_fully_connected(self, op):

         # If we have fused activations
         if fused_activation_fn != ActivationFunctionType.NONE:
-            out = self.convert_fused_activation_function(out, fused_activation_fn)
+            if output_tensor.qnn_params is None:
+                out = self.convert_fused_activation_function(out, fused_activation_fn)
+            else:
+                raise tvm.error.OpNotImplemented(
+                    'Operator {} with fused activation is not supported yet.'
+                    .format('qnn.op.dense'))
+
+        # Finally, if the dense is quantized, add a requantize at the end.
+        if output_tensor.qnn_params is not None:
+            input_scale = input_tensor.qnn_params['scale'] * weight_tensor.qnn_params['scale']
+            out = _qnn.op.requantize(out,
+                                     input_scale=input_scale,
+                                     input_zero_point=0,
+                                     output_scale=output_tensor.qnn_params['scale'],
+                                     output_zero_point=output_tensor.qnn_params['zero_point'],
+                                     out_dtype=output_tensor_type_str)

         return out

@@ -610,6 +683,12 @@ def convert_conv(self, op, conv_type):
         input_tensor_idx = input_tensor.tensor_idx
         weight_tensor = input_tensors[1]

+        output_tensors = self.get_output_tensors(op)
+        assert len(output_tensors) == 1, "output tensors should be 1"
+        output_tensor = output_tensors[0]
+        output_tensor_type = output_tensor.tensor.Type()
+        output_tensor_type_str = self.get_tensor_type_str(output_tensor_type)
+
         is_depthwise_conv = False
         if conv_type == 'conv2d':
             assert op.BuiltinOptionsType() == BuiltinOptions.Conv2DOptions
@@ -672,7 +751,6 @@ def convert_conv(self, op, conv_type):
             # TFLite is OC/M KH KW IC, we require KH KW IC OC/M
             # M means multiplier in depthwise convolution
             weight_value = weight_value.transpose((1, 2, 3, 0))
-
         weight_expr = self.exp_tab.new_const(weight_value, dtype=weight_tensor_type_str)

         if padding == Padding.VALID:
@@ -688,7 +766,14 @@ def convert_conv(self, op, conv_type):
             raise tvm.error.OpAttributeUnImplemented(
                 'Padding format {} is not supported for operator Conv.'.format(padding))

-        out = _op.nn.conv2d(data=in_expr, weight=weight_expr, **params)
+        if input_tensor.qnn_params is None:
+            out = _op.nn.conv2d(in_expr, weight_expr, **params)
+        else:
+            qnn_conv2d_params = dict(params)
+            qnn_conv2d_params['input_zero_point'] = input_tensor.qnn_params['zero_point']
+            qnn_conv2d_params['kernel_zero_point'] = weight_tensor.qnn_params['zero_point']
+            qnn_conv2d_params['out_dtype'] = 'int32'
+            out = _qnn.op.conv2d(in_expr, weight_expr, **qnn_conv2d_params)

         # if we have bias
         if len(input_tensors) == 3:
@@ -704,7 +789,23 @@ def convert_conv(self, op, conv_type):

         # If we have fused activations
         if fused_activation_fn != ActivationFunctionType.NONE:
-            out = self.convert_fused_activation_function(out, fused_activation_fn)
+            if output_tensor.qnn_params is None:
+                out = self.convert_fused_activation_function(out, fused_activation_fn)
+            else:
+                raise tvm.error.OpNotImplemented(
+                    'Operator {} with fused activation is not supported yet.'
+                    .format('qnn.op.conv2d'))
+
+        # Finally, if the conv is quantized, add a requantize at the end.
+        if output_tensor.qnn_params is not None:
+            input_scale = input_tensor.qnn_params['scale'] * weight_tensor.qnn_params['scale']
+            input_zero_point = 0
+            out = _qnn.op.requantize(out,
+                                     input_scale=input_scale,
+                                     input_zero_point=input_zero_point,
+                                     output_scale=output_tensor.qnn_params['scale'],
+                                     output_zero_point=output_tensor.qnn_params['zero_point'],
+                                     out_dtype=output_tensor_type_str)

         return out

@@ -808,6 +909,14 @@ def convert_pool2d(self, op, pool_type):
         assert len(input_tensors) == 1, "input tensors length should be 1"
         input_tensor = input_tensors[0]
         input_tensor_idx = input_tensor.tensor_idx
+        input_tensor_type = input_tensor.tensor.Type()
+        input_tensor_type_str = self.get_tensor_type_str(input_tensor_type)
+
+        output_tensors = self.get_output_tensors(op)
+        assert len(output_tensors) == 1, "output tensors should be 1"
+        output_tensor = output_tensors[0]
+        output_tensor_type = output_tensor.tensor.Type()
+        output_tensor_type_str = self.get_tensor_type_str(output_tensor_type)

         assert op.BuiltinOptionsType() == BuiltinOptions.Pool2DOptions
         op_options = op.BuiltinOptions()
@@ -839,8 +948,18 @@ def convert_pool2d(self, op, pool_type):
                 'Padding format {} for operator Pool2D is not supported.'.format(padding))

         if pool_type == "average":
-            out = _op.nn.avg_pool2d(in_expr, **params)
+            if input_tensor.qnn_params is None:
+                out = _op.nn.avg_pool2d(in_expr, **params)
+            else:
+                assert self.has_same_qnn_params(input_tensor, output_tensor), \
+                        "qnn.op.avg_pool2d requires input and output qnn params to be the same"
+                out = _op.cast(in_expr, dtype="int32")
+                out = _op.nn.avg_pool2d(out, **params)
+                out = _op.cast(out, dtype=output_tensor_type_str)
         elif pool_type == "max":
+            if input_tensor.qnn_params is not None:
+                assert self.has_same_qnn_params(input_tensor, output_tensor), \
+                        "qnn.op.max_pool2d requires input and output qnn params to be the same"
             out = _op.nn.max_pool2d(in_expr, **params)
         else:
             raise tvm.error.OpNotImplemented(
@@ -848,8 +967,12 @@ def convert_pool2d(self, op, pool_type):

         # If we have fused activations
         if fused_activation_fn != ActivationFunctionType.NONE:
-            out = self.convert_fused_activation_function(out, fused_activation_fn)
-
+            if input_tensor.qnn_params is None:
+                out = self.convert_fused_activation_function(out, fused_activation_fn)
+            else:
+                raise tvm.error.OpNotImplemented(
+                    'Operator {} with fused activation is not supported yet.'
+                    .format('qnn.op.pool2d'))
         return out

     def convert_pad(self, op):
@@ -1030,4 +1153,6 @@ def from_tflite(model, shape_dict, dtype_dict):
     outputs = [exp_tab.get_expr(get_tensor_name(subgraph, i)) for i in model_outputs]
     outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
     func = _expr.Function(analysis.free_vars(outputs), outputs)
-    return _module.Module.from_expr(func), params
+    mod = _module.Module.from_expr(func)
+    mod = tvm.relay.qnn.transform.CanonicalizeOps()(mod)
+    return mod, params
diff --git a/tests/python/frontend/tflite/test_forward.py b/tests/python/frontend/tflite/test_forward.py
index 771226056ed4..7ea38a98edc5 100644
--- a/tests/python/frontend/tflite/test_forward.py
+++ b/tests/python/frontend/tflite/test_forward.py
@@ -848,6 +848,25 @@ def test_forward_inception_v4_net():
     tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]),
                                 rtol=1e-5, atol=1e-5)

+def test_forward_quantized_inception_v1_net():
+    """Test the Quantized TFLite Inception model."""
+    # InceptionV1
+    tflite_model_file = tf_testing.get_workload_official(
+        "https://storage.googleapis.com/download.tensorflow.org/models/inception_v1_224_quant_20181026.tgz",
+        "inception_v1_224_quant.tflite")
+    with open(tflite_model_file, "rb") as f:
+        tflite_model_buf = f.read()
+    # Checking the labels because the requantize implementation is different between TFLite
+    # and Relay. This causes the final output numbers to mismatch, so we test accuracy via labels.
+    data = np.random.uniform(size=(1, 224, 224, 3)).astype('uint8')
+    tflite_output = run_tflite_graph(tflite_model_buf, data)
+    tflite_predictions = np.squeeze(tflite_output)
+    tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1]
+    tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')
+    tvm_predictions = np.squeeze(tvm_output)
+    tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1]
+    tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels)
+
 #######################################################################
 # SSD Mobilenet
 # -------------
@@ -906,3 +925,6 @@ def test_forward_ssd_mobilenet_v1():
     test_forward_inception_v3_net()
     test_forward_inception_v4_net()
     test_forward_ssd_mobilenet_v1()
+
+    # End to End quantized
+    test_forward_quantized_inception_v1_net()
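
Usage note (not part of the patch): the sketch below shows one way the quantized TFLite path added above might be exercised end to end, mirroring the new test. The input name 'input', the (1, 224, 224, 3) uint8 input, and the model file name come from the test; the local file location, the 'llvm' target, and opt_level=3 are illustrative assumptions only.

# Illustrative usage sketch, not part of the patch. Assumes the quantized
# InceptionV1 model has already been downloaded as 'inception_v1_224_quant.tflite'.
import numpy as np
import tflite.Model
import tvm
from tvm import relay
from tvm.contrib import graph_runtime

with open("inception_v1_224_quant.tflite", "rb") as f:
    tflite_model_buf = f.read()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)

# The frontend now wraps quantized (uint8) tensors in qnn ops and canonicalizes
# them with qnn.transform.CanonicalizeOps before returning the module.
mod, params = relay.frontend.from_tflite(tflite_model,
                                         shape_dict={'input': (1, 224, 224, 3)},
                                         dtype_dict={'input': 'uint8'})

with relay.build_config(opt_level=3):
    graph, lib, params = relay.build(mod, target='llvm', params=params)

module = graph_runtime.create(graph, lib, tvm.cpu())
data = np.random.uniform(size=(1, 224, 224, 3)).astype('uint8')
module.set_input('input', tvm.nd.array(data))
module.set_input(**params)
module.run()
# Compare top-3 labels against TFLite, as the new test does.
top3_labels = np.squeeze(module.get_output(0).asnumpy()).argsort()[-3:][::-1]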