Refactor pooling-tests + update documentation (ARM-software#144)
- Adds support for pooling in RefactoredTestGen
- Regenerates data
- Aligns variable names in related tests
- Adds missing int16 LSTM support to README
AdrianLundell authored Aug 28, 2024
1 parent 1cad199 commit 95f293d
Showing 138 changed files with 2,480 additions and 2,073 deletions.
4 changes: 2 additions & 2 deletions README.md
@@ -36,7 +36,7 @@ Examples are Cortex-M55 or Cortex-M85 configured with MVE.
| MaxPooling | Yes | Yes | N/A | Yes | Yes | N/A | Yes | Yes | N/A |
| AvgPooling | Yes | Yes | N/A | Yes | Yes | N/A | Yes | Yes | N/A |
| Softmax | Yes | Yes | N/A | Yes | Yes | N/A | Yes | No | N/A |
-| LSTM | Yes | NA | No | Yes | NA | No | Yes | NA | No |
+| LSTM | Yes | Yes | No | Yes | Yes | No | Yes | Yes | No |
| SVDF | Yes | No | No | Yes | No | No | Yes | No | No |

* int4 weights + int8 activations
@@ -110,4 +110,4 @@ This product conforms to Arm’s inclusive language policy and, to the best of o

## Support / Contact

-For any questions or to reach the CMSIS-NN team, please create a new issue in https://github.com/ARM-software/CMSIS-NN/issues
+For any questions or to reach the CMSIS-NN team, please create a new issue in https://github.com/ARM-software/CMSIS-NN/issues
6 changes: 3 additions & 3 deletions Tests/UnitTest/README.md
@@ -168,11 +168,11 @@ Current progress:
| convolution | x | x | New version only supports 16x8 and int4 packed weights
| depthwise conv | x | |
| fully_connected | x | x | New version supports int4 packed weights. Only new version supports per channels quantization for int8.
-| lstm | x | x | Only new version supporting 16x8
+| lstm | | x | Only new version supporting 16x8
| svdf | x | |
| softmax | x | |
-| avgpool | x | |
-| maxpool | x | |
+| avgpool | | x |
+| maxpool | | x |
| add | x | |
| mul | x | |
| batch matmul | | x |
80 changes: 80 additions & 0 deletions Tests/UnitTest/RefactoredTestGen/Lib/op_pooling.py
@@ -0,0 +1,80 @@
# SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates <open-source-office@arm.com>
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import Lib.op_utils
import tensorflow as tf
import math
import numpy as np

from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.lite.python.interpreter import OpResolverType
import tf_keras as keras

class Op_pooling(Lib.op_utils.Op_type):

    def get_shapes(params):
        shapes = {}
        shapes["input_tensor"] = (params["batch_size"], params["input_h"], params["input_w"], params["input_c"])
        shapes["representational_dataset"] = shapes["input_tensor"]

        return shapes

    def generate_keras_model(shapes, params):
        model = keras.models.Sequential()
        model.add(keras.layers.InputLayer(input_shape=shapes["input_tensor"][1:], batch_size=shapes["input_tensor"][0]))
        if params["op_type"] == 'avgpool':
            model.add(
                keras.layers.AveragePooling2D(pool_size=(params["filter_h"], params["filter_w"]),
                                              strides=(params["stride_h"], params["stride_w"]),
                                              padding=params["pad"],
                                              input_shape=shapes["input_tensor"][1:]))
        elif params["op_type"] == 'maxpool':
            model.add(
                keras.layers.MaxPooling2D(pool_size=(params["filter_h"], params["filter_w"]),
                                          strides=(params["stride_h"], params["stride_w"]),
                                          padding=params["pad"],
                                          input_shape=shapes["input_tensor"][1:]))
        else:
            raise RuntimeError("Wrong test type")


        return model

    def generate_data_tflite(tflite_fname, params):
        tensors = {}
        effective_scales = {}
        scales = {}
        generated_params = {}

        interpreter = Interpreter(str(tflite_fname), experimental_op_resolver_type=OpResolverType.BUILTIN_REF)
        interpreter.allocate_tensors()
        output_details = interpreter.get_output_details()

        generated_params["output_c"] = output_details[0]['shape'][3]
        generated_params["output_w"] = output_details[0]['shape'][2]
        generated_params["output_h"] = output_details[0]['shape'][1]

        if params["pad"] == "SAME":
            pad_along_width = max((generated_params["output_w"] - 1) * params["stride_w"] + params["filter_w"] - params["input_w"], 0)
            pad_along_height = max((generated_params["output_h"] - 1) * params["stride_h"] + params["filter_h"] - params["input_h"], 0)

            generated_params["padding_h"] = pad_along_height // 2
            generated_params["padding_w"] = pad_along_width // 2
        else:
            generated_params["padding_h"] = 0
            generated_params["padding_w"] = 0

        return Lib.op_utils.Generated_data(generated_params, tensors, scales, effective_scales)
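
For reference, a minimal standalone sketch of the SAME-padding arithmetic used in `generate_data_tflite` above. The parameter values are illustrative, and the output width is computed with TensorFlow's ceil(input / stride) convention instead of being read back from the interpreter:

```python
# Standalone illustration of the SAME-padding calculation (illustrative values only).
import math

input_w, filter_w, stride_w = 7, 3, 2        # hypothetical test parameters
output_w = math.ceil(input_w / stride_w)     # SAME padding keeps ceil(input / stride) output columns
pad_along_width = max((output_w - 1) * stride_w + filter_w - input_w, 0)
padding_w = pad_along_width // 2             # left padding, as stored in generated_params

print(output_w, pad_along_width, padding_w)  # 4 2 1
```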
15 changes: 14 additions & 1 deletion Tests/UnitTest/RefactoredTestGen/Lib/test.py
@@ -19,6 +19,7 @@
import Lib.op_conv
import Lib.op_batch_matmul
import Lib.op_fully_connected
+import Lib.op_pooling
import tensorflow as tf
import numpy as np
from tensorflow.lite.python.interpreter import Interpreter
@@ -77,11 +78,16 @@ def generate(params, args, fpaths):
    except KeyError:
        pass

+    if "bias_data_type" in params:
+        bias_dtype = params["bias_data_type"]
+    else:
+        bias_dtype = None
+
    convert_keras_to_tflite(fpaths["tflite"],
                            keras_model,
                            quantize=True,
                            dtype=params["input_data_type"],
-                            bias_dtype=params["bias_data_type"],
+                            bias_dtype=bias_dtype,
                            shape=shapes,
                            per_tensor_quant_for_dense=per_tensor_quant_for_dense)
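
The explicit fallback above presumably exists because pooling test specifications define no bias tensor; a hypothetical equivalent using dict.get would be:

```python
# Hypothetical equivalent of the fallback above: dict.get returns None when the key is absent.
params = {"input_data_type": "int8"}         # illustrative test spec without a bias type
bias_dtype = params.get("bias_data_type")    # -> None
print(bias_dtype)
```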

@@ -138,6 +144,11 @@ def generate(params, args, fpaths):
    else:
        raise ValueError(f"Invalid interpreter in {params['name']}")

+    if "activation_min" in params:
+        data.tensors["output"] = np.maximum(data.tensors["output"], params["activation_min"])
+    if "activation_max" in params:
+        data.tensors["output"] = np.minimum(data.tensors["output"], params["activation_max"])
+
    # Write data
    header = get_header(params["tflite_generator"], params["interpreter"])
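
A minimal sketch of the reference-output clamping added above; the activation limits and data values are illustrative assumptions:

```python
# Clamp a reference output to an activation range, as done above (illustrative values).
import numpy as np

output = np.array([-130, -5, 0, 90, 140])
activation_min, activation_max = -128, 127   # hypothetical activation limits from a test spec
clamped = np.minimum(np.maximum(output, activation_min), activation_max)
print(clamped)                               # [-128   -5    0   90  127]
```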

@@ -172,6 +183,8 @@ def get_op_type(op_type_string):
        return Lib.op_batch_matmul.Op_batch_matmul
    elif op_type_string == "fully_connected":
        return Lib.op_fully_connected.Op_fully_connected
+    if op_type_string == "avgpool" or op_type_string == "maxpool":
+        return Lib.op_pooling.Op_pooling
    else:
        raise ValueError(f"Unknown op type '{op_type_string}'")

