From 079b36d72ba2ef298f7ae4dc283d8c7975eb02f6 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 4 Feb 2022 09:30:01 +0100
Subject: [PATCH] Edge TPU `tf.lite.experimental.load_delegate` fix (#6536)

* Edge TPU `tf.lite.experimental.load_delegate` fix

Fix attempt for #6535

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 models/common.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/models/common.py b/models/common.py
index 29d02e741e17..4da698811669 100644
--- a/models/common.py
+++ b/models/common.py
@@ -374,19 +374,20 @@ def wrap_frozen_graph(gd, inputs, outputs):
             graph_def.ParseFromString(open(w, 'rb').read())
             frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0")
         elif tflite:  # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
-            try:
-                import tflite_runtime.interpreter as tfl  # prefer tflite_runtime if installed
+            try:  # prefer tflite_runtime if installed
+                from tflite_runtime.interpreter import Interpreter, load_delegate
             except ImportError:
-                import tensorflow.lite as tfl
+                import tensorflow.lite.experimental.load_delegate as load_delegate
+                import tensorflow.lite.Interpreter as Interpreter
             if 'edgetpu' in w.lower():  # Edge TPU https://coral.ai/software/#edgetpu-runtime
                 LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
                 delegate = {'Linux': 'libedgetpu.so.1',
                             'Darwin': 'libedgetpu.1.dylib',
                             'Windows': 'edgetpu.dll'}[platform.system()]
-                interpreter = tfl.Interpreter(model_path=w, experimental_delegates=[tfl.load_delegate(delegate)])
+                interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])
             else:  # Lite
                 LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
-                interpreter = tfl.Interpreter(model_path=w)  # load TFLite model
+                interpreter = Interpreter(model_path=w)  # load TFLite model
             interpreter.allocate_tensors()  # allocate
             input_details = interpreter.get_input_details()  # inputs
             output_details = interpreter.get_output_details()  # outputs