diff --git a/examples/snpe/inception_snpe_qualcomm_npu/README.md b/examples/snpe/inception_snpe_qualcomm_npu/README.md index 684dbc29e..b3d4c1697 100644 --- a/examples/snpe/inception_snpe_qualcomm_npu/README.md +++ b/examples/snpe/inception_snpe_qualcomm_npu/README.md @@ -10,11 +10,16 @@ Outputs a summary of the accuracy and latency metrics for each SNPE model. ### Download and unzip SNPE SDK Download the SNPE SDK zip following [instructions from Qualcomm](https://developer.qualcomm.com/software/qualcomm-neural-processing-sdk) +We test it with SNPE v2.18.0.240101. + Unzip the file and set the unzipped directory path as environment variable `SNPE_ROOT` ### Configure SNPE -``` -python -m olive.snpe.configure +```sh +# in general, python 3.8 is recommended +python -m olive.snpe.configure --py_version 3.8 +# only when the tensorflow 1.15.0 is needed, use python 3.6 +python -m olive.snpe.configure --py_version 3.6 ``` ### Pip requirements diff --git a/examples/snpe/inception_snpe_qualcomm_npu/inception_config.json b/examples/snpe/inception_snpe_qualcomm_npu/inception_config.json index 0f31f83e1..4bbb4b5f5 100644 --- a/examples/snpe/inception_snpe_qualcomm_npu/inception_config.json +++ b/examples/snpe/inception_snpe_qualcomm_npu/inception_config.json @@ -1,5 +1,5 @@ { - "input_model":{ + "input_model": { "type": "TensorFlowModel", "config": { "model_path": "models/inception_v3.pb" @@ -17,8 +17,17 @@ }, "params_config": { "data_dir": "data", - "input_names": ["input"], - "input_shapes": [[1, 299, 299, 3]], + "input_names": [ + "input" + ], + "input_shapes": [ + [ + 1, + 299, + 299, + 3 + ] + ], "input_order_file": "input_order.txt", "annotations_file": "labels.npy", "batch_size": 7 @@ -27,14 +36,22 @@ }, "evaluators": { "common_evaluator": { - "metrics":[ + "metrics": [ { "name": "accuracy", "type": "accuracy", "sub_types": [ - {"name": "accuracy_score", "priority": 1} + { + "name": "accuracy_score", + "priority": 1, + "metric_config": { + "task": "multiclass", + 
"num_classes": "100", + "top_k": 1 + } + } ], - "user_config":{ + "user_config": { "inference_settings": { "snpe": { "return_numpy_results": true @@ -47,7 +64,15 @@ "name": "latency", "type": "latency", "sub_types": [ - {"name": "avg", "priority": 2, "metric_config": {"warmup_num": 0, "repeat_test_num": 5, "sleep_num": 2}} + { + "name": "avg", + "priority": 2, + "metric_config": { + "warmup_num": 0, + "repeat_test_num": 5, + "sleep_num": 2 + } + } ], "user_config": { "inference_settings": { @@ -67,9 +92,20 @@ "snpe_conversion": { "type": "SNPEConversion", "config": { - "input_names": ["input"], - "input_shapes": [[1, 299, 299, 3]], - "output_names": ["InceptionV3/Predictions/Reshape_1"] + "input_names": [ + "input" + ], + "input_shapes": [ + [ + 1, + 299, + 299, + 3 + ] + ], + "output_names": [ + "InceptionV3/Predictions/Reshape_1" + ] } }, "snpe_quantization": { @@ -82,9 +118,10 @@ }, "engine": { "search_strategy": false, + "evaluate_input_model": false, "evaluator": "common_evaluator", "cache_dir": "cache", - "output_dir" : "outputs", + "output_dir": "outputs", "output_name": "snpe_quantized" } } diff --git a/examples/snpe/vgg_snpe_qualcomm_npu/README.md b/examples/snpe/vgg_snpe_qualcomm_npu/README.md index b4fe21ddc..9e4023bc6 100644 --- a/examples/snpe/vgg_snpe_qualcomm_npu/README.md +++ b/examples/snpe/vgg_snpe_qualcomm_npu/README.md @@ -8,11 +8,16 @@ Performs optimization pipeline: ### Download and unzip SNPE SDK Download the SNPE SDK zip following [instructions from Qualcomm](https://developer.qualcomm.com/software/qualcomm-neural-processing-sdk) +We test it with SNPE v2.18.0.240101. + Unzip the file and set the unzipped directory path as environment variable `SNPE_ROOT`. 
### Configure SNPE -``` -python -m olive.snpe.configure +```sh +# in general, python 3.8 is recommended +python -m olive.snpe.configure --py_version 3.8 +# only when the tensorflow 1.15.0 is needed, use python 3.6 +python -m olive.snpe.configure --py_version 3.6 ``` ### Pip requirements @@ -32,3 +37,12 @@ Run the conversion and quantization locally. Only supports `x64-Linux`. ``` python -m olive.workflows.run --config vgg_config.json ``` + +## Issues + +1. "Module 'qti.aisw.converters' has no attribute 'onnx': + Refer to this: https://developer.qualcomm.com/comment/21810#comment-21810, + change the import statement in `{SNPE_ROOT}/lib/python/qti/aisw/converters/onnx/onnx_to_ir.py:L30` to: + ```python + from qti.aisw.converters.onnx import composable_custom_op_utils as ComposableCustomOp + ``` diff --git a/examples/snpe/vgg_snpe_qualcomm_npu/vgg_config.json b/examples/snpe/vgg_snpe_qualcomm_npu/vgg_config.json index b8a4f73c2..7a098afd4 100644 --- a/examples/snpe/vgg_snpe_qualcomm_npu/vgg_config.json +++ b/examples/snpe/vgg_snpe_qualcomm_npu/vgg_config.json @@ -39,7 +39,9 @@ } }, "engine": { + "log_severity_level": 0, "search_strategy": false, + "clean_cache": true, "cache_dir": "cache", "output_dir" : "outputs" } diff --git a/examples/stable_diffusion/openvino/stable_diffusion.py b/examples/stable_diffusion/openvino/stable_diffusion.py index 0a6d0205e..867e329a6 100644 --- a/examples/stable_diffusion/openvino/stable_diffusion.py +++ b/examples/stable_diffusion/openvino/stable_diffusion.py @@ -92,7 +92,8 @@ def __init__( [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the clip-vit-large-patch14(https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (CLIPTokenizer): - Tokenizer of class CLIPTokenizer(https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). 
+ Tokenizer of class CLIPTokenizer + (https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet (Model): Conditional U-Net architecture to denoise the encoded image latents. scheduler (SchedulerMixin): A scheduler to be used in combination with unet to denoise the encoded image latents. Can be one of diff --git a/olive/evaluator/olive_evaluator.py b/olive/evaluator/olive_evaluator.py index 98c36386f..da7affe81 100644 --- a/olive/evaluator/olive_evaluator.py +++ b/olive/evaluator/olive_evaluator.py @@ -936,8 +936,8 @@ def _inference( raise ValueError("Post processing function is required for SNPE model") preds.extend(outputs.tolist()) targets.extend(labels.tolist()) - # TODO(trajep): verify if we need to return logits - logits.extend(result.tolist()) + lg = result["results"].get("logits") + logits.extend(lg.tolist() if lg is not None else []) return OliveModelOutput(preds=preds, logits=logits), targets def _evaluate_accuracy( diff --git a/olive/snpe/configure.py b/olive/snpe/configure.py index 02fe60c90..deb818e1b 100644 --- a/olive/snpe/configure.py +++ b/olive/snpe/configure.py @@ -2,6 +2,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. 
# -------------------------------------------------------------------------- +import argparse import logging import shutil from importlib import resources @@ -13,16 +14,16 @@ logger = logging.getLogger("olive.snpe.configure") -def dev(): +def dev(args): snpe_arch = get_snpe_target_arch(False) if snpe_arch != "x64-Linux": return get_snpe_root() - logger.info(f"Configuring SNPE for {snpe_arch}...") - with resources.path("olive.snpe", "create_python36_env.sh") as create_python36_env_path: - cmd = f"bash {create_python36_env_path}" + logger.info(f"Configuring SNPE for {snpe_arch} with python{args.py_version}...") + with resources.path("olive.snpe", "create_python_env.sh") as create_python_env_path: + cmd = f"bash {create_python_env_path} -v {args.py_version}" return_code, stdout, stderr = run_subprocess(cmd) if return_code != 0: raise RuntimeError(f"Failed to create python environment. stdout: {stdout}, stderr: {stderr}") @@ -66,5 +67,15 @@ def eval(): # noqa: A001 #pylint: disable=redefined-builtin if __name__ == "__main__": - dev() + # create args for py_version + parser = argparse.ArgumentParser("Olive SNPE: Configure") + parser.add_argument( + "--py_version", + type=str, + help="Python version, use 3.6 for tensorflow 1.15. Otherwise 3.8", + required=True, + choices=["3.6", "3.8"], + ) + args = parser.parse_args() + dev(args) eval() diff --git a/olive/snpe/create_python36_env.sh b/olive/snpe/create_python36_env.sh deleted file mode 100644 index b9ed30103..000000000 --- a/olive/snpe/create_python36_env.sh +++ /dev/null @@ -1,33 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. -# -------------------------------------------------------------------------- -set -eux - -FILES_DIR=$SNPE_ROOT/python36-env-setup -rm -rf $FILES_DIR -mkdir $FILES_DIR - -# Install conda if not already installed -if ! 
command -v conda; then - # Install conda - curl -fsSL -o $FILES_DIR/install_conda.sh https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh - sh $FILES_DIR/install_conda.sh -b -p $FILES_DIR/miniconda - CONDA=$FILES_DIR/miniconda/bin/conda -else - CONDA=conda -fi - -# Create python 3.6 environment -$CONDA create -y -p $FILES_DIR/python36-env python=3.6 - -# Install snpe requirements -$FILES_DIR/python36-env/bin/python -m pip install --upgrade pip -$FILES_DIR/python36-env/bin/python -m pip install onnx==1.11.0 onnx-simplifier packaging tensorflow==1.15.0 pyyaml - -# move the python36-env to the correct location -rm -rf $SNPE_ROOT/python36-env -mv $FILES_DIR/python36-env $SNPE_ROOT/python36-env - -# Remove all unnecessary files -rm -rf $FILES_DIR diff --git a/olive/snpe/create_python_env.sh b/olive/snpe/create_python_env.sh new file mode 100644 index 000000000..0cc7236aa --- /dev/null +++ b/olive/snpe/create_python_env.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +set -eux + +# This script creates a python 3.6 environment in $SNPE_ROOT/olive-pyenv +# and installs the required packages for SNPE-v2.18.0.240101 + +# Usage: ./create_python_env.sh -v/--version +while [[ "$#" -gt 0 ]]; do + key="$1" + case $key in + -v|--version) + PY_VERSION="$2" + shift + shift + ;; + *) + echo "Unknown option: $key" + exit 1 + ;; + esac +done + +PY_ENV_NAME=olive-pyenv +FILES_DIR=$SNPE_ROOT/python-env-setup +rm -rf "$FILES_DIR" +mkdir "$FILES_DIR" + +# Install conda if not already installed +if ! 
command -v conda; then + # Install conda + curl -fsSL -o "$FILES_DIR"/install_conda.sh https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh + sh "$FILES_DIR"/install_conda.sh -b -p "$FILES_DIR"/miniconda + CONDA=$FILES_DIR/miniconda/bin/conda +else + CONDA=conda +fi + +# Create python environment +$CONDA create -y -p "$FILES_DIR"/$PY_ENV_NAME python="$PY_VERSION" + +# Install snpe requirements +"$FILES_DIR"/$PY_ENV_NAME/bin/python -m pip install --upgrade pip +if [ "$PY_VERSION" == "3.6" ]; then + "$FILES_DIR"/$PY_ENV_NAME/bin/python -m pip install onnx==1.11.0 onnx-simplifier packaging tensorflow==1.15.0 pyyaml +elif [ "$PY_VERSION" == "3.8" ]; then + "$FILES_DIR"/$PY_ENV_NAME/bin/python -m pip install onnx onnx-simplifier packaging tensorflow pyyaml +else + echo "Unsupported python version: $PY_VERSION, only 3.6 and 3.8 are supported" + exit 1 +fi + + +rm -rf "${SNPE_ROOT:?}"/$PY_ENV_NAME +mv "$FILES_DIR"/$PY_ENV_NAME "$SNPE_ROOT"/$PY_ENV_NAME + +# Remove all unnecessary files +rm -rf "$FILES_DIR" diff --git a/olive/snpe/utils/local.py b/olive/snpe/utils/local.py index fcd5fb0f0..1ab8f83b9 100644 --- a/olive/snpe/utils/local.py +++ b/olive/snpe/utils/local.py @@ -90,20 +90,22 @@ def get_snpe_env(dev: bool = False) -> dict: bin_path = str(Path(f"{snpe_root}/bin/{target_arch_name}")) lib_path = str(Path(f"{snpe_root}/lib/{target_arch_name}")) + python_env_bin_path = str(Path(f"{snpe_root}/olive-pyenv/bin")) + python_env_lib_path = str(Path(f"{snpe_root}/olive-pyenv/lib")) env = {} delimiter = os.path.pathsep if platform.system() == "Linux": - env["LD_LIBRARY_PATH"] = lib_path if dev: - python36_env_path = str(Path(f"{snpe_root}/python36-env/bin")) - if not Path(python36_env_path).exists(): + if not Path(python_env_bin_path).exists(): raise FileNotFoundError( - f"Path {python36_env_path} does not exist. Please run 'python -m olive.snpe.configure' to add the" - " missing file" + f"Path {python_env_bin_path} does not exist." 
+ " Please run 'python -m olive.snpe.configure' to add the missing file" ) - bin_path += delimiter + python36_env_path + bin_path += delimiter + python_env_bin_path + lib_path += delimiter + python_env_lib_path env["PYTHONPATH"] = str(Path(f"{snpe_root}/lib/python")) + env["LD_LIBRARY_PATH"] = lib_path bin_path += delimiter + "/usr/bin" elif platform.system() == "Windows": if target_arch == "ARM64-Windows": diff --git a/setup.py b/setup.py index e0a1144b7..666232202 100644 --- a/setup.py +++ b/setup.py @@ -86,7 +86,7 @@ def get_extra_deps(rel_path): "olive": ["extra_dependencies.json"], "olive.auto_optimizer": ["config_template/*.yaml"], "olive.engine.packaging": ["sample_code/*/*/*"], - "olive.snpe": ["create_python36_env.sh", "copy_libcdsprpc.ps1"], + "olive.snpe": ["create_python_env.sh", "copy_libcdsprpc.ps1"], "olive.systems.docker": ["Dockerfile*"], }, data_files=[],