From b968953527bba47f0d001826d8fde2c89030dd4b Mon Sep 17 00:00:00 2001 From: Ruslan Nugmanov Date: Tue, 5 Mar 2024 20:35:45 +0400 Subject: [PATCH] E2E open-sourced (#20429) Co-authored-by: Roman Kazantsev --- tests/CMakeLists.txt | 1 + tests/e2e_tests/CMakeLists.txt | 9 + tests/e2e_tests/README.md | 50 ++ tests/e2e_tests/__init__.py | 3 + tests/e2e_tests/base_test_rules.yml | 475 ++++++++++++ tests/e2e_tests/collect_refs.py | 62 ++ tests/e2e_tests/common/__init__.py | 2 + .../e2e_tests/common/common/base_provider.py | 64 ++ .../common/common/common_base_class.py | 210 ++++++ .../common/common/dummy_comparator.py | 34 + tests/e2e_tests/common/common/e2e_utils.py | 76 ++ tests/e2e_tests/common/common/pipeline.py | 64 ++ tests/e2e_tests/common/comparator/__init__.py | 11 + .../common/comparator/classification.py | 85 +++ .../e2e_tests/common/comparator/container.py | 91 +++ tests/e2e_tests/common/comparator/dummy.py | 38 + tests/e2e_tests/common/comparator/eltwise.py | 130 ++++ .../common/comparator/object_detection.py | 266 +++++++ tests/e2e_tests/common/comparator/ocr.py | 83 +++ tests/e2e_tests/common/comparator/provider.py | 20 + .../comparator/semantic_segmentation.py | 73 ++ tests/e2e_tests/common/comparator/ssim.py | 84 +++ tests/e2e_tests/common/comparator/ssim_4d.py | 72 ++ .../common/comparator/threshold_utils.py | 86 +++ tests/e2e_tests/common/config.py | 117 +++ tests/e2e_tests/common/core.py | 45 ++ tests/e2e_tests/common/decorators.py | 24 + tests/e2e_tests/common/env_utils.py | 31 + tests/e2e_tests/common/environment_info.py | 102 +++ tests/e2e_tests/common/hook_utils.py | 117 +++ tests/e2e_tests/common/infer/__init__.py | 10 + .../common/infer/common_inference.py | 246 +++++++ .../common/infer/dummy_infer_class.py | 21 + .../infer/network_modifiers/__init__.py | 5 + .../infer/network_modifiers/container.py | 30 + .../network_modifiers/network_modifiers.py | 140 ++++ tests/e2e_tests/common/infer/provider.py | 35 + .../e2e_tests/common/ir_provider/__init__.py | 4 + .../ir_provider/model_optimizer_runner.py | 143 ++++ .../common/ir_provider/pregenerated.py | 24 + .../e2e_tests/common/ir_provider/provider.py | 37 + tests/e2e_tests/common/logger.py | 299 ++++++++ tests/e2e_tests/common/marks.py | 275 +++++++ .../e2e_tests/common/model_loader/__init__.py | 5 + .../common/model_loader/load_pytorch_model.py | 49 ++ .../e2e_tests/common/model_loader/provider.py | 35 + .../model_loader/tf_hub_model_loader.py | 34 + .../e2e_tests/common/multiprocessing_utils.py | 104 +++ tests/e2e_tests/common/openvino_resources.py | 211 ++++++ tests/e2e_tests/common/parsers.py | 41 ++ tests/e2e_tests/common/platforms.yml | 138 ++++ tests/e2e_tests/common/plugins/__init__.py | 3 + .../common/plugins/common/__init__.py | 3 + .../common/plugins/common/base_conftest.py | 121 +++ .../common/plugins/common/conftest.py | 611 ++++++++++++++++ .../common/plugins/e2e_test/__init__.py | 3 + .../common/plugins/e2e_test/conftest.py | 410 +++++++++++ .../common/plugins/ref_collect/__init__.py | 3 + .../common/plugins/ref_collect/conftest.py | 56 ++ .../common/plugins/reshape_tests/__init__.py | 3 + .../common/plugins/reshape_tests/conftest.py | 216 ++++++ tests/e2e_tests/common/postprocessors/YOLO.py | 310 ++++++++ .../common/postprocessors/__init__.py | 13 + .../common/postprocessors/classification.py | 37 + .../e2e_tests/common/postprocessors/common.py | 229 ++++++ tests/e2e_tests/common/postprocessors/ctc.py | 59 ++ .../common/postprocessors/filters.py | 89 +++ .../postprocessors/image_modifications.py | 30 + 
.../common/postprocessors/mask_rcnn.py | 69 ++ .../common/postprocessors/object_detection.py | 226 ++++++ .../common/postprocessors/provider.py | 48 ++ .../postprocessors/semantic_segmentation.py | 31 + .../common/preprocessors/__init__.py | 6 + .../common/preprocessors/preprocessors.py | 501 +++++++++++++ .../common/preprocessors/provider.py | 50 ++ .../common/preprocessors/transformers.py | 187 +++++ tests/e2e_tests/common/pytest_utils.py | 115 +++ tests/e2e_tests/common/readers/__init__.py | 5 + tests/e2e_tests/common/readers/provider.py | 38 + tests/e2e_tests/common/readers/readers.py | 170 +++++ .../common/ref_collector/__init__.py | 3 + .../ref_collector/dummy_ref_collector.py | 25 + .../common/ref_collector/precollected.py | 48 ++ .../common/ref_collector/provider.py | 34 + .../ref_collector/score_onnx_runtime.py | 74 ++ .../ref_collector/score_paddlepaddle.py | 59 ++ .../common/ref_collector/score_pytorch.py | 367 ++++++++++ .../score_pytorch_onnx_runtime.py | 149 ++++ .../common/ref_collector/score_tf.py | 259 +++++++ .../common/ref_collector/score_tf_hub.py | 34 + .../common/ref_collector/score_tf_lite.py | 33 + .../common/ref_collector/score_tf_v2.py | 78 ++ .../ref_collector/tf_hub_ref_provider.py | 33 + tests/e2e_tests/common/sys_info_utils.py | 434 +++++++++++ tests/e2e_tests/common/table_utils.py | 13 + tests/e2e_tests/config.py | 14 + tests/e2e_tests/conftest.py | 69 ++ tests/e2e_tests/env_config_local.yml | 42 ++ .../collect_reference_templates.py | 146 ++++ .../comparators_template.py | 119 +++ .../pipeline_templates/infer_templates.py | 36 + .../pipeline_templates/input_templates.py | 33 + .../pipeline_templates/ir_gen_templates.py | 16 + .../model_loader_templates.py | 40 + .../pipeline_templates/postproc_template.py | 103 +++ .../pipeline_templates/preproc_templates.py | 190 +++++ .../pytorch_to_onnx_converter_template.py | 33 + tests/e2e_tests/pipelines/tf_hub/nightly.yml | 689 ++++++++++++++++++ .../e2e_tests/pipelines/tf_hub/precommit.yml | 26 + tests/e2e_tests/pipelines/tf_hub/tf_hub.py | 50 ++ .../pipelines/tf_hub/tf_hub_case_class.py | 30 + tests/e2e_tests/pytest.ini | 68 ++ tests/e2e_tests/requirements.txt | 52 ++ tests/e2e_tests/reshape_test_rules.yml | 33 + tests/e2e_tests/test_base.py | 159 ++++ tests/e2e_tests/test_config_local.yml | 12 + tests/e2e_tests/test_utils/__init__.py | 3 + tests/e2e_tests/test_utils/coverage_runner.py | 90 +++ tests/e2e_tests/test_utils/env_tools.py | 44 ++ tests/e2e_tests/test_utils/get_test_info.py | 66 ++ tests/e2e_tests/test_utils/modify_configs.py | 129 ++++ tests/e2e_tests/test_utils/path_utils.py | 227 ++++++ tests/e2e_tests/test_utils/pytorch_loaders.py | 107 +++ .../test_utils/reshape_pipeline_executers.py | 25 + .../test_utils/reshape_tests_utils.py | 320 ++++++++ tests/e2e_tests/test_utils/test_utils.py | 493 +++++++++++++ tests/e2e_tests/test_utils/tf_helper.py | 89 +++ .../e2e_tests/test_utils/tf_helper_config.yml | 39 + tests/e2e_tests/test_utils/tf_hub_utils.py | 81 ++ 129 files changed, 13369 insertions(+) create mode 100644 tests/e2e_tests/CMakeLists.txt create mode 100644 tests/e2e_tests/README.md create mode 100644 tests/e2e_tests/__init__.py create mode 100644 tests/e2e_tests/base_test_rules.yml create mode 100644 tests/e2e_tests/collect_refs.py create mode 100644 tests/e2e_tests/common/__init__.py create mode 100644 tests/e2e_tests/common/common/base_provider.py create mode 100644 tests/e2e_tests/common/common/common_base_class.py create mode 100644 tests/e2e_tests/common/common/dummy_comparator.py create mode 
100644 tests/e2e_tests/common/common/e2e_utils.py create mode 100644 tests/e2e_tests/common/common/pipeline.py create mode 100644 tests/e2e_tests/common/comparator/__init__.py create mode 100644 tests/e2e_tests/common/comparator/classification.py create mode 100644 tests/e2e_tests/common/comparator/container.py create mode 100644 tests/e2e_tests/common/comparator/dummy.py create mode 100644 tests/e2e_tests/common/comparator/eltwise.py create mode 100644 tests/e2e_tests/common/comparator/object_detection.py create mode 100644 tests/e2e_tests/common/comparator/ocr.py create mode 100644 tests/e2e_tests/common/comparator/provider.py create mode 100644 tests/e2e_tests/common/comparator/semantic_segmentation.py create mode 100644 tests/e2e_tests/common/comparator/ssim.py create mode 100644 tests/e2e_tests/common/comparator/ssim_4d.py create mode 100644 tests/e2e_tests/common/comparator/threshold_utils.py create mode 100644 tests/e2e_tests/common/config.py create mode 100644 tests/e2e_tests/common/core.py create mode 100644 tests/e2e_tests/common/decorators.py create mode 100644 tests/e2e_tests/common/env_utils.py create mode 100644 tests/e2e_tests/common/environment_info.py create mode 100644 tests/e2e_tests/common/hook_utils.py create mode 100644 tests/e2e_tests/common/infer/__init__.py create mode 100644 tests/e2e_tests/common/infer/common_inference.py create mode 100644 tests/e2e_tests/common/infer/dummy_infer_class.py create mode 100644 tests/e2e_tests/common/infer/network_modifiers/__init__.py create mode 100644 tests/e2e_tests/common/infer/network_modifiers/container.py create mode 100644 tests/e2e_tests/common/infer/network_modifiers/network_modifiers.py create mode 100644 tests/e2e_tests/common/infer/provider.py create mode 100644 tests/e2e_tests/common/ir_provider/__init__.py create mode 100644 tests/e2e_tests/common/ir_provider/model_optimizer_runner.py create mode 100644 tests/e2e_tests/common/ir_provider/pregenerated.py create mode 100644 tests/e2e_tests/common/ir_provider/provider.py create mode 100644 tests/e2e_tests/common/logger.py create mode 100644 tests/e2e_tests/common/marks.py create mode 100644 tests/e2e_tests/common/model_loader/__init__.py create mode 100644 tests/e2e_tests/common/model_loader/load_pytorch_model.py create mode 100644 tests/e2e_tests/common/model_loader/provider.py create mode 100644 tests/e2e_tests/common/model_loader/tf_hub_model_loader.py create mode 100644 tests/e2e_tests/common/multiprocessing_utils.py create mode 100644 tests/e2e_tests/common/openvino_resources.py create mode 100644 tests/e2e_tests/common/parsers.py create mode 100644 tests/e2e_tests/common/platforms.yml create mode 100644 tests/e2e_tests/common/plugins/__init__.py create mode 100644 tests/e2e_tests/common/plugins/common/__init__.py create mode 100644 tests/e2e_tests/common/plugins/common/base_conftest.py create mode 100644 tests/e2e_tests/common/plugins/common/conftest.py create mode 100644 tests/e2e_tests/common/plugins/e2e_test/__init__.py create mode 100644 tests/e2e_tests/common/plugins/e2e_test/conftest.py create mode 100644 tests/e2e_tests/common/plugins/ref_collect/__init__.py create mode 100644 tests/e2e_tests/common/plugins/ref_collect/conftest.py create mode 100644 tests/e2e_tests/common/plugins/reshape_tests/__init__.py create mode 100644 tests/e2e_tests/common/plugins/reshape_tests/conftest.py create mode 100644 tests/e2e_tests/common/postprocessors/YOLO.py create mode 100644 tests/e2e_tests/common/postprocessors/__init__.py create mode 100644 
tests/e2e_tests/common/postprocessors/classification.py create mode 100644 tests/e2e_tests/common/postprocessors/common.py create mode 100644 tests/e2e_tests/common/postprocessors/ctc.py create mode 100644 tests/e2e_tests/common/postprocessors/filters.py create mode 100644 tests/e2e_tests/common/postprocessors/image_modifications.py create mode 100644 tests/e2e_tests/common/postprocessors/mask_rcnn.py create mode 100644 tests/e2e_tests/common/postprocessors/object_detection.py create mode 100644 tests/e2e_tests/common/postprocessors/provider.py create mode 100644 tests/e2e_tests/common/postprocessors/semantic_segmentation.py create mode 100644 tests/e2e_tests/common/preprocessors/__init__.py create mode 100644 tests/e2e_tests/common/preprocessors/preprocessors.py create mode 100644 tests/e2e_tests/common/preprocessors/provider.py create mode 100644 tests/e2e_tests/common/preprocessors/transformers.py create mode 100644 tests/e2e_tests/common/pytest_utils.py create mode 100644 tests/e2e_tests/common/readers/__init__.py create mode 100644 tests/e2e_tests/common/readers/provider.py create mode 100644 tests/e2e_tests/common/readers/readers.py create mode 100644 tests/e2e_tests/common/ref_collector/__init__.py create mode 100644 tests/e2e_tests/common/ref_collector/dummy_ref_collector.py create mode 100644 tests/e2e_tests/common/ref_collector/precollected.py create mode 100644 tests/e2e_tests/common/ref_collector/provider.py create mode 100644 tests/e2e_tests/common/ref_collector/score_onnx_runtime.py create mode 100644 tests/e2e_tests/common/ref_collector/score_paddlepaddle.py create mode 100644 tests/e2e_tests/common/ref_collector/score_pytorch.py create mode 100644 tests/e2e_tests/common/ref_collector/score_pytorch_onnx_runtime.py create mode 100644 tests/e2e_tests/common/ref_collector/score_tf.py create mode 100644 tests/e2e_tests/common/ref_collector/score_tf_hub.py create mode 100644 tests/e2e_tests/common/ref_collector/score_tf_lite.py create mode 100644 tests/e2e_tests/common/ref_collector/score_tf_v2.py create mode 100644 tests/e2e_tests/common/ref_collector/tf_hub_ref_provider.py create mode 100644 tests/e2e_tests/common/sys_info_utils.py create mode 100644 tests/e2e_tests/common/table_utils.py create mode 100644 tests/e2e_tests/config.py create mode 100644 tests/e2e_tests/conftest.py create mode 100644 tests/e2e_tests/env_config_local.yml create mode 100644 tests/e2e_tests/pipelines/pipeline_templates/collect_reference_templates.py create mode 100644 tests/e2e_tests/pipelines/pipeline_templates/comparators_template.py create mode 100644 tests/e2e_tests/pipelines/pipeline_templates/infer_templates.py create mode 100644 tests/e2e_tests/pipelines/pipeline_templates/input_templates.py create mode 100644 tests/e2e_tests/pipelines/pipeline_templates/ir_gen_templates.py create mode 100644 tests/e2e_tests/pipelines/pipeline_templates/model_loader_templates.py create mode 100644 tests/e2e_tests/pipelines/pipeline_templates/postproc_template.py create mode 100644 tests/e2e_tests/pipelines/pipeline_templates/preproc_templates.py create mode 100644 tests/e2e_tests/pipelines/pipeline_templates/pytorch_to_onnx_converter_template.py create mode 100644 tests/e2e_tests/pipelines/tf_hub/nightly.yml create mode 100644 tests/e2e_tests/pipelines/tf_hub/precommit.yml create mode 100644 tests/e2e_tests/pipelines/tf_hub/tf_hub.py create mode 100644 tests/e2e_tests/pipelines/tf_hub/tf_hub_case_class.py create mode 100644 tests/e2e_tests/pytest.ini create mode 100644 tests/e2e_tests/requirements.txt create 
mode 100644 tests/e2e_tests/reshape_test_rules.yml
 create mode 100644 tests/e2e_tests/test_base.py
 create mode 100644 tests/e2e_tests/test_config_local.yml
 create mode 100644 tests/e2e_tests/test_utils/__init__.py
 create mode 100644 tests/e2e_tests/test_utils/coverage_runner.py
 create mode 100644 tests/e2e_tests/test_utils/env_tools.py
 create mode 100644 tests/e2e_tests/test_utils/get_test_info.py
 create mode 100644 tests/e2e_tests/test_utils/modify_configs.py
 create mode 100644 tests/e2e_tests/test_utils/path_utils.py
 create mode 100644 tests/e2e_tests/test_utils/pytorch_loaders.py
 create mode 100644 tests/e2e_tests/test_utils/reshape_pipeline_executers.py
 create mode 100644 tests/e2e_tests/test_utils/reshape_tests_utils.py
 create mode 100644 tests/e2e_tests/test_utils/test_utils.py
 create mode 100644 tests/e2e_tests/test_utils/tf_helper.py
 create mode 100644 tests/e2e_tests/test_utils/tf_helper_config.yml
 create mode 100644 tests/e2e_tests/test_utils/tf_hub_utils.py

diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 12102199edd77c..be51fa13bef991 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -5,3 +5,4 @@
 add_subdirectory(layer_tests)
 add_subdirectory(model_hub_tests)
 add_subdirectory(samples_tests)
+add_subdirectory(e2e_tests)
diff --git a/tests/e2e_tests/CMakeLists.txt b/tests/e2e_tests/CMakeLists.txt
new file mode 100644
index 00000000000000..cf84092108f86f
--- /dev/null
+++ b/tests/e2e_tests/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright (C) 2018-2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+cmake_minimum_required(VERSION 3.13)
+
+project(e2e_tests)
+
+install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL)
diff --git a/tests/e2e_tests/README.md b/tests/e2e_tests/README.md
new file mode 100644
index 00000000000000..ed85a57cd8333e
--- /dev/null
+++ b/tests/e2e_tests/README.md
@@ -0,0 +1,50 @@
+# End-to-end Tests User Documentation
+This folder contains code to run end-to-end validation of OpenVINO on real models from different frameworks (PyTorch, TensorFlow, and ONNX).
+
+This documentation describes the environment setup for an e2e validation run, how to add a new model to the validation, and how to launch the validation.
+
+> The following steps assume that your current working directory is:
+> `tests/e2e_tests`
+
+### Environment preparation:
+ * Install Python modules required for tests:
+   ```bash
+   pip3 install -r requirements.txt
+   ```
+
+### Add a model from the TensorFlow Hub repo to end-to-end validation:
+To add a new test for a model from the TF Hub repo, add a new line to pipelines/production/tf_hub/precommit.yml.
+The line should contain the comma-separated model name and its link:
+```
+movenet/singlepose/lightning,https://www.kaggle.com/models/google/movenet/frameworks/tensorFlow2/variations/singlepose-lightning/versions/4
+```
+
+### Main entry-point
+
+There is one main testing entry point responsible for the test run: test_base.py. This script performs the
+following actions:
+1. Loads the model from its source
+2. Infers the original model through its framework
+3. Converts the original model through OVC convert model
+4. Infers the converted model through OpenVINO
+5. Provides the results of element-wise comparison of framework and OpenVINO inference
+
+#### Launch tests
+
+[test_base.py](https://github.com/openvinotoolkit/openvino/tree/master/tests/e2e_tests/test_base.py) is the main script to run end-to-end tests.
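+Under the hood, those five steps amount to roughly the following hand-written flow.
+This is a minimal sketch only: the Keras stand-in model, input shape, and tolerance
+are illustrative assumptions and not part of the framework; it assumes `tensorflow`,
+`openvino`, and `numpy` are installed.
+
+```python
+import numpy as np
+import openvino as ov
+import tensorflow as tf
+
+# 1-2. Load a model and infer it through the original framework
+model = tf.keras.applications.MobileNetV2(weights=None)  # illustrative stand-in model
+data = np.random.rand(1, 224, 224, 3).astype(np.float32)
+framework_result = model(data).numpy()
+
+# 3. Convert the original model (OVC convert model API)
+ov_model = ov.convert_model(model)
+
+# 4. Infer the converted model through OpenVINO
+compiled = ov.Core().compile_model(ov_model, "CPU")
+ov_result = compiled(data)[0]
+
+# 5. Element-wise comparison of framework and OpenVINO results
+print("max abs diff:", np.max(np.abs(framework_result - ov_result)))
+assert np.allclose(framework_result, ov_result, atol=1e-4)  # illustrative tolerance
+```
+
+In the test harness these stages are not hard-coded calls but configurable pipeline
+components (readers, preprocessors, infer steps, comparators) found under `common/`.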
+
+Run all end-to-end tests in `pipelines/`:
+```bash
+pytest test_base.py
+```
+`test_base.py` options:
+
+- `--modules=MODULES [MODULES ...]` - Paths to tests.
+- `-k TESTNAME [TESTNAME ...]` - Test names.
+- `-s` - Step-by-step logging.
+
+Example:
+```bash
+pytest test_base.py -s --modules=pipelines/production/tf_hub
+```
+
+> For full information on pytest options, run `pytest --help` or see the [documentation](https://docs.pytest.org)
diff --git a/tests/e2e_tests/__init__.py b/tests/e2e_tests/__init__.py
new file mode 100644
index 00000000000000..8ba81a92b19c53
--- /dev/null
+++ b/tests/e2e_tests/__init__.py
@@ -0,0 +1,3 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
diff --git a/tests/e2e_tests/base_test_rules.yml b/tests/e2e_tests/base_test_rules.yml
new file mode 100644
index 00000000000000..49de56d6f651fb
--- /dev/null
+++ b/tests/e2e_tests/base_test_rules.yml
@@ -0,0 +1,475 @@
+# Test rules configuration file
+#
+# Controls which tests will be run by applying specified rules to all discovered
+# tests and filtering out non-conforming ones
+#
+# Rules specification:
+#
+# :attr rules: specifies rules to be applied to tests. For example, a (CPU, FP32)
+#              rule states that for the CPU device, only FP32 precision is
+#              expected. Thus, any other configurations like (CPU, FP16), (CPU,
+#              INT8), etc. are to be excluded from the parameters setup for testing
+#
+#              Note: any value in rules may represent a list of values:
+#              "device: [GPU, OTHER], precision: [FP32, FP16]",
+#              "model: [TF_Amazon_RL_LSTM, TF_DeepSpeech]"...
+#
+# :attr filter_by: specifies which parameters are not comparable and must be
+#                  handled in a special way when applying rules. For example,
+#                  rules for CPU must not affect other devices (GPU, MYRIAD,
+#                  ...). Specifying "filter_by: device" means: "if device !=
+#                  CPU/GPU/..., do not apply CPU/GPU/... rules to it". Same
+#                  logic is useful when dealing with specific models.
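+#
+# A minimal illustration of the expected layout (hypothetical values, shown only
+# to make the schema concrete; the real rules start below):
+#
+# [
+#   {
+#     rules: [
+#       { device: CPU, precision: [ FP32, FP16 ] },
+#     ],
+#     filter_by: device
+#   },
+# ]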
+# +# Note: One can specify multiple filters the following way: +# "filter_by: [device, precision]" +# +[ + { + rules: [ + { device: CPU, precision: [ FP32, FP16, BF16 ] }, + { device: GPU, precision: [ FP32, FP16 ] }, + ], + filter_by: device + }, + { + rules: [ + { model: CAFFE_Dilation, device: [ CPU ] }, #- CVS-21098 + { model: Caffe2_DarkNet_53, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_DenseNet_121, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_DenseNet_161, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_DenseNet_169, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_DenseNet_201, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_DenseNet_264, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_DenseNet_201_kinetics, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_Fit_a_Line, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_InceptionV4, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_MGANet, batch: 1 }, # model has concat layer (axis=1) which has constant input with fixed shape [1, 64, 240, 416] + { model: Caffe2_MobileNet, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_MobileNet_pp, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_MobileNetV2_x0_25, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_MobileNetV2_x0_5, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_MobileNetV2_x1_0, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_MobileNetV2_x1_5, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_MobileNetV2_x2_0, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_MobileNetV3, device: [ CPU ] }, # Only CPU were requested (CVS-38834) + { model: Caffe2_Recognize_Digits_conv, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_Recognize_Digits_mlp, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet18, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet18_V1_opset7, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet18_V2_opset7, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet18_kinetics, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet34, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet34_V1_opset7, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet34_V2_opset7, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet34_kinetics, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet34_3D_Kinetics, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet50, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet50_pp, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet50_vc, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet50_vd, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet50_V1_opset7, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet50_V2_opset7, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet50_kinetics, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet101, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet101_kinetics, batch: 1 }, # model is not reshape-able by batch + { 
model: Caffe2_ResNet101_pp, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet101_vd, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet101_V1_opset7, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet101_V2_opset7, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet101_DUC_HDC_opset7, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet152, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet152_pp, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet152_vd, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet152_V1_opset7, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet152_V2_opset7, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNet200_vd, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNeXt50_32x4d, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNeXt50_64x4d, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNeXt50_vd_32x4d, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNeXt50_vd_64x4d, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNeXt101_32x4d, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNeXt101_32x8d_wsl, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNeXt101_32x16d_wsl, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNeXt101_32x32d_wsl, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNeXt101_64x4d, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNeXt101_vd_64x4d, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNeXt152_32x4d, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ResNeXt152_64x4d, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_SE_ResNeXt101, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_SE_ResNeXt152, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_SE_ResNeXt50, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ShuffleNetV2_x0_25, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ShuffleNetV2_x0_33, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ShuffleNetV2_x0_5, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ShuffleNetV2_x1_5, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_ShuffleNetV2_x2_0, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_VGG16, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_VGG16_opset7, batch: 1 }, # model is not reshape-able by batch + { model: Caffe2_VGG16_BN_opset7, batch: 1 }, # model is not reshape-able by batch + + { model: IR_action_recognition_0001_decoder_internal, batch: 1 }, + { model: IR_action_recognition_0001_encoder_internal, batch: 1 }, + { model: IR_driver_action_recognition_adas_0002_decoder_internal, batch: 1 }, + { model: IR_driver_action_recognition_adas_0002_encoder_internal, batch: 1 }, + { model: IR_face_detection_adas_binary_0001_internal, precision: FP32 }, + { model: IR_handwritten_score_recognition_0001_internal, batch: 1 }, + { model: IR_license_plate_recognition_barrier_0001, batch: 1 }, + { model: IR_pedestrian_detection_adas_binary_0001_internal, precision: FP32 }, + { model: IR_person_detection_action_recognition_0005_internal, batch: 1 }, + { 
model: IR_person_detection_action_recognition_teacher_0002_internal, batch: 1 }, + { model: IR_person_detection_raisinghand_recognition_0001_internal, batch: 1 }, + { model: IR_ResNet50_binary_0001_internal, precision: FP32 }, + { model: IR_text_recognition_0012_internal, batch: 1 }, + { model: IR_vehicle_detection_adas_binary_0001_internal, precision: FP32 }, + { model: IR_vehicle_license_plate_detection_barrier_0106_internal, batch: 1 }, + + { model: KALDI_Cnn_Tdnn_Lstm, device: CPU }, # Only CPU was requested (CVS-62030) + { model: KALDI_Cnn_Tdnn1g_Sp, device: CPU, batch: 1 }, # Only CPU was requested (CVS-82245) + { model: KALDI_Cnntdnnf, device: CPU }, # Only CPU was requested (CVS-48079) + { model: KALDI_Librispeech_Nnet2_Splice_Constdims, batch: 1 }, # (CVS-28939), also model is not reshape-able + { model: KALDI_nnet3_lstm_1m, device: CPU, precision: FP32 }, # Only CPU with FP32 was requested (CVS-54307) + { model: KALDI_Rm_Convnet, device: CPU }, # This model isn't supported on GNA (CVS-51943) + + { model: MXNET_Brain_tumor_segmentation, device: GPU, batch: 1 }, + { model: MXNET_Brain_tumor_segmentation, device: CPU }, #This model cannot be run on GPU with batch>1 (CVS-19959) + { model: MXNET_DeformablePSROIPoolingRfcn, batch: 1 }, # model output will return the same value regardless of value + { model: MXNET_Encoder_Multilayer, batch: 1 }, # Hardcoded original reshape value + { model: MXNET_RNN_Bidirectional_transducer_decoder, batch: 1 }, # Non reshape-able TI + { model: MXNET_RNN_Bidirectional_transducer_encoder, batch: 1 }, # Non reshape-able TI + { model: MXNET_RNN_Bidirectional_single_layer, batch: 1 }, # Non reshape-able TI + { model: MXNET_RNN_Transducer_multi_batch, batch: 1 }, + { model: MXNET_SSD_Vgg16_300_Voc_GluonCV, device: GPU, precision: FP32 }, # GPU do not support FP16 (CVS-87076) + { model: MXNET_SSD_Vgg16_300_Voc_GluonCV, device: CPU }, + { model: MXNET_Word_lm, batch: 1 }, + + { model: ONNX_BabbleLabs_Wavenet, batch: [ 1, 2 ], device: CPU }, + { model: ONNX_BERT_INT8, batch: [ 1, 2 ], device: CPU }, # It takes enormous time to run it on the GPU, also int8 status for that is unclear + { model: ONNX_BERT_NER_FACE_HUG, batch: 1, device: CPU }, # (CVS-51234) + { model: ONNX_BERT_BASE_CASED_SQUAD2, batch: 1 }, # model is not reshape-able by batch (CVS-102507) + { model: ONNX_Conformer_CTC_Hindi, device: [ CPU ] }, # (CVS-91910) + { model: ONNX_ConvPoolFcReLu, device: [ CPU, GNA ], batch: 1 }, # CVS-42787 + { model: ONNX_Intel_DNS, device: [ CPU ], batch: 1 }, # (CVS-51694), model is not reshape-able + { model: ONNX_LPCNet_Decoder, device: [ CPU ] }, # Only CPU target was requested (CVS-41247) + { model: ONNX_LPCNet_Encoder, device: [ CPU ] }, # Only CPU target was requested (CVS-41247) + { model: ONNX_NSNet2_GRU, device: [ CPU, GNA ], batch: 1 }, # For models with GRU operations, the only supported batch size is 1 (CVS-22369) + { model: ONNX_Runtime_DarkNet_53, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_BridgeTower, device: [ CPU ] }, #requested only for CPU (CVS-108319) + { model: ONNX_Runtime_CorelPainterNNArt, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_DCSCN, device: CPU }, # Only CPU target was requested (CVS-37078) + { model: ONNX_Runtime_DenseNet_121, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_DenseNet_161, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_DenseNet_169, batch: 1 }, # model is not reshape-able by batch + { model: 
ONNX_Runtime_DenseNet_201, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_DenseNet_264, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_F3NET, device: [ CPU ] }, # Only CPU target was requested (CVS-42385) + { model: ONNX_Runtime_fp16_InceptionV1, precision: FP16 }, + { model: ONNX_Runtime_fp16_ShuffleNet, precision: FP16 }, + { model: ONNX_Runtime_fp16_Tiny_Yolo_V2, precision: FP16 }, + { model: ONNX_Runtime_MNIST_convinteger, device: CPU }, #FP16 for GPU is not supported (CVS-106711) + { model: ONNX_Runtime_MNIST_convinteger, precision: FP32, device: GPU }, #FP16 for GPU is not supported (CVS-106711) + { model: ONNX_Runtime_Mobile_Former, batch: 1, device: CPU }, # model is not reshape-able by batch because of hardcoded values in Reshape node 'Reshape_118', and only CPU was requested + { model: ONNX_Runtime_MobileNet_pp, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_MobileNet_convinteger, device: [ CPU ] }, # GPU does not support FP16 (CVS-92497) + { model: ONNX_Runtime_MobileNet_convinteger, device: [ GPU ], precision: FP32 }, # GPU does not support FP16 (CVS-92497) + { model: ONNX_Runtime_MobileNetV2_x0_25, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_MobileNetV2_x0_5, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_MobileNetV2_x1_0, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_MobileNetV2_x1_5, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_MobileNetV2_x2_0, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_RCAN_rg10_rb20_f64, device: GPU, batch: 1 }, # return full GPU when XDEPS-5646 will be fixed + { model: ONNX_Runtime_RCAN_rg10_rb20_f64, device: CPU }, # return full GPU when XDEPS-5646 will be fixed + { model: ONNX_Runtime_ResNet18, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNet18_V1_opset7, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNet18_V2_opset7, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNet34, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNet34_V1_opset7, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNet34_V2_opset7, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNet50_pp, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNet50_vc, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNet50_vd, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNet101_pp, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNet101_vd, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNet152_pp, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNet152_vd, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNet200_vd, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNeXt50_32x4d, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNeXt50_64x4d, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNeXt50_vd_32x4d, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNeXt50_vd_64x4d, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNeXt101_32x16d_wsl, batch: 1 }, # model is not reshape-able by batch + { model: 
ONNX_Runtime_ResNeXt101_32x32d_wsl, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNeXt101_32x4d, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNeXt101_32x8d_wsl, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNeXt101_64x4d, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNeXt101_vd_64x4d, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNeXt152_32x4d, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ResNeXt152_64x4d, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ssd_mobilenet_V1_coco_mlperf_opset10, device: CPU }, # revert GPU support when CVS-61600 will be fixed + { model: ONNX_Runtime_ssd_resnet34_mlperf_opset10, device: CPU }, # revert GPU support when CVS-61600 will be fixed + { model: ONNX_Runtime_ShuffleNetV2_x0_25, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ShuffleNetV2_x0_33, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ShuffleNetV2_x0_5, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ShuffleNetV2_x1_5, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_ShuffleNetV2_x2_0, batch: 1 }, # model is not reshape-able by batch + { model: ONNX_Runtime_Wav2vec2, device: CPU }, # return GPU when CVS-104558 will be fixed + { model: ONNX_SplitConvPoolConcatFc, device: [ CPU, GNA ], batch: 1 }, # CVS-42787 + { model: ONNX_TwoInputsConvPoolConcatFcRelu, device: [ CPU, GNA ], batch: 1 }, # CVS-42787 + { model: ONNX_Esrgan, device: [ CPU ] }, # (CVS-102883) + { model: ONNX_V_Diffusion, batch: 1 }, # Batch reshape are not available because of constant values in node with friendly_name '/net/net.4/main/main.5/main/main.5/main/main.5/main/main.2/Reshape + + { model: Precollected_ONNX_Resnet34_BiLSTM_ucf0_85, batch: 1 }, + { model: Precollected_ResNet34_1lstm_ucf082, batch: 1 }, + { model: Precollected_ResNet34_1mkinetics_self_attn_no_norm065, batch: 1 }, + + { model: ONNX_3D_UNet, batch: 1 }, # (CVS-42580) model is not reshape-able + { model: ONNX_BERT_EMD, device: [ CPU ] }, # (CVS-48001) + { model: ONNX_CLIP, device: [ CPU ] }, # (CVS-99096) + { model: ONNX_Customized_Cascade_Rcnn, batch: 1, device: [ CPU ] }, # (CVS-51956) + { model: ONNX_Data2Vec_Audio, batch: 1, device: CPU }, # revert GPU support when CVS-104109 will be fixed + { model: ONNX_DETR_ResNet50_INT8, device: CPU }, # (CVS-55245) + { model: ONNX_DETR_ResNet50_INT8, device: GPU, precision: FP32 }, # (CVS-109561) F16 has out-of-range computed values, not covered by the GPU plugin + { model: ONNX_DLRM_rnd, device: CPU, batch: 1 }, # model supported on CPU and not reshape-able by batch + { model: ONNX_DLRM_rnd_dot, device: CPU, batch: 1 }, # model supported on CPU and not reshape-able by batch + { model: ONNX_DLRM_rnd_cat, device: CPU, batch: 1 }, # model supported on CPU and not reshape-able by batch + { model: ONNX_FasterRCNN_ResNet50_FPN, batch: 1 }, # model is not made for batch not equal to 1 + { model: ONNX_iSeebetter, device: GPU, batch: 1 }, # return full GPU when XDEPS-6238 will be fixed + { model: ONNX_iSeebetter, device: CPU }, # return full GPU when XDEPS-6238 will be fixed + { model: ONNX_KSHD_Head_Detection, device: CPU }, # model supported on CPU (CVS-30554) + { model: ONNX_Magic_Video_Super_Res_WDSR, device: GPU, precision: FP32 }, # (CVS-56198), GPU do not support FP16 (CVS-59192) + { model: ONNX_Magic_Video_Super_Res_WDSR, 
device: CPU }, # (CVS-56198) + { model: ONNX_MagixStyleSwap, batch: 1 }, # (CVS-82204) model doesn't support batch dimension + { model: ONNX_MagixStyleSwap_INT8, batch: 1 }, # (CVS-82204) model doesn't support batch dimension + { model: ONNX_MaskRCNN_ResNet50_FPN_with_cfg, batch: 1, device: CPU }, # this model doesn't support reshape; The model is not supported on GPU because it contains Experimental* layers (CVS-25104) + { model: ONNX_MaskRCNN_ResNet50_FPN_wo_cfg_wo_infer, batch: 1, device: CPU }, # CVS-39838 + { model: ONNX_ModNet, device: [ CPU,GPU ] }, # (CVS-51155) + { model: ONNX_OpenNMT_Decoder_English2Hindi, device: [ CPU ] }, # Only CPU was requested (CVS-52414) + { model: ONNX_OpenNMT_Decoder_Hindi2English, device: [ CPU ] }, # Only CPU was requested (CVS-52414) + { model: ONNX_OpenNMT_Encoder_English2Hindi, device: [ CPU ] }, # Only CPU was requested (CVS-52414) + { model: ONNX_OpenNMT_Encoder_Hindi2English, device: [ CPU ] }, # Only CPU was requested (CVS-52414) + { model: ONNX_OpenNMT_Generator_English2Hindi, device: [ CPU ] }, # Only CPU was requested (CVS-52414) + { model: ONNX_OpenNMT_Generator_Hindi2English, device: [ CPU ] }, # Only CPU was requested (CVS-52414) + { model: ONNX_Roberta, device: [ CPU ] }, + { model: ONNX_Roberta, precision: FP32, device: [ GPU ] }, # FP16 on GPU is not supported (CVS-111033) + { model: ONNX_SR_Kuaishou_Blur, device: [ CPU ] }, # (CVS-71146) CPU only until e2e will support dGPU + { model: ONNX_SR_Kuaishou_Blocky, device: [ CPU ] }, # (CVS-71146) CPU only until e2e will support dGPU + { model: ONNX_SR_Kuaishou_Defocusv4, device: [ CPU ] }, # (CVS-71146) CPU only until e2e will support dGPU + { model: ONNX_SR_Kuaishou_Dirtylens, batch: 1, device: [ CPU ] }, # (CVS-71146) model is not reshape-able by batch because of hardcoded values in Reshape node 'Reshape_67' + { model: ONNX_SR_Kuaishou_Noise, device: [ CPU ] }, # (CVS-71146) CPU only until e2e will support dGPU + { model: ONNX_SSD_ResNet34_New_MLPerf05, batch: 1 }, # This model is not reshapable (CVS-25049) + { model: ONNX_Stable_Diffusion_Text_Encoder, batch: 1 }, # model is not reshape-able by batch because of hardcoded values in Reshape_146 node + { model: ONNX_Stable_Diffusion_Vae, device: GPU, batch: 1, precision: FP32 }, # model is not reshape-able by batch because of hardcoded values in Reshape_42 node, return full GPU when XDEPS-6238 will be fixed + { model: ONNX_Stable_Diffusion_Vae, device: CPU, batch: 1 }, # model is not reshape-able by batch because of hardcoded values in Reshape_42 node, return full GPU when XDEPS-6238 will be fixed + { model: ONNX_Stable_Diffusion_Unet, batch: 1 }, # model is not reshape-able by batch because of Add '/down_blocks.0/resnets.0/Add' node in which comes tensors with different shapes + { model: ONNX_Tacotron2Decoder, device: [ CPU ] }, # Only CPU target was requested (CVS-40048) + { model: ONNX_Tacotron2Encoder, batch: 1, device: [ CPU ] }, # Only CPU target was requested (CVS-40048); model use case does not use batch size (CVS-58358) + { model: ONNX_Tacotron2Postnet, device: [ CPU ] }, # Only CPU target was requested (CVS-40048) + { model: ONNX_WeNet_Decoder, device: [ CPU ] }, # (CVS-62026) + { model: ONNX_WeNet_Encoder, device: [ CPU ] }, # (CVS-62026) + { model: ONNX_WhisperEncoder, batch: 1 }, # Batch and spatial reshape not available due to fully connected node with name: MatMul_3507 + { model: ONNX_WhisperDecoder, batch: 1}, # Batch and spatial reshape not available due to fully connected node with name: MatMul_3507 + { model: 
Detectron2_MarkRCNN, batch: 1}, # Batch is absent in shape. Spatial reshape not available due node with name: Add_6031 + { model: ONNX_YoloV5_S6_1, device: [ CPU ] }, # (CVS-102522) + { model: ONNX_YoloV5_M6_1, device: [ CPU ] }, # (CVS-102532) + { model: ONNX_Decodec_24, batch: 1 }, # Batch reshape are not available because of constant values in /Reshape_9 node + { model: ONNX_TinyBert, device: [ CPU ] }, # (CVS-48254) + { model: ONNX_CVT, batch: 1 }, # Model is not reshape-able by batch due to node opset1:: /cvt/encoder/stages.0/embedding/convolution_embeddings/Reshape + + # For PDPD models only CPU and GPU were requested (no ticket) + { model: PDPD_BERT_BASE_UNCASED_SST2, device: [ CPU ], batch: 1}, # (CVS-71981) + { model: PDPD_FastSCNN, device: [ CPU ] }, # (CVS-48738) + { model: PDPD_PPOCRv2_cls, device: [ CPU ] }, # (CVS-71300) + { model: PDPD_PPOCRv2_det, device: [ CPU ] }, # (CVS-71300) + { model: PDPD_PPOCRv2_rec, device: [ CPU ] }, # (CVS-71300) + { model: PDPD_SSD_MobileNetV3, device: [ CPU ], batch: 1 }, # (CVS-71981) + { model: PDPD_YOLOv3, device: [ CPU ] }, # (CVS-48738) + { model: PDPD_PPYOLO, device: [ CPU ] }, # (CVS-48738) + { model: PDPD_PPYOLOv2, device: [ CPU ] }, # (CVS-69465) + { model: PDPD_FastRCNN, batch: 1, device: [ CPU ] }, # revert GPU support when CVS-100016 will be fixed + + { model: PyTorch_TimmTwinsPCPVTBase, batch: 1 }, # Model is not reshape-able by batch due to node aten::reshape_81 because it has fixed shapes + { model: PyTorch_TimmTwinsPCPVTLarge, batch: 1 }, # Model is not reshape-able by batch due to node aten::reshape_81 because it has fixed shapes + { model: PyTorch_TimmTwinsPCPVTSmall, batch: 1 }, # Model is not reshape-able by batch due to node aten::reshape_81 because it has fixed shapes + { model: PyTorch_TimmTwinsSVTBase, batch: 1 }, # Model is not reshape-able by batch due to node aten::view_75 because it has fixed shapes + { model: PyTorch_TimmTwinsSVTLarge, batch: 1 }, # Model is not reshape-able by batch due to node aten::view_75 because it has fixed shapes + { model: PyTorch_TimmTwinsSVTSmall, batch: 1 }, # Model is not reshape-able by batch due to node aten::view_75 because it has fixed shapes + { model: Pytorch_FinBERT, batch: 1 }, # Model is not reshape-able by batch due to node aten::view/Reshape_65 because it has fixed shapes + { model: Pytorch_CVT, batch: 1 }, # Model is not reshape-able by batch due to node 'opset1::Reshape aten::view/Reshape'because it has fixed shapes + { model: Pytorch_BERTmini, batch: 1 }, # Model is not reshape-able by batch due to node aten::view/Reshape_126 because it has fixed shapes + { model: Pytorch_Blip, device: [ CPU ] }, # CVS-105259 + { model: Pytorch_BridgeTower, batch: 1, device: [ CPU ] }, # Model is not reshape-able by batch due to additional operation in node aten::add/Add_1435 between nodes with changed shape and constant. 
Model requested only for CPU CVS-108319 + { model: Pytorch_V_Diffusion, batch: 1 }, # Model is not reshape-able by batch due to node pset1::Reshape aten::group_norm/Reshape_81 because it has fixed shapes + { model: Pytorch_Bloom, batch: 1 }, # Model is not reshape-able by batch due to node opset1::Reshape aten::reshape/Reshape because it has fixed shapes + { model: Pytorch_Tabnine, batch: 1 }, # Model is not reshape-able by batch due to node aten::view/Reshape_34 because it has fixed shapes + { model: Pytorch_Gpt_J_6B, batch: 1, device: [ CPU ] }, # Model is not reshape-able by batch because node opset1::Reshape aten::view/Reshape_13360 has fixed shapes + { model: Pytorch_SegmentationAnyImgEncoder, batch: 1, device: [ CPU ] }, # Model is not reshape-able by batch because node hardcoded shape node opset1::Reshape aten::view/Reshape (aten::pad_99[0]:f32[2,70,70,768] has fixed shape. Model was requested only for CPU CVS-108279 + { model: Pytorch_SegmentationAnyMaskPredictor, batch: 1, device: [ CPU ] }, # Model is not reshape-able due to prim::ListConstruct node support only constant inputs. Model was requested only for CPU CVS-108279 + { model: Pytorch_Stable_Diffusion_2_1_Text_Encoder, device: [ CPU ]}, # (CVS-110572) + { model: Pytorch_Stable_Diffusion_2_1_Unet, device: [ CPU ], batch: 1 }, # Model is not reshape-able by batch because node aten::group_norm/Reshape_53 has fixed shapes + { model: Pytorch_Stable_Diffusion_2_1_Vae_Decoder, device: [ CPU ]}, # (CVS-110572) + { model: Pytorch_Stable_Diffusion_2_1_Vae_Encoder, device: [ CPU ], batch: 1}, # Model is not reshape-able by batch because node aten::group_norm/Reshape_14 has fixed shapes + { model: Pytorch_Stable_Diffusion_2_Inpainting_Text_Encoder, device: [ CPU ]}, # (CVS-110572) + { model: Pytorch_Stable_Diffusion_2_Inpainting_Unet, device: [ CPU ], batch: 1 }, # Model is not reshape-able by batch because node aten::group_norm/Reshape_53 has fixed shapes + { model: Pytorch_Stable_Diffusion_2_Inpainting_Vae_Decoder, device: [ CPU ]}, # (CVS-110572) + { model: Pytorch_Stable_Diffusion_2_Inpainting_Vae_Encoder, device: [ CPU ], batch: 1}, # Model is not reshape-able by batch because node aten::group_norm/Reshape_71 has fixed shapes + { model: Pytorch_StableLM, device: [ CPU ]}, # Model was requested only for CPU CVS-111394 + { model: Pytorch_Dolly_V2, batch: 1, device: [ CPU ] }, # (CVS-108396) Model is not reshape-able by batch due to node '__module.gpt_neox.layers.0.attention/aten::view/Reshape' because it has fixed shapes + { model: Pytorch_GPT3, batch: 1}, # Model is not reshape-able by batch due to node with name ''opset1::Reshape aten::view/Reshape_3041' because it has fixed shapes + { model: Pytorch_Llama_3b_v2, device: [ CPU ] }, # (CVS-106319) + { model: Pytorch_CocoSpade, batch: 1}, # Model is not reshape-able by batch because of fixed shapes in aten::copy_/Broadcast + { model: Pytorch_Detectron2_MaskRCNN, batch: 1}, # Batch is absent in shape + + { model: TF_3DGAN, batch: 1 }, # Model has constant shape [1,1,1,1,200] for node gen/Reshape + { model: TF_GoogleNet_v3, batch: 1 }, # Model is not reshape-able by batch due to node opset1::Reshape InceptionV3/Predictions/Reshape_1 + { model: TF_A3C_LSTM_GitHub, batch: 1 }, # "States are not broadcastable by batch" + { model: TF_ALBERT, batch: 1 }, # not reshape-able by batch size due to node bert/embeddings/Reshape + { model: TF_Alibaba_ShuffleSeg_138, batch: 1 }, #Model has fixed input shape [1,256,512,3] + { model: TF_Alibaba_ShuffleSeg_02, batch: 1 }, #Model has fixed input shape 
[1,256,512,3], CVS-19228 + { model: TF_Amazon_RL_LSTM, batch: 1 }, + { model: TF_Basic_LSTM_L, device: [ CPU, GNA ], batch: 1 }, + { model: TF_BERT, batch: 1 }, # Constant shape for layer bert/encoder/Reshape + { model: TF_BERT_BASE_UNCASED, batch: 1 }, # Constant shape for layer bert/encoder/Reshape + { model: TF_BERT_BASE_CASED, batch: 1 }, # Constant shape for layer bert/encoder/Reshape + { model: TF_BERT_LARGE_UNCASED, batch: 1 }, # Constant shape for layer bert/encoder/Reshape + { model: TF_BERT_LARGE_CASED, batch: 1 }, # Constant shape for layer bert/encoder/Reshape + { model: TF_BERT_MULTI_CASED, batch: 1 }, # Constant shape for layer bert/encoder/Reshape + { model: TF_BERT_MULTI_UNCASED, batch: 1 }, # Constant shape for layer bert/encoder/Reshape + { model: TF_BERT_CHINESE, batch: 1 }, # Constant shape for layer bert/encoder/Reshape + { model: TF_BERT_XNLI, device: [ CPU ], batch: 1 }, # Only CPU target was requested (CVS-35249), model is not reshape-able + { model: TF_BlackMagic_Model_E, device: [ CPU ] }, # revert GPU support when CVS-104715 will be fixed + { model: TF_CNN_Transformer, batch: 1, device: [ CPU ] } , # not reshape-able by batch size due to node cnn_and_rnn/transformer/layer_0/attention/self/Reshape + { model: TF_CNN_Transformer, batch: 1, device: [ GPU ], precision: FP32 }, # FP16 on GPU give inf in inf/ref results (CVS-114137) + { model: TF_CRNN, batch: 1 }, # Model is not reshapable + { model: TF_CTPN, batch: 1 }, #Model does not support batch more than 1 (CVS-19388) + { model: TF_Cyberlink_Object_Removal, batch: 1 }, # model is not reshape-able because of Convolution node "generator/model.3/conv1/ffc/convg2g/fu/conv_layer/Conv2D" in which data batch channel count will not match filter input channel count + { model: TF_Cyberlink_NST_1, batch: 1 }, # model is not reshape-able because of node Subtract_80 (CVS-106559) + { model: TF_Cyberlink_NST_2, device: [ CPU ] }, # GPU does not support FP16 (CVS-102674) + { model: TF_Cyberlink_NST_2, device: [ GPU ], precision: FP32 }, # GPU does not support FP16 (CVS-102674) + { model: TF_Custom_WD, device: CPU }, # model wasn't requested for GPU + { model: TF_Dark_Channel_Dehazing, device: CPU }, # revert GPU support when CVS-101355 will be fixed + { model: TF_DeepLabV3_MobileNet_V2, batch: 1 }, #Model has fixed input shape [1, ?, ?, 3] + { model: TF_DeepSpeech041, batch: 1 }, + { model: TF_DeepSpeech061, batch: 1 }, + { model: TF_DeepSpeech061_LowLatency2_Transform, batch: 1, device: [ CPU, GNA ] }, + { model: TF_DeepSpeech061_MakeStateful_Transform, batch: 1, device: [ CPU, GNA ] }, + { model: TF_DeepSpeech071, batch: 1 }, + { model: TF_DIEN_Alibaba, precision: FP32 }, # (CVS-32215), a minimal sub-graph strictly needs FP32 mode (CVS-52843) + { model: TF_Enhance3_Lite, batch: 1 }, # not reshape-able by batch size due to Transpose Sinking + { model: TF_EDSR3, batch: 1, precision: FP16 }, # (CVS-51157) model is not reshape-able, FP16 only + { model: TF_Faster_RCNN_nas_coco, device: CPU }, #GPU was disabled. This model can't be load on GPU device because of large model size + { model: TF_FSMN, batch: 1, device: CPU }, # not reshape-able with batch = 2, only CPU support was requested (CVS-22562) + { model: TF_FSMN_LowLatency2_Transform, batch: 1, device: CPU }, # not reshape-able with batch = 2, only CPU support was requested (CVS-22562) + { model: TF_GNMT, device: CPU }, # GPU is requested in CVS-20579 but nothing is moving there + { model: TF_Inpaint, batch: 1 }, # The model does not support batch 2. 
It contains a Concat operation with a Constant with a fixed batch dimension value + { model: TF_IstaNet, device: GPU, batch: 2 }, # (CVS-49595), return full GPU when XDEPS-6238 will be fixed + { model: TF_IstaNet, device: CPU }, # (CVS-49595), return full GPU when XDEPS-6238 will be fixed + { model: TF_JDCOM, device: [ CPU ] }, # (CVS-30633) + { model: TF_L0_Smoothing, device: CPU }, # revert GPU support when CVS-101355 will be fixed + { model: TF_LiteResNet50_INT8, device: CPU }, + { model: TF_LiteResNet50_INT8, device: GPU, precision: FP32 }, # FP32 support only (CVS-25776) + { model: TF_LM_1B, batch: 1 }, # model is not reshape-able + { model: TF_LM_1B_DynamicSequenceLength, batch: 1 }, # model is not reshape-ablу + { model: TF_LSTM_Multicell, batch: 1, device: [ CPU ] }, # this model doesn't support reshape, GPU plugin does not support BOOL precision + { model: TF_Microsoft_Model_A, device: [ CPU ] }, # (CVS-50555) + { model: TF_Microsoft_Model_E, device: [ CPU ] }, # (CVS-50555) + { model: TF_Multiscale_Tone_Manipulation, device: CPU }, # revert GPU support when CVS-101355 will be fixed + { model: TF_Nifty_Net, device: CPU }, # Add GPU/MYRIAD after native support of BatchToSpace/SpaceToBatch on these devices + { model: TF_Nonlocal_Dehazing, device: CPU }, # revert GPU support when CVS-101355 will be fixed + { model: TF_Pencil_Drawing, device: CPU }, # revert GPU support when CVS-101355 will be fixed + { model: TF_PixelLink, batch: 1 }, # Model is not reshapable + { model: TF_Photographic_Style, device: CPU }, # revert GPU support when CVS-101355 will be fixed + { model: TF_Relative_Total_Variation, device: CPU }, # revert GPU support when CVS-101355 will be fixed + { model: TF_ResNet_50_fp32_official, precision: FP32 }, + { model: TF_ResNet_50_fp32_v2_official, precision: FP32 }, + { model: TF_Result_Combined, device: CPU }, # revert GPU support when CVS-101355 will be fixed + { model: TF_Result_Parametrized, device: CPU }, #return GPU when CVS-107581 will be fixed + { model: TF_Retina_Net, batch: 1 }, + { model: TF_Rudin_Osher_Fatemi, device: CPU }, # revert GPU support when CVS-101355 will be fixed + { model: TF_Sample0DimSplit, batch: 1, device: CPU }, + { model: TF_Ssd_MobileNet_v1_coco_quantized_finetuned, device: CPU }, + { model: TF_Ssd_MobileNet_v1_coco_quantized_finetuned, device: GPU, precision: FP32 }, # FP32 support only (CVS-25776) + { model: TF_StyleGAN2, batch: 1 }, # model is not reshape-able because of Convolution node "Gs/_Run/Gs/G_synthesis/4x4/Conv/Conv2D" in which data batch channel count will not match filter input channel count + { model: TF_STN, batch: 1 }, # not reshape-able by batch size due to node bilinear_interpolation_2/Reshape + { model: TF_Topaz_Labs_MaskAI_SRGAN, batch: 1 }, # model isn't reshape-able + { model: TF_TopazDenoise, batch: 1 }, # (CVS-47322) + { model: TF_TCN, device: CPU }, # revert GPU support when CVS-101359 will be fixed + { model: TF_TV_L1, device: CPU }, # revert GPU support when CVS-101355 will be fixed + { model: TF_UNet_3D, device: CPU }, #It takes more than 20 minutes to run it on GPU + { model: TF_Unrolled_Basic_LSTM, device: [ CPU, GNA ], batch: 1 }, + { model: TF_VNet, batch: 1 }, + { model: TF_Wide_And_Deep, device: CPU, batch: 1 }, # model wasn't requested for GPU and doesn't support reshape + { model: TF_xj_feature_model_v2, device: [ CPU ], precision: FP32, batch: 1 }, # Only CPU and FP32 were requested (CVS-38601), not reshape-able by batch size due to Transpose Sinking + { model: TF_XLNET_LARGE_CASED, batch: 1, 
precision: FP32 }, # Constant shape for layer (CVS-28211), weights are clipped to infinity + { model: TF_XLNET_LARGE_SQUAD, batch: 1 }, # model is not reshape-able because of hardcoded values in model/transformer/layer_0/rel_attn/einsum_2/Reshape_2 + { model: TF_XLNET_BASE_CASED, batch: 1, precision: FP32 }, # weights are clipped to infinity; Run only on batch equal to 1 because of hardcoding (CVS-43022) + { model: TF_XLNET_IMDB, batch: 1, precision: FP32 }, # weights are clipped to infinity; Run only on batch equal to 1 because of hardcoding (CVS-43022) + + { model: TF_V2_3D_UNet, batch: 1 }, # (CVS-42580) model is not reshape-able + { model: TF_V2_Context_Encoder, batch: 1, device: CPU }, # revert GPU support when CVS-101969 will be fixed + { model: TF_V2_Context_Joint, batch: 1 }, + { model: TF_V2_CustomOCR, batch: 1, device: CPU }, # (CVS-66717) + { model: TF_V2_BERT_Multi_Cased_Static, device: [ CPU ], batch: 1 }, # (CVS-42073), not reshape-able by batch du to Transpose Sinking + { model: TF_V2_BERT_Multi_Cased_DynamicSequenceLength, device: [ CPU ], precision: FP32, batch: 1 }, # (CVS-42073) model is not reshape-able, FP32 only, not reshape-able by batch due to Transpose Sinking + { model: TF_V2_Efficient_Det, batch: 1 }, # model is not reshape-able by batch + { model: TF_V2_Faster_RCNN_ResNet50_v1_atrous_coco, batch: 1}, # (CVS-35524) + { model: TF_V2_Faster_RCNN_Inception_ResNet_v2_atrous_coco, batch: 1}, # (CVS-51980) + { model: TF_V2_Faster_RCNN_Inception_ResNet_v2_atrous_coco_No_Config, batch: 1, device: CPU }, # not reshapable by batch, return GPU when CVS-107375 will be fixed + { model: TF_V2_Mask_RCNN_ResNetv2_atrous_coco, batch: 1 }, # (CVS-51981) + { model: TF_V2_Mask_RCNN_ResNetv2_atrous_coco_No_Config, batch: 1 }, # not reshape-able by batch due to node reshape:Squeeze_4691575 + { model: TF_V2_MobileNet, batch: 1 }, # not reshape-able by batch due to node Transpose_213060 + { model: TF_V2_SSDMobileNetV1FPN, batch: 1, device: [ CPU ] }, # (CVS-46209), the TF 2.X OD API models aren't reshape-able (CVS-50264) + { model: TF_V2_SSDMobileNetV1FPN_No_Config, batch: 1 }, # not reshape-able by batch due to node reshape:Squeeze_8487548 + { model: TF_V2_SSDMobileNetV2Original, batch: 1, device: [ CPU ] }, # (CVS-46209), the TF 2.X OD API models aren't reshape-able (CVS-50264) + { model: TF_V2_SSDMobileNetV2Custom, batch: 1, device: [ CPU ] }, # (CVS-50258), the TF 2.X OD API models aren't reshape-able (CVS-50264) + { model: TF_V2_SSDMobileNetV2Custom_No_Config, batch: 1 }, # not reshapable by batch + { model: TF_V2_SSDMobileNetV2FPNLite, batch: 1, device: [ CPU ] }, # (CVS-46209), the TF 2.X OD API models aren't reshape-able (CVS-50264) + { model: TF_V2_SSDResNet50V1FPN, batch: 1, device: [ CPU ] }, # (CVS-46209), the TF 2.X OD API models aren't reshape-able (CVS-50264) + { model: TF_V2_SSDResNet101V1FPN, batch: 1, device: [ CPU ] }, # (CVS-46209), the TF 2.X OD API models aren't reshape-able (CVS-50264) + { model: TF_V2_SSDResNet152V1FPN, batch: 1, device: [ CPU ] }, # (CVS-46209), the TF 2.X OD API models aren't reshape-able (CVS-50264) + { model: TF_V2_Wide_And_Deep, device: CPU, batch: 1 }, # model wasn't requested for GPU and doesn't support reshape + + { model: TFLite_AlbertLiteBase, device: CPU, batch: 1 }, # Model is not reshape-able because of hardcoded values in reshape node + { model: TFLite_AlbertLiteBase, device: GPU, precision: FP32, batch: 1 }, # Leave only FP32 for GPU as FP16 give nan in inf results + { model: TFLite_AttentionCenter, device: CPU }, # Leave only FP32 
for GPU as FP16 give nan in inf results + { model: TFLite_AttentionCenter, device: GPU, precision: FP32 }, # Leave only FP32 for GPU as FP16 give nan in inf results + { model: TFLite_FaceDetectionShortRange, device: CPU }, # Leave only FP32 for GPU as FP16 give nan in inf results + { model: TFLite_FaceDetectionShortRange, device: GPU, precision: FP32 }, # Leave only FP32 for GPU as FP16 give nan in inf results + { model: TFLite_IrisLandmark, device: CPU }, # Leave only FP32 for GPU as FP16 give nan in inf results + { model: TFLite_IrisLandmark, device: GPU, precision: FP32 }, # Leave only FP32 for GPU as FP16 give nan in inf results + { model: TFLite_MoveNet, device: CPU, batch: 1} , # batch - model is not reshape-able because of Squeeze node which squeezes by batch, device - GPU dynamism doesn't support this model (CVS-105557) + { model: TFLite_GermanMBMelGAN, device: CPU }, # GPU dynamism doesn't support this model (CVS-105553) + { model: TFLite_YamNet, device: CPU, batch: 1 }, # Model doesn't have batch dimension, exclude GPU because this model is dynamic + { model: TFLite_YamNetClassification, batch: 1 }, # Model doesn't have batch dimension + { model: TFLite_SSDLiteOD, batch: 1 }, # Model is not reshape-able because of Reshape 113 node + + { model: RNNT_GNA_Decoder, device: GNA, batch: 1 }, # (CVS-53114) + { model: RNNT_GNA_Decoder_LowLatency2, device: GNA, batch: 1 }, # (CVS-53114) + { model: RNNT_GNA_Encoder, device: GNA, batch: 1 }, # (CVS-53114) + { model: RNNT_GNA_Encoder_LowLatency2_Transform, device: GNA, batch: 1 }, # (CVS-53114) + + # These models shouldn't be run on GNA + { model: KALDI_Tedlium_Tdnn_Lstm, device: not GNA }, # (CVS-28939) + { model: KALDI_Ted_Lstm_Ld5, device: not GNA }, # (CVS-28939) + { model: KALDI_Aspire_Tdnn, device: not GNA }, # (CVS-28939)# (CVS-53114) + ], + filter_by: model + }, + { + rules: [ + { model: KALDI_Librispeech_Tdnn, device: GNA, batch: 1 }, # GNA plugin doesn't support batch 2 for models with LSTM and Convolutional layers (CVS-26359) + { model: KALDI_Rm_Cnn4a, device: GNA, batch: 1 }, + { model: KALDI_Rm_Lstm4f, device: GNA, batch: 1 }, + { model: KALDI_Rm_Nnet4a, device: GNA, batch: 1 }, + { model: KALDI_Swbd_Nnet6c_Mpe, device: GNA, batch: 1 }, + { model: KALDI_Tedlium_Dnn4, device: GNA, batch: [ 1, 2 ] }, + { model: KALDI_Tedlium_Lstm4f, device: GNA, batch: 1 }, + { model: KALDI_Tdnn, device: GNA, batch: 1 }, + { model: KALDI_Tdnn2, device: GNA, batch: 1 }, + { model: KALDI_Tdnn2_Output_Affine, device: GNA, batch: 1 }, + { model: KALDI_Wsj_Cnn4b, device: GNA, batch: 1 }, + { model: KALDI_Wsj_Dnn5b, device: GNA, batch: [ 1, 2 ] }, + { model: KALDI_Librispeech_Nnet2_Splice_Constdims, device: GNA, batch: 1 }, # (CVS-28939), also model is not reshape-able + ], + filter_by: [ device, model ] + }, +] diff --git a/tests/e2e_tests/collect_refs.py b/tests/e2e_tests/collect_refs.py new file mode 100644 index 00000000000000..640886a88936fa --- /dev/null +++ b/tests/e2e_tests/collect_refs.py @@ -0,0 +1,62 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Main entry-point to collect references for E2E tests. 
+ +Default run: +$ pytest collect_refs.py + +Options[*]: +--modules Paths to references +--env_conf Path to environment config +--dry_run Disable reference saving + +[*] For more information see conftest.py +""" +# pylint:disable=invalid-name +import numpy as np +import logging as log +import os +from e2e_tests.common.parsers import pipeline_cfg_to_string +from e2e_tests.common.common.pipeline import Pipeline + +pytest_plugins = ('e2e_tests.common.plugins.ref_collect.conftest', ) + + +def save_reference(refs, path, use_torch_to_save): + log.info("saving reference results to {path}".format(path=path)) + os.makedirs(os.path.dirname(path), mode=0o755, exist_ok=True) + if use_torch_to_save: + import torch + torch.save(refs, path) + else: + np.savez(path, **refs) + + +def test_collect_reference(reference, dry_run): + """Parameterized reference collection. + + :param reference: reference collection instance + + :param dry_run: dry-run flag. if True, disables saving reference result to + filesystem + """ + for attr in ['pipeline', 'store_path']: + if attr not in reference: + raise ValueError( + 'obligatory attribute is missing: {attr}'.format(attr=attr)) + pipeline = Pipeline(reference['pipeline']) + log.debug("Reference Pipeline:\n{}".format(pipeline_cfg_to_string(pipeline._config))) + pipeline.run() + refs = pipeline.fetch_results() + if not dry_run: + save_reference(refs, reference['store_path'], reference.get('use_torch_to_save', False)) + # Always save to `store_path_for_ref_save` (it points to share in automatics) + if 'store_path_for_ref_save' in reference and reference['store_path'] != reference['store_path_for_ref_save']: + save_reference(refs, reference['store_path_for_ref_save'], reference.get('use_torch_to_save', False)) + else: + log.info("dry run option is used. 
reference results are not saved") + + + + diff --git a/tests/e2e_tests/common/__init__.py b/tests/e2e_tests/common/__init__.py new file mode 100644 index 00000000000000..923d56d04145b6 --- /dev/null +++ b/tests/e2e_tests/common/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 diff --git a/tests/e2e_tests/common/common/base_provider.py b/tests/e2e_tests/common/common/base_provider.py new file mode 100644 index 00000000000000..f64c2ab32b8c74 --- /dev/null +++ b/tests/e2e_tests/common/common/base_provider.py @@ -0,0 +1,64 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +class BaseProviderMeta(type): + def __new__(mcs, name, bases, attrs, **kwargs): + cls = super().__new__(mcs, name, bases, attrs) + # do not create container for abstract provider + if '_is_base_provider' in attrs: + return cls + assert issubclass(cls, BaseProvider), "Do not use metaclass directly" + cls.register(cls) + return cls + + +class BaseProvider(metaclass=BaseProviderMeta): + _is_base_provider = True + registry = {} + __action_name__ = None + + @classmethod + def register(cls, provider): + provider_name = getattr(cls, '__action_name__') + if not provider_name: + return + cls.registry[provider_name] = provider + + @classmethod + def provide(cls, provider, *args, **kwargs): + if provider not in cls.registry: + raise ValueError("Requested provider {} not registered".format(provider)) + root_provider = cls.registry[provider] + root_provider.validate() + return root_provider(*args, **kwargs) + + +class StepProviderMeta(type): + def __new__(mcs, name, bases, attrs, **kwargs): + cls = super().__new__(mcs, name, bases, attrs) + # do not create container for abstract provider + if '_is_base_provider' in attrs: + return cls + assert issubclass(cls, BaseStepProvider), "Do not use metaclass directly" + cls.register(cls) + return cls + + +class BaseStepProvider(metaclass=StepProviderMeta): + _is_base_provider = True + registry = {} + __step_name__ = None + + @classmethod + def register(cls, provider): + provider_name = getattr(cls, '__step_name__', None) + if not provider_name: + return + cls.registry[provider_name] = provider + + @classmethod + def provide(cls, provider, *args, **kwargs): + if provider not in cls.registry: + raise ValueError("Requested provider {} not registered".format(provider)) + root_provider = cls.registry[provider] + return root_provider(*args, **kwargs) diff --git a/tests/e2e_tests/common/common/common_base_class.py b/tests/e2e_tests/common/common/common_base_class.py new file mode 100644 index 00000000000000..a437cf51e529c0 --- /dev/null +++ b/tests/e2e_tests/common/common/common_base_class.py @@ -0,0 +1,210 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import os +import re +from copy import deepcopy +from pathlib import Path +from tempfile import TemporaryDirectory +from logging import getLogger + +import numpy as np +import pytest + +from e2e_tests.test_utils.path_utils import resolve_file_path +# import local modules: +from e2e_tests.test_utils.test_utils import align_output_name +from e2e_tests.common.parsers import mapping_parser as mapping +from e2e_tests.common.common.e2e_utils import get_tensor_names_dict +from e2e_tests.test_utils.env_tools import Environment + +log = getLogger(__name__) + + +def parse_mo_mapping(mo_out, model_name): + """ + Parse model optimizer mapping file given output dir and model name. 
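+ For example (paths and layer names below are purely illustrative): calling + parse_mo_mapping("/tmp/mo_out", "alexnet.pb") reads "/tmp/mo_out/alexnet.mapping" + and returns framework layer names mapped to the corresponding IR layer names, + e.g. {"fc8": "fc8_ir"}.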
+ + This is the basic function that provides mapping attribute for + CommonConfig class. + + :param mo_out: model optimizer output directory + :param model_name: model name (i.e. alexnet.pb for TF, alexnet.caffemodel + for Caffe) + :return: model optimizer mapping dictionary with fw layer names as keys + and ir layer names as values + """ + model_base_name = os.path.splitext(model_name)[0] + mapping_file = os.path.join(mo_out, model_base_name + ".mapping") + return mapping(resolve_file_path(mapping_file, as_str=True)) + + +class CommonConfig: + """ + Base class for E2E test classes. Provides class-level method to align + reference and IE results. + + :attr mapping: dict-like entity that maps framework (e.g. TensorFlow) + model layers to optimized model (processed by model + optimizer) layers + :attr model: model name used to detect mapping file if not specified + with mapping argument + :attr use_mo_mapping: specifies if should use MO mapping file, one can + override the value in test subclass to control + the behavior + """ + mapping = None + use_mo_mapping = True + convert_pytorch_to_onnx = None + __pytest_marks__ = tuple([ + pytest.mark.api_enabling, + pytest.mark.components("openvino.test:e2e_tests"), + ]) + + def __new__(cls, test_id, *args, **kwargs): + """Specifies all required fields for a test instance""" + instance = super().__new__(cls) + instance.test_id = test_id + instance.required_params = {} + for param_name, param_val in kwargs.items(): + if not hasattr(instance, param_name): + setattr(instance, param_name, param_val) + # Every test instance manages it's own environment. To make tests process-safe, output directories + # are redirected to a subdirectory unique for each test. + instance.environment = Environment.env.copy() + subpath = re.sub(r'[^\w\-_\. ]', "_", test_id) # filter all symbols not supported in a file systems + tmpdir_subpath = Path(TemporaryDirectory(prefix=subpath).name).name + for env_key in ["mo_out", "pytorch_to_onnx_dump_path", "pregen_irs_path"]: + instance.environment[env_key] = str(Path(instance.environment[env_key]) / tmpdir_subpath) + return instance + + def __deepcopy__(self, memo): + cls = self.__class__ + result = cls.__new__(cls, self.test_id) + memo[id(self)] = result + for key, value in self.__dict__.items(): + setattr(result, deepcopy(key, memo), deepcopy(value, memo)) + return result + + def prepare_prerequisites(self, *args, **kwargs): + """ + Prepares prerequisites required for tests: download models, references etc. + Function also may fill instance's fields. + """ + pass + + def align_results(self, ref_res, optim_model_res, xml=None): + """ + Aligns optimized model results with reference model results. + + This is achieved by changing optimized model result keys (corresponding + to output layers) to framework model results names according to + mapping attribute. + + When use_mo_mapping is False, no alignment is performed. + + If mapping is not provided, it is deduced from model attribute. + + If mapping and model both not set, no alignment is performed. 
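+ + Illustrative example (tensor names are hypothetical): with + ref_res = {"prob": arr}, optim_model_res = {"prob:0": arr} and + mapping = {"prob": "prob:0"}, the IE result is re-keyed so that both + dictionaries are returned with the single key "prob".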
+ + :param ref_res: reference model results + :param optim_model_res: + :param xml: XML file generated by MO + :return: aligned results (ref_res, optim_model_res) with same keys + """ + + log.debug(f"Aligning results") + log.debug(f"ref_res.keys() {ref_res.keys()}") + log.debug(f"optim_model_res.keys() {optim_model_res.keys()}") + if len(ref_res) == 1 and len(optim_model_res) == 1: + ref_res_vals = list(ref_res.values())[0] + ie_res_vals = list(optim_model_res.values())[0] + if (isinstance(ref_res_vals, np.ndarray) and isinstance( + ie_res_vals, np.ndarray)) and ref_res_vals.shape == ie_res_vals.shape: + ref_layer_name = next(iter(ref_res.keys())) + optim_model_res = {ref_layer_name: ie_res_vals} + ref_res = {ref_layer_name: ref_res_vals} + return ref_res, optim_model_res + + if not self.use_mo_mapping: + return ref_res, optim_model_res + + if ref_res.keys() == optim_model_res.keys(): + return ref_res, optim_model_res + + if not self.mapping: + log.debug(f"Aligning results using mapping") + pre_generated_irs = self.ie_pipeline.get('get_ir').get('pregenerated') + if pre_generated_irs: + log.info("Construct mapping attribute from pre-generated IRs") + xml_file = pre_generated_irs.get('xml') + resolved_path = resolve_file_path(xml_file, as_str=True) + self.mapping = get_tensor_names_dict(xml_ir=resolved_path) + elif not pre_generated_irs: + resolved_path = resolve_file_path(xml, as_str=True) + self.mapping = get_tensor_names_dict(xml_ir=resolved_path) + else: + error = f"{self.__class__.__name__} should use 'model' or 'model_path' attribute to define model" + raise Exception(error) + + missed_ir_layer_names = [] + missed_fw_layer_names = [] + not_contain_layers_in_mapping_err_msg = '' + not_found_layers_in_inference_err_msg = '' + for fw_layer_name in ref_res.keys(): + if fw_layer_name not in optim_model_res.keys(): + aligned_name = align_output_name(fw_layer_name, optim_model_res.keys()) + ir_layer_name = self.mapping.get(fw_layer_name, None) + + # WA for CVS-94674 + if isinstance(ir_layer_name, list): + for name in ir_layer_name: + for ov_name in optim_model_res.keys(): + if name == ov_name: + ir_layer_name = ov_name + break + if isinstance(ir_layer_name, list): + raise Exception(f"Output tensor names in references and in ov model are different\nRef names: " + f"{ref_res.keys()}\nOV names: {optim_model_res.keys()}") + + if not ir_layer_name and not aligned_name: + missed_fw_layer_names.append(fw_layer_name) + continue + if aligned_name: + optim_model_res[fw_layer_name] = optim_model_res.pop(aligned_name) + continue + if ir_layer_name not in optim_model_res: + missed_ir_layer_names.append(ir_layer_name) + continue + optim_model_res[fw_layer_name] = optim_model_res.pop(ir_layer_name) + + if missed_fw_layer_names: + not_contain_layers_in_mapping_err_msg = 'mapping file does not contain {fw_layer}. Mapping: {mapping}'\ + .format(fw_layer=missed_fw_layer_names, mapping=self.mapping) + if missed_ir_layer_names: + not_found_layers_in_inference_err_msg = 'found IR layer {ir_layer} is not found in inference result. '\ + 'available layers: {avail_layers}. 
Mapping: {mapping}' \ + .format(ir_layer=missed_ir_layer_names, + avail_layers=optim_model_res.keys(), + mapping=self.mapping) + if not_contain_layers_in_mapping_err_msg or not_found_layers_in_inference_err_msg: + raise ValueError('{}\n{}'.format(not_contain_layers_in_mapping_err_msg, + not_found_layers_in_inference_err_msg)) + return ref_res, optim_model_res + + def _add_defect(self, name, condition, params, test_name=None): + self.__pytest_marks__ += tuple([ + pytest.mark.bugs( + name, + condition, + params, + test_name + )] + ) + + def _set_test_group(self, name, condition=True, params=None, test_name=None): + mark = pytest.mark.test_group(name, condition, params, test_name) + + # Note: it is possible that other test groups are already in __pytest_marks__, + # so we wish to resolve inserted mark prior any existing test_group marks. + self.__pytest_marks__ = (mark, ) + self.__pytest_marks__ # add mark as first element in tuple. diff --git a/tests/e2e_tests/common/common/dummy_comparator.py b/tests/e2e_tests/common/common/dummy_comparator.py new file mode 100644 index 00000000000000..142ef1c7ff8e22 --- /dev/null +++ b/tests/e2e_tests/common/common/dummy_comparator.py @@ -0,0 +1,34 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from collections import OrderedDict + +from e2e_tests.common.common.common_base_class import CommonConfig +from e2e_tests.pipelines.pipeline_templates.comparators_template import dummy_comparators +from e2e_tests.pipelines.pipeline_templates.infer_templates import common_infer_step +from e2e_tests.pipelines.pipeline_templates.input_templates import read_npz_input +from e2e_tests.pipelines.pipeline_templates.ir_gen_templates import common_ir_generation +from e2e_tests.pipelines.pipeline_templates.preproc_templates import assemble_preproc +from e2e_tests.test_utils.path_utils import prepend_with_env_path, resolve_file_path +from e2e_tests.common.pytest_utils import mark + + +class IE_Infer_Only_Base(CommonConfig): + input_file = resolve_file_path("test_data/inputs/caffe/classification_imagenet.npz") + additional_mo_args = {} + + align_results = None + + def __init__(self, batch, device, precision, **kwargs): + self.__pytest_marks__ += tuple([mark("no_comparison", is_simple_mark=True)]) + model_path = prepend_with_env_path(self.model_env_key, self.model) + self.ref_pipeline = {} + self.ie_pipeline = OrderedDict([ + read_npz_input(path=self.input_file), + assemble_preproc(h=self.h, w=self.w, batch=batch, rename_inputs=[("data", self.input_name)], + permute_order=(2, 0, 1)), + common_ir_generation(mo_out=self.environment["mo_out"], model=model_path, precision=precision, + **self.additional_mo_args), + common_infer_step(device=device, batch=batch, **kwargs) + ]) + self.comparators = dummy_comparators() diff --git a/tests/e2e_tests/common/common/e2e_utils.py b/tests/e2e_tests/common/common/e2e_utils.py new file mode 100644 index 00000000000000..9555526ded5a30 --- /dev/null +++ b/tests/e2e_tests/common/common/e2e_utils.py @@ -0,0 +1,76 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from openvino.runtime import Core, Model +import torch +from typing import Any +import logging + +log = logging.getLogger(__name__) + + +def collect_tensor_names(instance: Model, tensor_type_name: str, out: dict) -> dict: + """ + @param instance: Read OpenVino model + @param tensor_type_name: Type of tensors + @param out: Dictionary for tensor names + @return: Dictionary with collected tensor names + """ + 
tensor_dicts = getattr(instance, tensor_type_name, None) + assert tensor_dicts, f"Wrong tensor type name is used: {tensor_type_name}" + for tensor in tensor_dicts: + tensor_names = getattr(tensor, 'names', None) + assert tensor_names, f"Tensor {tensor_type_name} must have 'names' field" + for tensor_name in tensor_names: + out[tensor_name] = tensor_name + return out + + +def get_tensor_names_dict(xml_ir: Any) -> dict: + """ + @param xml_ir: Path to xml part of IR + @return: output dictionary with collected tensor names + """ + log.debug(f"IR xml path: {xml_ir}") + + core = Core() + ov_model = core.read_model(model=xml_ir) + log.debug(f"Read OpenVino model: {ov_model}") + + out_dict = collect_tensor_names(ov_model, 'inputs', {}) + out_dict = collect_tensor_names(ov_model, 'outputs', out_dict) + log.debug(f"Output dictionary with collected tensor names : {out_dict}") + return out_dict + + +def mo_additional_args_static_dict(descriptor: dict, tensor_type) -> dict: + """ + Convert input descriptor to a dictionary with example inputs like + {"example_input": [torch.ones(shape, dtype=tensor_type), ...]} + @param descriptor: input descriptor as dict + @param tensor_type: dtype of example input tensors + @return: MO additional arguments as dict + """ + output_dict = {"example_input": []} + for key in descriptor.keys(): + shape = descriptor[key].get('default_shape') + output_dict["example_input"].append(torch.ones(shape, dtype=tensor_type)) + return output_dict + + +def mo_additional_args_static_str(input_descriptor: dict, port: Any = None, precision: int = 32) -> dict: + """ + Convert input descriptor to MO additional static arguments with dict like + {"input": inputs string with precision and shape} + @param input_descriptor: input descriptor as dict + @param precision: precision + @param port: port if needed + @return: MO additional arguments as dict + """ + temp = "" + precision = "{" + f"i{precision}" + "}" + port = port if port else "" + for k in input_descriptor.keys(): + temp += f"{k}{port}{precision}{str(input_descriptor[k]['default_shape']).replace(' ', '')}," + return {"input": temp[:-1]} + diff --git a/tests/e2e_tests/common/common/pipeline.py b/tests/e2e_tests/common/common/pipeline.py new file mode 100644 index 00000000000000..66056c15e97e9d --- /dev/null +++ b/tests/e2e_tests/common/common/pipeline.py @@ -0,0 +1,64 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import e2e_tests.common.readers +import e2e_tests.common.preprocessors +import e2e_tests.common.preprocessors_tf_hub +import e2e_tests.common.ir_provider +import e2e_tests.common.infer +import e2e_tests.common.postprocessors +import e2e_tests.common.ref_collector +import e2e_tests.common.model_loader +from e2e_tests.common.common.base_provider import BaseStepProvider +from types import SimpleNamespace + + +class PassThroughData(dict): + """ + Syntactic sugar around standard dictionary class.
+ Encapsulates error handling while working with passthrough_data in StepProvider classes + """ + def strict_get(self, key, step): + assert key in self, \ + "Step `{}` requires `{}` key to be defined by previous steps".format(step.__step_name__, key) + return self.get(key) + + +class Pipeline: + + def __init__(self, config, passthrough_data=None): + self._config = config + self.steps = [] + for name, params in config.items(): + self.steps.append(BaseStepProvider.provide(name, params)) + self.details = SimpleNamespace(xml=None, mo_log=None) + # passthrough_data delivers necessary data from / to steps including first step + # it doesn't have any restriction on steps being consecutive to pass the data + # steps are allowed to read and write to passthrough_data + self.passthrough_data = PassThroughData() if passthrough_data is None else PassThroughData(passthrough_data) + + def run(self): + try: + for i, step in enumerate(self.steps): + self.passthrough_data = step.execute(self.passthrough_data) + finally: + # Handle exception and fill `Pipeline_obj.details` to provide actual information for a caller + self.details.xml = self.passthrough_data.get('xml', None) + self.details.mo_log = self.passthrough_data.get('mo_log', None) + + def fetch_results(self): + if len(self.steps) == 0: + # raise ValueError("Impossible to fetch results from an empty pipeline") + return None + return self.passthrough_data.get('output', None) + + def fetch_test_info(self): + if len(self.steps) == 0: + return None + test_info = {} + for step in self.steps: + info_from_step = getattr(step, "test_info", {}) + assert len(set(test_info.keys()).intersection(info_from_step.keys())) == 0,\ + 'Some keys have been overwritten: {}'.format(set(test_info.keys()).intersection(info_from_step.keys())) + test_info.update(info_from_step) + return test_info diff --git a/tests/e2e_tests/common/comparator/__init__.py b/tests/e2e_tests/common/comparator/__init__.py new file mode 100644 index 00000000000000..289267eda225c0 --- /dev/null +++ b/tests/e2e_tests/common/comparator/__init__.py @@ -0,0 +1,11 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from . import classification +from . import dummy +from . import eltwise +from . import object_detection +from . import ocr +from . import semantic_segmentation +from . import ssim +from . import ssim_4d diff --git a/tests/e2e_tests/common/comparator/classification.py b/tests/e2e_tests/common/comparator/classification.py new file mode 100644 index 00000000000000..e8287efe3fc593 --- /dev/null +++ b/tests/e2e_tests/common/comparator/classification.py @@ -0,0 +1,85 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Classification results comparator. + +Compares reference and IE models results for top-N classes (usually, top-1 or +top-5). + +Basic result example: list of 1000 class probabilities for ImageNet +classification dataset. 
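+
+Worked example with made-up numbers: for a class whose reference probability
+is 0.91 and whose inferred probability is 0.90, the absolute difference is
+0.01 and the relative difference is about 0.011; the class is counted as
+passed when either value is below its threshold (a_eps or r_eps).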
+""" +import logging as log +import sys + +from e2e_tests.common.table_utils import make_table +from .provider import ClassProvider +from .threshold_utils import get_default_thresholds + + +class ClassificationComparator(ClassProvider): + __action_name__ = "classification" + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + def __init__(self, config, infer_result, reference): + self._config = config + self.ntop = config["ntop"] + default_thresholds = get_default_thresholds(config.get("precision", "FP32"), config.get("device", "CPU")) + self.a_eps = config.get("a_eps") if config.get("a_eps") else default_thresholds[0] + self.r_eps = config.get("r_eps") if config.get("r_eps") else default_thresholds[1] + self.infer_result = infer_result + self.reference = reference + self.ignore_results = config.get("ignore_results", False) + self.target_layers = config.get("target_layers") if config.get("target_layers") else self.infer_result.keys() + + def compare(self): + log.info( + "Running Classification comparator with following parameters:\n" + "\t\t Number compared top classes: {} \n" + "\t\t Absolute difference threshold: {}\n" + "\t\t Relative difference threshold: {}".format( + self.ntop, self.a_eps, self.r_eps)) + + table_header = [ + "Class id", "Reference prob", "Infer prob", "Abs diff", "Rel diff", + "Passed" + ] + status = [] + + assert sorted(self.infer_result.keys()) == sorted(self.reference.keys()), \ + "Output layers for comparison doesn't match.\n Output layers in infer results: {}\n" \ + "Output layers in reference: {}".format(sorted(self.infer_result.keys()), sorted(self.reference.keys())) + + layers = set(self.infer_result.keys()).intersection(self.target_layers) + assert layers, \ + "No layers for comparison specified for comparator '{}', target_layers={}, infer_results={}".format( + str(self.__action_name__), self.target_layers, self.infer_result.keys()) + + for layer in layers: + data = self.infer_result[layer] + for b in range(len(data)): + table_rows = [] + log.info("Comparing results for layer '{}' and batch {}".format( + layer, b + 1)) + infer = data[b] + ref = self.reference[layer][b] + ntop_classes_ref = list( + self.reference[layer][b].keys())[:self.ntop] + for class_id in ntop_classes_ref: + abs_diff = abs(infer[class_id] - ref[class_id]) + rel_diff = 0 if max(infer[class_id], + ref[class_id]) == 0 else abs_diff / max( + infer[class_id], ref[class_id]) + passed = (abs_diff < self.a_eps) or (rel_diff < self.r_eps) + status.append(passed) + table_rows.append([ + class_id, ref[class_id], infer[class_id], abs_diff, + rel_diff, passed + ]) + log.info("Top {} results comparison:\n{}".format( + self.ntop, make_table(table_rows, table_header))) + if self.ignore_results: + self.status = True + else: + self.status = all(status) + return self.status diff --git a/tests/e2e_tests/common/comparator/container.py b/tests/e2e_tests/common/comparator/container.py new file mode 100644 index 00000000000000..c2db37ccdcf896 --- /dev/null +++ b/tests/e2e_tests/common/comparator/container.py @@ -0,0 +1,91 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +""" Postprocessors and comparators container. + +Applies specified postprocessors to reference and IE results. +Applies specified comparators to reference and IE results. + +Typical flow: +1. Initialize with `config` that specifies comparators to use. +2. Apply postprocessors to inferred data. +3. 
Apply comparators to postprocessed data and collect comparisons results. +4. Report results. +""" +import logging as log +import sys +from collections import OrderedDict + +from e2e_tests.common.common.pipeline import PassThroughData +from e2e_tests.common.postprocessors.provider import StepProvider +from .provider import ClassProvider + + +class ComparatorsContainer: + log.basicConfig( + format="[ %(levelname)s ] %(message)s", + level=log.INFO, + stream=sys.stdout) + + def __init__(self, config, infer_result, reference, result_aligner=None, xml=None): + self._config = config + if result_aligner: + if type(reference) is list: + reference = [cur_reference for cur_reference, cur_infer_result in + map(result_aligner, reference, infer_result, xml)] + infer_result = [cur_infer_result for cur_reference, cur_infer_result in + map(result_aligner, reference, infer_result, xml)] + else: + reference, infer_result = result_aligner(reference, infer_result, xml) + self.comparators = OrderedDict() + for name, comparator in config.items(): + self.comparators[name] = ClassProvider.provide( + name, + config=comparator, + infer_result=infer_result, + reference=reference) + self._set_postprocessors() + + def apply_postprocessors(self): + for _, comparator in self.comparators.items(): + if comparator.postprocessors is not None: + infer_data = PassThroughData({'output': comparator.infer_result}) + infer_data = comparator.postprocessors.execute(infer_data) + comparator.infer_result = infer_data['output'] + + reference_data = PassThroughData({'output': comparator.reference}) + reference_data = comparator.postprocessors.execute(reference_data) + comparator.reference = reference_data['output'] + + def apply_all(self): + for _, comparator in self.comparators.items(): + comparator.compare() + + def report_statuses(self): + statuses = [] + for name, comparator in self.comparators.items(): + if getattr(comparator, "ignore_results", False): + log.info("Results comparison in comparator '{}' ignored!". + format(name)) + continue + if comparator.status: + log.info("Results comparison in comparator '{}' passed!".format( + name)) + else: + log.error("Results comparison in comparator '{}' failed!". + format(name)) + statuses.append(comparator.status) + if len(statuses) == 0: + log.warning( + "Statuses of all comparators are ignored! Test will be failed") + return False + else: + return all(statuses) + + def _set_postprocessors(self): + for _, comparator in self.comparators.items(): + if "postprocessors" in comparator._config: + comparator_postproc = comparator._config["postprocessors"] + comparator.postprocessors = StepProvider(comparator_postproc) + else: + comparator.postprocessors = None diff --git a/tests/e2e_tests/common/comparator/dummy.py b/tests/e2e_tests/common/comparator/dummy.py new file mode 100644 index 00000000000000..d3685a5c1e7ac7 --- /dev/null +++ b/tests/e2e_tests/common/comparator/dummy.py @@ -0,0 +1,38 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import logging as log + +from e2e_tests.common.table_utils import make_table +from .provider import ClassProvider +import sys + + +class Dummy(ClassProvider): + __action_name__ = "dummy" + log.basicConfig( + format="[ %(levelname)s ] %(message)s", + level=log.INFO, + stream=sys.stdout) + + def __init__(self, config, infer_result, reference): + self._config = {} + self.infer_result = infer_result + self.reference = reference + + def compare(self): + log.info("Running Dummy comparator. 
No comparison performed") + + table_header = ["Layer Name", "Shape", "Data Range"] + + if self.infer_result: + table_rows = [] + for layer, data in self.infer_result.items(): + table_rows.append([layer, str(data.shape), "[{:.3f}, {:.3f}]".format(data.min(), data.max())]) + log.info("Inference Engine tensors statistic:\n{}".format(make_table(table_rows, table_header))) + if self.reference: + table_rows = [] + for layer, data in self.reference.items(): + table_rows.append([layer, str(data.shape), "[{:.3f}, {:.3f}]".format(data.min(), data.max())]) + log.info("Reference tensors statistic:\n{}".format(make_table(table_rows, table_header))) + self.status = True diff --git a/tests/e2e_tests/common/comparator/eltwise.py b/tests/e2e_tests/common/comparator/eltwise.py new file mode 100644 index 00000000000000..f271052109746c --- /dev/null +++ b/tests/e2e_tests/common/comparator/eltwise.py @@ -0,0 +1,130 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import logging as log +import re +import sys + +import numpy as np + +from e2e_tests.common.table_utils import make_table +from .provider import ClassProvider +from .threshold_utils import get_default_thresholds + + +class EltwiseComparator(ClassProvider): + __action_name__ = "eltwise" + log.basicConfig( + format="[ %(levelname)s ] %(message)s", + level=log.INFO, + stream=sys.stdout) + + def __init__(self, config, infer_result, reference): + default_thresholds = get_default_thresholds( + config.get("precision", "FP32"), config.get("device", "CPU")) + self.a_eps = config.get("a_eps") if config.get("a_eps") else default_thresholds[0] + self.r_eps = config.get("r_eps") if config.get("r_eps") else default_thresholds[1] + self.mean_r_eps = config.get("mean_r_eps") if config.get("mean_r_eps") else default_thresholds[2] + self._config = config + self.infer_result = infer_result + self.reference = reference + self.ignore_results = config.get("ignore_results", False) + self.target_layers = config.get("target_layers") if config.get("target_layers") else self.infer_result.keys() + + def compare(self): + log.info("Running Element-Wise comparator with following parameters:\n" + "\t\t Absolute difference threshold: {}\n" + "\t\t Relative difference threshold: {}".format(self.a_eps, self.r_eps)) + + statuses = [] + table_header = [ + "Layer name", "Shape", "Data type", "Infer range", "Reference range", "Max Abs diff", + "Max Abs diff ind", "Max Rel diff", "Max Rel diff ind", "Mean Rel diff", "Passed" + ] + table_rows = [] + + if sorted(self.infer_result.keys()) != sorted(self.reference.keys()): + log.warning("Output layers for comparison doesn't match.\n Output layers in infer results: {}\n" + "Output layers in reference: {}".format(sorted(self.infer_result.keys()), + sorted(self.reference.keys()))) + + layers = set(self.infer_result.keys()).intersection(self.target_layers) + assert layers, \ + "No layers for comparison specified for comparator '{}', target_layers={}, infer_results={}".format( + str(self.__action_name__), self.target_layers, self.infer_result.keys()) + for layer in layers: + data = self.infer_result[layer] + ref = self.reference[layer] + if data.shape != ref.shape: + log.error("Shape of IE output {} isn't equal with shape of FW output {} for layer '{}'. 
" + "Run Dummy comparator to get statistics.".format(data.shape, ref.shape, layer)) + from e2e_tests.common.comparator.dummy import Dummy + Dummy({}, infer_result={layer: data}, reference={layer: ref}).compare() + statuses.append(False) + if not np.any(data) and not np.any(ref): + log.info("Array of IE and FW output {} is zero".format(layer)) + continue + else: + # In case when there are inf/nan in data + if (np.isnan(data)==np.isnan(ref)).all() and (np.isinf(data)==np.isinf(ref)).all(): + log.info("All output values were 'nan'/'inf' have converted to numbers") + data = np.nan_to_num(data) + ref = np.nan_to_num(ref) + # In case when there are boolean datatype + if (data.dtype == np.bool_) and (ref.dtype == np.bool_): + data = data.astype('float32') + ref = ref.astype('float32') + # Compare output tensors + abs_diff = np.absolute(data - ref) + # In case when there are zeros in data and/or ref tensors, rel error is undefined, + # ignore corresponding 'invalid value in true_divide' warning + with np.errstate(invalid='ignore'): + rel_diff = np.array(abs_diff / np.maximum(np.absolute(data), np.absolute(ref))) + status = ((abs_diff < self.a_eps) | (rel_diff < self.r_eps)).all() + # Compare types of output tensors + data_type = re.sub(r'\d*', '', data.dtype.name) + ref_type = re.sub(r'\d*', '', ref.dtype.name) + common_type = data_type if data_type == ref_type else "mixed" + if common_type == "mixed": + log.error("Type of IE output {} isn't equal with type of FW output {} for layer '{}'" + .format(data_type, ref_type, layer)) + status = False + + statuses.append(status) + # Collect statistics + infer_max = np.amax(data) + infer_min = np.amin(data) + infer_range_str = "[{:.3f}, {:.3f}]".format(infer_min, infer_max) + ref_max = np.amax(ref) + ref_min = np.amin(ref) + ref_range_str = "[{:.3f}, {:.3f}]".format(ref_min, ref_max) + max_abs_diff = np.amax(abs_diff) + max_abs_diff_ind = np.unravel_index( + np.argmax(abs_diff), abs_diff.shape) + max_rel_diff = np.amax(rel_diff) + max_rel_diff_ind = np.unravel_index( + np.argmax(rel_diff), rel_diff.shape) + + # In case when there are zeros in data and/or ref tensors, rel error is undefined, + # ignore corresponding 'invalid value in true_divide' warning + with np.errstate(invalid='ignore'): + mean_rel_diff = np.mean(rel_diff) + if self.mean_r_eps is not None: + status = status and (mean_rel_diff < self.mean_r_eps).all() + statuses.append(status) + + table_rows.append([ + layer, data.shape, common_type, infer_range_str, ref_range_str, max_abs_diff, + max_abs_diff_ind, max_rel_diff, max_rel_diff_ind, mean_rel_diff, status + ]) + if np.isnan(rel_diff).all(): + log.warning("Output data for layer {} consists only of zeros in both " + "inference and reference results.".format(layer)) + + log.info("Element-Wise comparison statistic:\n{}".format(make_table(table_rows, table_header))) + + if self.ignore_results: + self.status = True + else: + self.status = all(statuses) + return self.status diff --git a/tests/e2e_tests/common/comparator/object_detection.py b/tests/e2e_tests/common/comparator/object_detection.py new file mode 100644 index 00000000000000..c530338819ae05 --- /dev/null +++ b/tests/e2e_tests/common/comparator/object_detection.py @@ -0,0 +1,266 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import copy +import logging as log +import sys +from collections import OrderedDict + +import numpy as np + +from .threshold_utils import get_default_thresholds, get_default_iou_threshold +from e2e_tests.common.table_utils 
import make_table +from .provider import ClassProvider + + +class ObjectDetectionComparator(ClassProvider): + __action_name__ = "object_detection" + log.basicConfig( + format="[ %(levelname)s ] %(message)s", + level=log.INFO, + stream=sys.stdout) + + def __init__(self, config, infer_result, reference): + self._config = config + default_thresholds = get_default_thresholds(config.get("precision", "FP32"), config.get("device", "CPU")) + self.infer_result = infer_result + self.reference = reference + self.a_eps = config.get("a_eps") if config.get("a_eps") else default_thresholds[0] + self.r_eps = config.get("r_eps") if config.get("r_eps") else default_thresholds[1] + self.p_thr = config["p_thr"] + self.iou_thr = config.get("iou_thr") if config.get("iou_thr") else get_default_iou_threshold( + config.get("precision", "FP32"), config.get("device", "CPU")) + self.ignore_results = config.get('ignore_results', False) + self.mean_iou_only = config.get("mean_only_iou", False) + self.target_layers = config.get("target_layers") if config.get("target_layers") else self.infer_result.keys() + + def intersection_over_union(self, pred_coord, ref_coord): + """ + :param pred_coord: dict with coordinates of one bound box from predicted ones + :param ref_coord: dict with coordinates of one bound box from reference set + :return: float value of IOU metric for one pair of bound boxes + """ + if (pred_coord['xmax'] < ref_coord['xmin']) or ( + ref_coord['xmax'] < pred_coord['xmin']) or ( + ref_coord['ymax'] < pred_coord['ymin']) or ( + pred_coord['ymax'] < ref_coord['ymin']): + iou = 0 + else: + intersection_coord = {} + intersection_coord['xmin'] = max(pred_coord['xmin'], + ref_coord['xmin']) + intersection_coord['xmax'] = min(pred_coord['xmax'], + ref_coord['xmax']) + intersection_coord['ymin'] = max(pred_coord['ymin'], + ref_coord['ymin']) + intersection_coord['ymax'] = min(pred_coord['ymax'], + ref_coord['ymax']) + intersection_square = (intersection_coord['xmax'] - intersection_coord['xmin']) * \ + (intersection_coord['ymax'] - intersection_coord['ymin']) + union_square = (pred_coord['xmax'] - pred_coord['xmin']) * (pred_coord['ymax'] - pred_coord['ymin']) + \ + (ref_coord['xmax'] - ref_coord['xmin']) * ( + ref_coord['ymax'] - ref_coord['ymin']) - intersection_square + if union_square == 0: + iou = 1 + else: + iou = intersection_square / union_square + return iou if not np.isnan(iou) else 0 + + def prob_threshold_filter(self, threshold, data): + """ + Filters bound boxes by probability + :param threshold: probability threshold + :param data: reference or prediction data as it comes + :return:filtered version of data, number of deleted bound boxes + """ + deleted_bound_boxes = 0 + filtered_data = {} + for layer in data.keys(): + if layer in self.target_layers: + filtered_data[layer] = [] + for batch_num in range(len(data[layer])): + batch_filtered = [bbox for bbox in data[layer][batch_num] if bbox['prob'] >= threshold] + deleted_bound_boxes += len(data[layer][batch_num]) - len(batch_filtered) + if batch_filtered: + filtered_data[layer].append(batch_filtered) + return filtered_data, deleted_bound_boxes + + def prob_dif_threshold(self, pairs): + """ + True if absolute or relative threshold is passed + :param pairs: list of dicts with pairs of bound boxes + :return: same list of dicts with pairs with 'prob_status' value added + """ + flag = True # False if at least one pair has False status + for i in range(len(pairs)): + if pairs[i]['abs_diff'] < self.a_eps or pairs[i]['rel_diff'] < self.r_eps: + 
pairs[i]['prob_status'] = True + else: + pairs[i]['prob_status'] = False + flag = False + return pairs, flag + + def iou_threshold(self, pairs): + """ + True if IOU threshold is passed + :param pairs: list of dicts with pairs of bound boxes + :return: same list of dicts with pairs with 'iou_status' value added + """ + flag = True # False if at least one pair has False status + for i in range(len(pairs)): + if pairs[i]['iou'] > self.iou_thr: + pairs[i]['iou_status'] = True + else: + pairs[i]['iou_status'] = False + flag = False + return pairs, flag + + def find_matches(self, prediction, reference): + """ + matrix with IOU values is constructed for every class in every batch + (rows -- reference bound boxes, columns -- predicted bound boxes) + pairs of bound boxes from reference and prediction sets are chosen by taking + the maximum value from this matrix until all possible ones are found + :param prediction: filtered prediction data + :param reference: filtered reference data + :return: overall status + """ + status = [] + layers = set(prediction.keys()).intersection(self.target_layers) + assert layers, "No layers for comparison specified for comparator '{}'".format(str(self.__action_name__)) + for layer in layers: + for batch_num in range(len(prediction[layer])): + force_fail = False + log.info("Comparing results for layer '{}' and batch {}".format(layer, batch_num)) + matrix = {} + ref_detections = reference[layer][batch_num] + pred_detections = prediction[layer][batch_num] + detected_classes = set([bbox['class'] for bbox in ref_detections]) + + # Number of detections check + if len(ref_detections) != len(pred_detections): + log.error( + "Number of detected objects is different in batch {} for layer '{}' (reference: {}, inference: {})".format( + batch_num, layer, len(ref_detections), len(pred_detections))) + force_fail = True + else: + if len(ref_detections) == 0: + log.error( + "Reference doesn't contain detections in batch {} for layer '{}'".format(batch_num, layer)) + force_fail = True + + if len(pred_detections) == 0: + log.error( + "Prediction doesn't contain detections in batch {} for layer '{}'".format(batch_num, layer)) + force_fail = True + if len(ref_detections) == 0 and len(pred_detections) == 0: + force_fail = False + log.error("Both reference and prediction results doesn't contain " + "detections in batch {} for layer '{}'. 
Test will not be force failed".format( + batch_num, layer)) + if detected_classes != set([bbox['class'] for bbox in pred_detections]): + log.error( + "Classes of detected objects are different in batch {} for layer '{}'".format(batch_num, layer)) + force_fail = True + + if force_fail: + status.append(False) + continue + + # Computing IoU for objects with equal class, IoU for objects with diff class == 0 + for class_num in detected_classes: + matrix[class_num] = np.zeros((len(ref_detections), len(pred_detections))) + for i, ref_bbox in enumerate(ref_detections): + for j, pred_bbox in enumerate(pred_detections): + if ref_bbox['class'] == pred_bbox['class']: + matrix[ref_bbox['class']][i][j] = self.intersection_over_union(ref_bbox, pred_bbox) + + required_pairs_len = 0 + pairs = [] + no_detections = False + for class_num in detected_classes: + if np.max(matrix[class_num]) == 0: + log.warning( + "There is no pair of detections which has IOU > 0 for class {}".format(class_num)) + no_detections = True + else: + required_pairs_len += len([1 for bbox in ref_detections if bbox['class'] == class_num]) + while len(pairs) != required_pairs_len: + # Search pair of detected objects with max IoU + i, j = np.unravel_index(np.argmax(matrix[class_num], axis=None), matrix[class_num].shape) + ref_bbox = ref_detections[i] + pred_bbox = pred_detections[j] + pairs.append( + OrderedDict( + [('class_num', class_num), + ('ref_prob', ref_bbox['prob']), + ('pred_prob', pred_bbox['prob']), + ('iou', np.amax(matrix[class_num])), + ('abs_diff', abs(ref_bbox['prob'] - pred_bbox['prob'])), + ('rel_diff', + abs(ref_bbox['prob'] - pred_bbox['prob']) / max(ref_bbox['prob'], + pred_bbox['prob'])), + ('ref_coord', + ((round(ref_bbox['xmin'], 3), round(ref_bbox['ymin'], 3)), + (round(ref_bbox['xmax'], 3), round(ref_bbox['ymax'], 3)) + ) + ), + ('pred_coord', + ((round(pred_bbox['xmin'], 3), round(pred_bbox['ymin'], 3)), + (round(pred_bbox['xmax'], 3), round(pred_bbox['ymax'], 3)))) + ]) + ) + # Fill matrix with zeroes for found objects + matrix[class_num][i] = np.zeros(matrix[class_num].shape[1]) + matrix[class_num][:, j] = np.zeros(matrix[class_num].shape[0]) + + if pairs: + mean_iou = np.mean([pair['iou'] for pair in pairs]) + pairs, flag_prob = self.prob_dif_threshold(pairs) + if not self.mean_iou_only: + pairs, flag_iou = self.iou_threshold(pairs) + table_rows = [[pair[key] for key in pair.keys()] for pair in pairs] + log.info('\n' + make_table(table_rows, pairs[0].keys())) + log.info("Mean IOU is {}".format(mean_iou)) + if no_detections: + status.append(False) + else: + if self.mean_iou_only: + status.append(flag_prob and mean_iou >= self.iou_thr) + else: + status.append(all([flag_prob, flag_iou])) + else: + status.append(False) + log.warning("No detection pairs have IOU > 0 for batch {}".format(batch_num)) + + return all(status) if not self.ignore_results else True + + def logs_prereq(self): + log.info( + "Running Object Detection comparator with following parameters:\n" + "\t\t Probability threshold: {} \n" + "\t\t Absolute difference threshold: {}\n" + "\t\t Relative difference threshold: {}\n" + "\t\t IOU threshold: {}".format(self.p_thr, self.a_eps, self.r_eps, + self.iou_thr)) + if self.mean_iou_only: + log.info("For comparison will be used mean IoU of all boxes' pairs instead IoU of every pair") + if sorted(self.infer_result.keys()) != sorted(self.reference.keys()): + log.error("Output layers for comparison doesn't match.\n Output layers in infer results: {}\n" \ + "Output layers in reference: 
{}".format(sorted(self.infer_result.keys()), + sorted(self.reference.keys()))) + + def compare(self): + self.logs_prereq() + log.debug("Original reference results: {}".format(self.reference)) + log.debug("Original IE results: {}".format(self.infer_result)) + infer_result_filtered, infer_num_deleted = self.prob_threshold_filter(self.p_thr, self.infer_result) + reference_filtered, reference_num_deleted = self.prob_threshold_filter(self.p_thr, self.reference) + log.info("{} predictions were deleted from IE predictions set after comparing with probability threshold!" + .format(str(infer_num_deleted))) + log.info("{} predictions were deleted from reference set after comparing with probability threshold!" + .format(str(reference_num_deleted))) + log.debug("Filtered reference results: {}".format(self.reference)) + log.debug("Filtered IE results: {}".format(self.infer_result)) + self.status = self.find_matches(infer_result_filtered, reference_filtered) + return self.status diff --git a/tests/e2e_tests/common/comparator/ocr.py b/tests/e2e_tests/common/comparator/ocr.py new file mode 100644 index 00000000000000..e3f16c308c94cc --- /dev/null +++ b/tests/e2e_tests/common/comparator/ocr.py @@ -0,0 +1,83 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Optical character recognition output comparator. + +Compares reference and IE model results for top-N paths. + +Basic result example: list of paths with probabilities +""" +import logging as log +import sys + +from .threshold_utils import get_default_thresholds +from e2e_tests.common.table_utils import make_table +from .provider import ClassProvider + + +class OCRComparator(ClassProvider): + __action_name__ = "ocr" + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + def __init__(self, config, infer_result, reference): + self._config = config + default_thresholds = get_default_thresholds(config.get("precision", "FP32"), config.get("device", "CPU")) + self.a_eps = config.get("a_eps") if config.get("a_eps") else default_thresholds[0] + self.r_eps = config.get("r_eps") if config.get("r_eps") else default_thresholds[1] + self.infer_result = infer_result + self.reference = reference + self.ignore_results = config.get("ignore_results", False) + self.top_paths = config.get("top_paths") + self.beam_width = config.get("beam_width") + + def compare(self): + log.info( + "Running OCR comparator with following parameters:\n" + "\t\t Number compared top paths: {} \n" + "\t\t Absolute difference threshold: {}\n" + "\t\t Relative difference threshold: {}".format(self.top_paths, self.a_eps, self.r_eps)) + + table_header = ["Reference predicted text", "Reference probability", "IE probability", "Abs diff", "Rel diff", + "Passed"] + statuses = [] + + assert sorted(self.infer_result.keys()) == sorted(self.reference.keys()), \ + "Output layers for comparison doesn't match.\n Output layers in infer results: {}\n" \ + "Output layers in reference: {}".format(sorted(self.infer_result.keys()), sorted(self.reference.keys())) + + data = self.infer_result + for batch in range(len(data["predictions"])): + table_rows = [] + log.info("Comparing results for batch {}".format(batch + 1)) + ie_predicts = data["predictions"][batch] + ie_probs = data["probs"][batch] + ref_predicts = self.reference["predictions"][batch] + ref_probs = self.reference["probs"][batch] + for ref_predict, ref_prob in zip(ref_predicts, ref_probs): + if ref_predict in ie_predicts: + abs_diff = 
abs(ie_probs[ie_predicts.index(ref_predict)] - ref_prob) + rel_diff = 0 if max(ie_probs[ie_predicts.index(ref_predict)], + ref_prob) == 0 else \ + abs_diff / max(ie_probs[ie_predicts.index(ref_predict)], ref_prob) + status = (abs_diff < self.a_eps) or (rel_diff < self.r_eps) + statuses.append(status) + + table_rows.append([ + ref_predict, ref_prob, ie_probs[ie_predicts.index(ref_predict)], abs_diff, + rel_diff, status + ]) + else: + status = False + statuses.append(status) + table_rows.append([ + ref_predict, ref_prob, None, None, + None, status + ]) + + log.info("Top {} results comparison:\n{}".format( + self.top_paths, make_table(table_rows, table_header))) + if self.ignore_results: + self.status = True + else: + self.status = all(statuses) + return self.status diff --git a/tests/e2e_tests/common/comparator/provider.py b/tests/e2e_tests/common/comparator/provider.py new file mode 100644 index 00000000000000..005063ec850d09 --- /dev/null +++ b/tests/e2e_tests/common/comparator/provider.py @@ -0,0 +1,20 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import inspect +from e2e_tests.common.common.base_provider import BaseProvider + + +class ClassProvider(BaseProvider): + __step_name__ = "compare" + registry = {} + + @classmethod + def validate(cls): + methods = [ + f[0] for f in inspect.getmembers(cls, predicate=inspect.isfunction) + ] + if 'compare' not in methods: + raise AttributeError( + "Requested class {} registred as '{}' doesn't provide required method compare" + .format(cls.__name__, cls.__action_name__)) diff --git a/tests/e2e_tests/common/comparator/semantic_segmentation.py b/tests/e2e_tests/common/comparator/semantic_segmentation.py new file mode 100644 index 00000000000000..ea892ab3d9b843 --- /dev/null +++ b/tests/e2e_tests/common/comparator/semantic_segmentation.py @@ -0,0 +1,73 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import logging as log +import sys +import numpy as np +from e2e_tests.common.table_utils import make_table + +from .provider import ClassProvider +from e2e_tests.common.comparator.threshold_utils import get_default_iou_threshold + + + +class SemanticSegmentationComparator(ClassProvider): + __action_name__ = "semantic_segmentation" + log.basicConfig( + format="[ %(levelname)s ] %(message)s", + level=log.INFO, + stream=sys.stdout) + + def __init__(self, config, infer_result, reference): + ''' + Comparator takes reference and inference matrices of image size that contain a class + number for every image pixel and counts relative error. + Data should have both layer and batch dimensions. 
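+
+ Illustrative example with tiny made-up masks: for ref = [[1, 1], [0, 0]] and
+ pred = [[1, 0], [0, 0]], class 1 contributes intersection 1 and union 2,
+ class 0 contributes intersection 2 and union 3, so the reported mean IoU is
+ (1 + 2) / (2 + 3) = 0.6 and the batch passes when this value exceeds the
+ threshold.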
+ ''' + self._config = config + self.thr = config.get("thr") if config.get("thr") else get_default_iou_threshold(config.get("precision", "FP32"), + config.get("device", "CPU")) + self.infer_result = infer_result + self.reference = reference + self.ignore_results = config.get('ignore_results', False) + self.target_layers = config.get("target_layers") if config.get("target_layers") else self.infer_result.keys() + + + def compare(self): + compared = False + log.info("Running Semantic Segmentation comparator with threshold: {}\n".format(self.thr)) + table_header = ["Layer name", "Class Number", "Class intersect part", "Class union part", "Class iou"] + statuses = [] + for layer in self.reference.keys(): + if self.target_layers and (layer in self.target_layers): + compared = True + for batch_num in range(len(self.reference[layer])): + table_rows = [] + ref_batch = self.reference[layer][batch_num] + pred_batch = self.infer_result[layer][batch_num] + intersect_sum = union_sum = 0 + for pixel_class in np.unique(ref_batch): + intersect = np.sum(np.logical_and(ref_batch == pixel_class, pred_batch == pixel_class)) + union = np.sum(np.logical_or(ref_batch == pixel_class, pred_batch == pixel_class)) + intersect_sum += intersect + union_sum += union + iou = intersect / union + class_part_intersect = intersect / (pred_batch.shape[0] * pred_batch.shape[1]) + class_part_union = union / (pred_batch.shape[0] * pred_batch.shape[1]) + table_rows.append([layer, pixel_class, class_part_intersect, class_part_union, iou]) + log.info("Semantic Segmentation comparison statistic:\n{}".format( + make_table(table_rows, table_header))) + + mean_iou = intersect_sum / union_sum + statuses.append(mean_iou > self.thr) + log.info("IoU between segmentations with the same class form reference and inference: {}".format(mean_iou)) + log.info("Batch {0} status: {1}".format(str(batch_num), str(statuses[-1]))) + + if compared == False: + log.info("Comparator {} has nothing to compare".format(str(self.__action_name__))) + if self.ignore_results: + self.status = True + else: + self.status = all(statuses) + return self.status + diff --git a/tests/e2e_tests/common/comparator/ssim.py b/tests/e2e_tests/common/comparator/ssim.py new file mode 100644 index 00000000000000..ffabd0058d8878 --- /dev/null +++ b/tests/e2e_tests/common/comparator/ssim.py @@ -0,0 +1,84 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import logging as log +import sys +import numpy as np +from skimage.metrics import structural_similarity as ssim + +from .provider import ClassProvider +from e2e_tests.common.comparator.threshold_utils import get_default_ssim_threshold + + +class SSIMComparator(ClassProvider): + __action_name__ = "ssim" + log.basicConfig( + format="[ %(levelname)s ] %(message)s", + level=log.INFO, + stream=sys.stdout) + + def __init__(self, config, infer_result, reference): + self._config = config + self.ssim_thr = config.get("ssim_thr") if config.get("ssim_thr") else get_default_ssim_threshold( + config.get("precision", "FP32"), config.get("device", "CPU")) + self.infer_result = infer_result + self.reference = reference + self.ignore_results = config.get("ignore_results", False) + self.target_layers = config.get("target_layers") if config.get("target_layers") else self.infer_result.keys() + + def compare(self): + log.info(f"Running SSIM comparator with following threshold " + f"(the higher SSIM (0-1), the better the result): {self.ssim_thr}\n") + if sorted(self.infer_result.keys()) != 
sorted(self.reference.keys()): + log.warning(f"Output layers for comparison doesn't match.\n " + f"Output layers in infer results: {sorted(self.infer_result.keys())}\n " + f"Output layers in reference: {sorted(self.reference.keys())}") + layers = set(self.infer_result.keys()).intersection(self.target_layers) + assert layers, f"No layers for comparison specified for comparator '{self.__action_name__}'" + + statuses = [] + for layer in layers: + for batch_num in range(len(self.infer_result[layer])): + log.info(f"Comparing results for layer '{layer}' and batch {batch_num}") + data = self.infer_result[layer][batch_num] + ref = self.reference[layer][batch_num] + + # In case when there are inf/nan in data + if np.isnan(data).any() or np.isinf(data).any() or np.isnan(ref).any() or np.isinf(ref).any(): + log.info(f"Data or reference for layer {layer} contains np.nan or np.inf values. " + f"Lets compare their positions before filtering") + if (np.isnan(data) == np.isnan(ref)).all(): + log.info(f"Data and reference for layer {layer} contains np.nan values at the same positions. " + f"Filtering them") + data, ref = np.nan_to_num(data), np.nan_to_num(ref) + else: + log.info(f"Data and reference for layer {layer} contains np.nan values but not at " + f"the same positions. Proceed further") + + if (np.isinf(data) == np.isinf(ref)).all(): + log.info(f"Data and reference for layer {layer} contains np.inf values at the same positions. " + f"Filtering them") + data, ref = np.nan_to_num(data), np.nan_to_num(ref) + else: + log.info(f"Data and reference for layer {layer} contains np.inf values but not at " + f"the same positions. Proceed further") + + assert data.shape == ref.shape, \ + f"Shape of IE output isn't equal with shape of FW output for layer '{layer}'" + args = {"im1": data, "im2": ref, "data_range": 255, "multichannel": True} + win_size = min(data.shape) + if win_size > 1: + args.update({'win_size': win_size}) + elif win_size == 1: + args.update({'win_size': win_size, 'use_sample_covariance': False}) + else: + raise ValueError("win_size parameter must not be < 1") + ssim_value = ssim(**args) + statuses.append(ssim_value > self.ssim_thr) + log.info(f"SSIM value is: {ssim_value}") + + if self.ignore_results: + self.status = True + else: + self.status = all(statuses) + return self.status diff --git a/tests/e2e_tests/common/comparator/ssim_4d.py b/tests/e2e_tests/common/comparator/ssim_4d.py new file mode 100644 index 00000000000000..3b90b6fcaf5246 --- /dev/null +++ b/tests/e2e_tests/common/comparator/ssim_4d.py @@ -0,0 +1,72 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import logging as log +import sys +from statistics import mean + +from skimage.metrics import structural_similarity as ssim + +from .provider import ClassProvider +from e2e_tests.common.comparator.threshold_utils import get_default_ssim_threshold + + +class SSIM_4D_Comparator(ClassProvider): + + __action_name__ = "ssim_4d" + + log.basicConfig( + format="[ %(levelname)s ] %(message)s", + level=log.INFO, + stream=sys.stdout) + + def __init__(self, config, infer_result, reference): + self._config = config + self.ssim_thr = config.get("ssim_4d_thr") if config.get( + "ssim_4d_thr") else get_default_ssim_threshold( + config.get("precision", "FP32"), config.get("device", "CPU")) + self.infer_result = infer_result + self.reference = reference + self.ignore_results = config.get("ignore_results", False) + self.target_layers = config.get("target_layers") if config.get( + "target_layers") else 
self.infer_result.keys() + self.win_size = config.get("win_size") + + def compare(self): + log.info( + "Running 4D SSIM comparator with following threshold" + "(the higher mean SSIM (0-1), the better the result): {}\n".format(self.ssim_thr)) + if sorted(self.infer_result.keys()) != sorted(self.reference.keys()): + log.warning( + "Output layers for comparison doesn't match.\n Output layers in infer results: {}\n" + "Output layers in reference: {}".format(sorted(self.infer_result.keys()), + sorted(self.reference.keys()))) + layers = set(self.infer_result.keys()).intersection(self.target_layers) + assert layers, "No layers for comparison specified for comparator '{}'".format( + str(self.__action_name__)) + + statuses = [] + for layer in layers: + for batch_num in range(len(self.infer_result[layer])): + log.info("Comparing results for layer '{}' and batch {}".format(layer, batch_num)) + data = self.infer_result[layer][batch_num] + ref = self.reference[layer][batch_num] + assert data.shape == ref.shape, "Shape of IE output isn't equal with shape of" \ + "FW output for layer '{}'".format(layer) + dim_count = len(data.shape) + assert dim_count == 4, "The number of dimensions in the output ({})" \ + " isn't equal 4.".format(dim_count) + ssim_values = [] + for image_num in range(data.shape[0]): + data_range = ref[image_num].max() - ref[image_num].min() + image_ssim = ssim(data[image_num], ref[image_num], data_range=data_range, multichannel=True, win_size=self.win_size) + ssim_values.append(image_ssim) + mean_ssim = mean(ssim_values) + statuses.append(mean_ssim > self.ssim_thr) + log.info("Mean SSIM value is: {}".format(mean_ssim)) + + if self.ignore_results: + self.status = True + else: + self.status = all(statuses) + return self.status diff --git a/tests/e2e_tests/common/comparator/threshold_utils.py b/tests/e2e_tests/common/comparator/threshold_utils.py new file mode 100644 index 00000000000000..05d368d5b8d875 --- /dev/null +++ b/tests/e2e_tests/common/comparator/threshold_utils.py @@ -0,0 +1,86 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import sys +import logging as log + +# default thresholds for comparators +DEFAULT_THRESHOLDS = { + "FP32": (1e-4, 1e-4, None), + "BF16": (2, 2, None), + "FP16": (0.01, 2, None) +} + +DEFAULT_IOU_THRESHOLDS = { + "FP32": 0.9, + "BF16": 0.8, + "FP16": 0.8 +} + +DEFAULT_SSIM_THRESHOLDS = { + "FP32": 0.99, + "BF16": 0.9, + "FP16": 0.9 +} + +# fallback thresholds if precision not found +FALLBACK_EPS = (1e-4, 1e-4, None) + + +def get_default_thresholds(precision, device): + """Get default comparison thresholds (a_eps, r_eps) for specific precision. + + :param precision: network's precision (e.g. FP16) + :return: pair of thresholds (absolute eps, relative eps) + """ + # setup logger + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + if precision not in DEFAULT_THRESHOLDS: + log.warning("Specified precision {precision} for comparison thresholds " + "not found. Using {fallback} instead.".format(precision=precision, + fallback=FALLBACK_EPS)) + + #for FPGA FP16 thresholds are used always + if "FPGA" in device or "HDDL" in device: + return DEFAULT_THRESHOLDS.get("FP16", FALLBACK_EPS) + + return DEFAULT_THRESHOLDS.get(precision, FALLBACK_EPS) + + +def get_default_iou_threshold(precision, device): + """Get default comparison thresholds (a_eps, r_eps) for specific precision. + + :param precision: network's precision (e.g. 
FP16) + :return: pair of thresholds (absolute eps, relative eps) + """ + # setup logger + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + if precision not in DEFAULT_IOU_THRESHOLDS: + log.warning("Specified precision {precision} for comparison thresholds " + "not found. Using {fallback} instead.".format(precision=precision, + fallback=0.9)) + + # for FPGA FP16 thresholds are used always + if "FPGA" in device or "HDDL" in device: + return DEFAULT_IOU_THRESHOLDS.get("FP16", FALLBACK_EPS) + + return DEFAULT_IOU_THRESHOLDS.get(precision, 0.9) + + +def get_default_ssim_threshold(precision, device): + """Get default comparison thresholds (a_eps, r_eps) for specific precision. + + :param precision: network's precision (e.g. FP16) + :return: pair of thresholds (absolute eps, relative eps) + """ + # setup logger + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + if precision not in DEFAULT_SSIM_THRESHOLDS: + log.warning("Specified precision {precision} for comparison thresholds " + "not found. Using {fallback} instead.".format(precision=precision, + fallback=0.9)) + # for FPGA FP16 thresholds are used always + if "FPGA" in device or "HDDL" in device: + return DEFAULT_SSIM_THRESHOLDS.get("FP16", FALLBACK_EPS) + + return DEFAULT_SSIM_THRESHOLDS.get(precision, 0.9) diff --git a/tests/e2e_tests/common/config.py b/tests/e2e_tests/common/config.py new file mode 100644 index 00000000000000..bd55e724d82d44 --- /dev/null +++ b/tests/e2e_tests/common/config.py @@ -0,0 +1,117 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +""" Fields for logger """ +import os +import re + +from .core import get_bool, get_list, get_path + + +class StrippingLists: + DEFAULT_SENSITIVE_KEYS_TO_BE_MASKED = [ + r"(?!zabbix_operator_initial_).*pass(word)?", r".*client_id", r".*(access)?(_)?(? api-on-commit tests are not run, + True -> api-on-commit tests are run, default: False""" +run_on_commit_tests = get_bool("TT_ON_COMMIT_TESTS", True) + +"""TT_RUN_REGRESSION_TESTS - False -> api-regression tests are not run, + True -> api-regression tests are run, default: False""" +run_regression_tests = get_bool("TT_RUN_REGRESSION_TESTS", True) + +"""TT_RUN_ENABLING_TESTS - False -> api-enabling tests are not run, + True -> api-enabling tests are run, default: False""" +run_enabling_tests = get_bool("TT_ENABLING_TESTS", True) diff --git a/tests/e2e_tests/common/core.py b/tests/e2e_tests/common/core.py new file mode 100644 index 00000000000000..b64e5858a86d1a --- /dev/null +++ b/tests/e2e_tests/common/core.py @@ -0,0 +1,45 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +import os + + +def get_list(key_name, delimiter=',', fallback=None): + value = os.environ.get(key_name, fallback) + if value != fallback: + value = value.split(delimiter) + elif not value: + value = [] + return value + + +def get_bool(key_name, fallback=None): + value = os.environ.get(key_name, fallback) + if value != fallback: + value = value.lower() + if value == "true": + value = True + elif value == "false": + value = False + else: + raise ValueError("Value of {} env variable is '{}'. 
Should be 'True' or 'False'.".format(key_name, value)) + return value + + +def get_int(key_name, fallback=None): + value = os.environ.get(key_name, fallback) + if value != fallback: + try: + value = int(value) + except ValueError: + raise ValueError("Value '{}' of {} env variable cannot be cast to int.".format(value, key_name)) + return value + + +def get_path(key_name, fallback=None): + value = os.environ.get(key_name, fallback) + if value: + value = os.path.expanduser(value) + value = os.path.realpath(value) + return value diff --git a/tests/e2e_tests/common/decorators.py b/tests/e2e_tests/common/decorators.py new file mode 100644 index 00000000000000..7c8d2bf8aedb2a --- /dev/null +++ b/tests/e2e_tests/common/decorators.py @@ -0,0 +1,24 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from collections import OrderedDict + + +def wrap_ord_dict(func): + """Wrap values in OrderedDict.""" + + def wrapped(*args, **kwargs): + items = func(*args, **kwargs) + if isinstance(items, tuple): + return OrderedDict([items]) + elif isinstance(items, list): + return OrderedDict(items) + elif isinstance(items, dict) or isinstance(items, OrderedDict): + return items + else: + raise TypeError( + "Decorated function '{}' returned '{}' but 'tuple', 'list', 'dict' or 'OrderedDict' expected" + .format(func.__name__, type(items))) + + wrapped.unwrap = func + return wrapped diff --git a/tests/e2e_tests/common/env_utils.py b/tests/e2e_tests/common/env_utils.py new file mode 100644 index 00000000000000..4120b8918479b1 --- /dev/null +++ b/tests/e2e_tests/common/env_utils.py @@ -0,0 +1,31 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Utility module with config environment utilities.""" + +import os + + +def fix_path(path, root_path=None): + """ + Fix path: expand environment variables if any, make absolute path from + root_path/path if path is relative, resolve symbolic links encountered. + """ + path = os.path.expandvars(path) + if not os.path.isabs(path) and root_path is not None: + path = os.path.join(root_path, path) + return os.path.realpath(os.path.abspath(path)) + + +def fix_env_conf(env, root_path=None): + """Fix paths in environment config.""" + for name, value in env.items(): + if isinstance(value, dict): + # if value is dict, think of it as of a (sub)environment + # within current environment + # since it can also contain envvars/relative paths, + # recursively update (sub)environment as well + env[name] = fix_env_conf(value, root_path=root_path) + else: + env[name] = fix_path(value, root_path=root_path) + return env diff --git a/tests/e2e_tests/common/environment_info.py b/tests/e2e_tests/common/environment_info.py new file mode 100644 index 00000000000000..233c8d5cab99a0 --- /dev/null +++ b/tests/e2e_tests/common/environment_info.py @@ -0,0 +1,102 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +import importlib + +import distro + +from . import config +from .logger import get_logger + +logger = get_logger(__name__) + +DEFAULT_BUILD_NUMBER = 0 +DEFAULT_SHORT_VERSION_NUMBER = "0.0.0" +DEFAULT_FULL_VERSION_NUMBER = "{}-{}-{}".format(DEFAULT_SHORT_VERSION_NUMBER, config.product_version_suffix, + DEFAULT_BUILD_NUMBER) + + +class BaseInfo: + """Retrieves environment info""" + glob_version = None + glob_os_distname = None + + @property + def version(self): + """Retrieves version, but only once. + + If retrieval doesn't work, default version is returned. 
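A short usage sketch (hypothetical helper and values) for the wrap_ord_dict decorator defined above: the wrapped callable may return a tuple, a list of pairs, or a dict, callers always receive an OrderedDict, and the undecorated function stays reachable through .unwrap:

from collections import OrderedDict
from e2e_tests.common.decorators import wrap_ord_dict

@wrap_ord_dict
def make_feed():  # hypothetical helper returning (name, data) pairs
    return [("data", [1, 3, 224, 224]), ("mask", [1, 1])]

assert isinstance(make_feed(), OrderedDict)
assert list(make_feed().keys()) == ["data", "mask"]
assert make_feed.unwrap() == [("data", [1, 3, 224, 224]), ("mask", [1, 1])]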
+ """ + if self.glob_version is None: + self.glob_version = self.get() + self.glob_version = \ + self.glob_version["version"] + + return self.glob_version + + @property + def os_distname(self): + """Retrieves os distname, but only once.""" + if self.glob_os_distname is None: + self.glob_os_distname = distro.linux_distribution()[0] + + return self.glob_os_distname + + @classmethod + def get(cls): + """ + Returns constant environment info. + """ + logger.info("BASIC INFO WITHOUT ANY API CALL") + return {"version": DEFAULT_FULL_VERSION_NUMBER} + + +class EnvironmentInfo(object): + """Stores details about environment such as build number, version number + and allows their retrieval""" + module_class_string = config.info_module + module_name, class_name = module_class_string.rsplit(".", 1) + module = importlib.import_module(module_name) + class_info = getattr(module, class_name) + env_info = class_info() + + @classmethod + def get_build_number(cls): + """Retrieves build number from the environment info""" + if config.product_build_number: + return config.product_build_number + return DEFAULT_BUILD_NUMBER + + @classmethod + def get_version_number(cls): + """Retrieves version number from the environment info""" + if config.product_version: + return config.product_version + return DEFAULT_FULL_VERSION_NUMBER + + @classmethod + def get_environment_name(cls): + """Retrieves the environment name that will be reported for a test run""" + return config.environment_name + + @classmethod + def get_os_distname(cls): + """Retrieves the operating system distribution name""" + return cls.env_info.os_distname + + @classmethod + def _retrieve_version_number_from_environment(cls): + return cls._version_number_from_environment_version(cls.env_info.version) + + @classmethod + def _retrieve_build_number_from_environment(cls): + return cls._build_number_from_environment_version(cls.env_info.version) + + @classmethod + def _build_number_from_environment_version(cls, environment_version): + return environment_version.split("-")[-1] + + @classmethod + def _version_number_from_environment_version(cls, environment_version): + return '-'.join(environment_version.split('-')[:2]) diff --git a/tests/e2e_tests/common/hook_utils.py b/tests/e2e_tests/common/hook_utils.py new file mode 100644 index 00000000000000..1049a87a3d343c --- /dev/null +++ b/tests/e2e_tests/common/hook_utils.py @@ -0,0 +1,117 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +from _pytest.python import Function + +from . 
import config +from .config import bug_ids, components_ids, req_ids +from .logger import get_logger +from .marks import MarkGeneral, MarkRunType + +logger = get_logger(__name__) +_current_test_run = "" +test_run_reporters = {} + + +def get_required_marker_ids_for_test_run(): + required_marker_ids = [] + if bug_ids is not None: + required_marker_ids.append(bug_ids) + if req_ids is not None: + required_marker_ids.append(req_ids) + if components_ids is not None: + required_marker_ids.append(components_ids) + if len(required_marker_ids) == 0: + return None + return required_marker_ids + + +def update_components(item): + components = item.get_closest_marker(MarkGeneral.COMPONENTS.mark) + if components is not None: + current_components = next( + (component for component in item.own_markers if component.name == MarkGeneral.COMPONENTS.mark), None) + if current_components is None: + item.own_markers.append(components) + + +def update_markers(item, test_type, markers, marker_type): + marker = item.get_closest_marker(marker_type) + if marker is not None: + if test_type not in markers: + markers[test_type] = set() + markers[test_type].update(set(marker.args)) + + +def deselect_items(items, config, deselected): + config.hook.pytest_deselected(items=deselected) + for item in deselected: + test_name = item.parent.nodeid + # nodeid comes in a way: + # 1) test.py::TestClass::() + # 2) test.py:: + if test_name[-2:] == "()": + test_name = test_name[:-2] + else: + test_name += "::" + + test_name += item.name + logger.info("Deselecting test: " + test_name) + items.remove(item) + + +def deselect(item, test_type, required_marker_ids): + if isinstance(item, Function): + if test_type is None: + logger.warning(f"Test type for item={item} is None") + return True + if required_marker_ids is not None: + for marker_id in required_marker_ids: + if _is_test_marker_id_is_matched_with_id(item, marker_id): + return False + return True + else: + if _test_deselected(item): + return True + return False + + +def _test_deselected(item): + result = any([ + MarkRunType.get_test_type_mark(item) == MarkRunType.TEST_MARK_ON_COMMIT and not config.run_on_commit_tests, + MarkRunType.get_test_type_mark(item) == MarkRunType.TEST_MARK_REGRESSION and not config.run_regression_tests, + MarkRunType.get_test_type_mark(item) == MarkRunType.TEST_MARK_ENABLING and not config.run_enabling_tests, + ]) + return result + + +def _is_test_marker_id_is_matched_with_id(test, id_to_check: str): + for marker in test.own_markers: + if marker.name is MarkGeneral.BUGS.value or marker.name is MarkGeneral.REQIDS.value or \ + marker.name is MarkGeneral.COMPONENTS.value: + marker_arg = marker.args[0] + if isinstance(marker_arg, dict): + for param in marker_arg: + if param is None: + if id_to_check in str(marker_arg.values): + return True + else: + if param in test.name: + if id_to_check in str(marker_arg.values()): + return True + elif isinstance(marker_arg, str): + if id_to_check in marker_arg: + return True + else: + raise RuntimeError(f"Test {test.name} do not have mark in correct form. 
Form: {type(marker_arg)} ") + return False + + +def _get_current_test_run(): + return _current_test_run + + +def _set_current_test_run(test_run): + _current_test_run = test_run + return _current_test_run diff --git a/tests/e2e_tests/common/infer/__init__.py b/tests/e2e_tests/common/infer/__init__.py new file mode 100644 index 00000000000000..7ab0a0e2f0760c --- /dev/null +++ b/tests/e2e_tests/common/infer/__init__.py @@ -0,0 +1,10 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from .dummy_infer_class import use_dummy +from .provider import StepProvider + +try: + from .common_inference import Infer +except ImportError as e: + Infer = use_dummy('ie_sync', str(e)) diff --git a/tests/e2e_tests/common/infer/common_inference.py b/tests/e2e_tests/common/infer/common_inference.py new file mode 100644 index 00000000000000..af0e806afe8be8 --- /dev/null +++ b/tests/e2e_tests/common/infer/common_inference.py @@ -0,0 +1,246 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Inference engine runners.""" +# pylint:disable=import-error +import logging as log +import os +import platform +import sys +from pprint import pformat + +import numpy as np +from e2e_tests.utils.test_utils import align_input_names, get_shapes_with_frame_size +from e2e_tests.utils.test_utils import get_infer_result + +try: + import resource + + mem_info_available = True +except ImportError: + mem_info_available = False + +from openvino.runtime import Core +from openvino.inference_engine import get_version as ie_get_version +from e2e_tests.common.multiprocessing_utils import multiprocessing_run + +log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + +from e2e_tests.common.infer.provider import ClassProvider +from e2e_tests.common.infer.network_modifiers import Container + + +def resolve_library_name(libname): + """Return platform-specific library name given basic libname.""" + if not libname: + return libname + if os.name == 'nt': + return libname + '.dll' + if platform.system() == 'Darwin': + return 'lib' + libname + '.dylib' + return 'lib' + libname + '.so' + + +def parse_device_name(device_name): + device_name_ = device_name + if "HETERO:" in device_name: + device_name_ = "HETERO" + elif "MULTI:" in device_name: + device_name_ = "MULTI" + elif ("AUTO:" in device_name) or ("AUTO" == device_name): + device_name_ = "AUTO" + elif "BATCH:" in device_name: + device_name_ = "BATCH" + else: + device_name_ = device_name + + return device_name_ + + +class Infer(ClassProvider): + """Basic inference engine runner.""" + __action_name__ = "ie_sync" + + def __init__(self, config): + self.device = parse_device_name(config["device"]) + self.timeout = config.get("timeout", 300) + self.res = None + self.network_modifiers = Container(config=config.get("network_modifiers", {})) + self.plugin_cfg = config.get("plugin_config", {}) + self.plugin_cfg_target_device = config.get("plugin_cfg_target_device", self.device) + self.consecutive_infer = config.get("consecutive_infer", False) + self.index_infer = config.get('index_infer') + self.xml = None + self.bin = None + self.model_path = None + + def _get_thermal_metric(self, exec_net, ie): + if "MYRIAD" in self.device: + supported_metrics = exec_net.get_property("SUPPORTED_METRICS") + if "DEVICE_THERMAL" in supported_metrics: + return round(exec_net.get_property("DEVICE_THERMAL"), 3) + else: + log.warning("Expected metric 'DEVICE_THERMAL' doesn't present in " + "supported metrics list {} 
for MYRIAD plugin".format(supported_metrics)) + return None + elif "HDDL" in self.device: + # TODO: Uncomment when HDDL plugin will support 'SUPPORTED_METRICS' metric and remove try/except block + # supported_metrics = ie.get_metric("HDDL", "SUPPORTED_METRICS") + # if "DEVICE_THERMAL" in supported_metrics: + # return ie.get_metric("HDDL", "VPU_HDDL_DEVICE_THERMAL") + # else: + # log.warning("Expected metric 'DEVICE_THERMAL' doesn't present in " + # "supported metrics list {} for HDDL plugin".format(supported_metrics)) + # return None + try: + return [round(t, 3) for t in ie.get_property("HDDL", "VPU_HDDL_DEVICE_THERMAL")] + except RuntimeError: + log.warning("Failed to query metric 'VPU_HDDL_DEVICE_THERMAL' for HDDL plugin") + return None + + else: + return None + + def _configure_plugin(self, ie): + if self.plugin_cfg: + supported_props = ie.get_property(self.plugin_cfg_target_device, 'SUPPORTED_PROPERTIES') + if 'INFERENCE_PRECISION_HINT' not in supported_props: + log.warning( + f'inference precision hint is not supported for device {self.plugin_cfg_target_device},' + f' option will be ignored') + return + log.info("Setting config to the {} plugin. \nConfig:\n{}".format(self.plugin_cfg_target_device, + pformat(self.plugin_cfg))) + ie.set_property(self.plugin_cfg_target_device, self.plugin_cfg) + + def _infer(self, input_data): + log.info("Inference Engine version: {}".format(ie_get_version())) + log.info("Using API v2.0") + result, load_net_to_plug_time = None, None + if mem_info_available: + mem_usage_in_kbytes_before_run = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss + + log.info("Creating Core Engine...") + ie = Core() + self._configure_plugin(ie) + + log.info("Loading network files") + + if self.model_path: + self.ov_model = ie.read_model(model=self.model_path) + if self.xml: + self.ov_model = ie.read_model(model=self.xml) + self.network_modifiers.execute(network=self.ov_model, input_data=input_data) + + log.info("Loading network to the {} device...".format(self.device)) + compiled_model = ie.compile_model(self.ov_model, self.device) + + for input_tensor in self.ov_model.inputs: + # all input and output tensors have to be named + assert input_tensor.names, "Input tensor {} has no names".format(input_tensor) + + result = [] + if self.consecutive_infer: + for infer_run_counter in range(2): + helper = get_infer_result(input_data[infer_run_counter], compiled_model, self.ov_model, + infer_run_counter, self.index_infer) + result.append(helper) + else: + infer_result = get_infer_result(input_data, compiled_model, self.ov_model, index_infer=self.index_infer) + result.append(infer_result) + + if not self.consecutive_infer: + result = result[0] + + if mem_info_available: + mem_usage_in_kbytes_after_run = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss + mem_usage_ie = round((mem_usage_in_kbytes_after_run - mem_usage_in_kbytes_before_run) / 1024) + else: + mem_usage_ie = -1 + + if "exec_net" in locals(): + del compiled_model + if "ie" in locals(): + del ie + + return result, load_net_to_plug_time, mem_usage_ie + + def infer(self, input_data): + self.res, self.load_net_to_plug_time, self.mem_usage_ie = \ + multiprocessing_run(self._infer, [input_data], "Inference Engine Python API", self.timeout) + + return self.res + + +class SequenceInference(Infer): + """Sequence inference engine runner.""" + __action_name__ = "ie_sequence" + + def __init__(self, config): + super().__init__(config=config) + self.default_shapes = config.get('default_shapes') + + def _infer(self, input_data): + 
log.info("Inference Engine version: {}".format(ie_get_version())) + log.info("Using API v2.0") + result, load_net_to_plug_time = None, None + if mem_info_available: + mem_usage_in_kbytes_before_run = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss + + log.info("Creating Core Engine...") + ie = Core() + self._configure_plugin(ie) + + log.info("Loading network files") + if self.model_path: + ov_model = ie.read_model(model=self.model_path) + else: + ov_model = ie.read_model(model=self.xml) + self.network_modifiers.execute(network=ov_model, input_data=input_data) + + log.info("Loading network to the {} device...".format(self.device)) + compiled_model = ie.compile_model(ov_model, self.device) + + for input_tensor in ov_model.inputs: + # all input and output tensors have to be named + assert input_tensor.names, "Input tensor {} has no names".format(input_tensor) + + result = [] + input_data = align_input_names(input_data, ov_model) + # make input_data (dict) a list of frame feed dicts + input_data = get_shapes_with_frame_size(self.default_shapes, ov_model, input_data) + + new_input = [] + num_frames = max([input_data[key].shape[0] for key in input_data]) + input_data = {key: value if value.shape[0] == num_frames else np.tile(value, num_frames).reshape(num_frames, *( + list(value.shape)[1:])) for key, value in input_data.items()} + log.info("Total number of input frames: {}".format(num_frames)) + + for current_frame_index in range(0, num_frames): + cur_frame_data = {key: value[current_frame_index] for key, value in input_data.items()} + infer_result = get_infer_result(cur_frame_data, compiled_model, ov_model, current_frame_index) + result.append(infer_result) + + # make result (list of infer result for each frame) a dict (each layer contains infer result for all frames) + result = {key: [value[key] for value in result] for key in result[0]} + result = {key: np.stack(values, axis=0).reshape(num_frames, *(list(values[0].shape[1:]))) for key, values in + result.items()} + + if mem_info_available: + mem_usage_in_kbytes_after_run = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss + mem_usage_ie = round((mem_usage_in_kbytes_after_run - mem_usage_in_kbytes_before_run) / 1024) + else: + mem_usage_ie = -1 + + if "exec_net" in locals(): + del compiled_model + if "ie" in locals(): + del ie + + return result, load_net_to_plug_time, mem_usage_ie + + def infer(self, model): + self.res, self.load_net_to_plug_time, self.mem_usage_ie = \ + multiprocessing_run(self._infer, [model], "Inference Engine Python API", self.timeout) + + return self.res diff --git a/tests/e2e_tests/common/infer/dummy_infer_class.py b/tests/e2e_tests/common/infer/dummy_infer_class.py new file mode 100644 index 00000000000000..036c591ed077e9 --- /dev/null +++ b/tests/e2e_tests/common/infer/dummy_infer_class.py @@ -0,0 +1,21 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +""" +Dummy infer provider to be used when real provider is unavailable due to absence of IE Python API +e.g. 
for IR collection environment +""" +from .provider import ClassProvider + + +def use_dummy(name, message): + class DummyInfer(ClassProvider): + __action_name__ = name + + def __init__(self, *args, **kwargs): + pass + + def infer(self, *args, **kwargs): + raise RuntimeError(message) + + return DummyInfer diff --git a/tests/e2e_tests/common/infer/network_modifiers/__init__.py b/tests/e2e_tests/common/infer/network_modifiers/__init__.py new file mode 100644 index 00000000000000..5ae99cd8b24395 --- /dev/null +++ b/tests/e2e_tests/common/infer/network_modifiers/__init__.py @@ -0,0 +1,5 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from . import network_modifiers +from .container import Container diff --git a/tests/e2e_tests/common/infer/network_modifiers/container.py b/tests/e2e_tests/common/infer/network_modifiers/container.py new file mode 100644 index 00000000000000..cb834485b25131 --- /dev/null +++ b/tests/e2e_tests/common/infer/network_modifiers/container.py @@ -0,0 +1,30 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import inspect +from e2e_tests.common.common.base_provider import BaseProvider + + +class ClassProvider(BaseProvider): + registry = {} + + @classmethod + def validate(cls): + methods = [ + f[0] for f in inspect.getmembers(cls, predicate=inspect.isfunction) + ] + if 'apply' not in methods: + raise AttributeError( + "Requested class {} registred as '{}' doesn't provide required method 'apply'" + .format(cls.__name__, cls.__action_name__)) + + +class Container: + def __init__(self, config): + self.executors = [] + for name, params in config.items(): + self.executors.append(ClassProvider.provide(name, params)) + + def execute(self, network, **kwargs): + for executor in self.executors: + executor.apply(network, **kwargs) diff --git a/tests/e2e_tests/common/infer/network_modifiers/network_modifiers.py b/tests/e2e_tests/common/infer/network_modifiers/network_modifiers.py new file mode 100644 index 00000000000000..28a24a59dce3c7 --- /dev/null +++ b/tests/e2e_tests/common/infer/network_modifiers/network_modifiers.py @@ -0,0 +1,140 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""IE network modifiers applied to IE network.""" + +import logging as log +import sys + +from e2e_tests.utils.test_utils import align_input_names +from e2e_tests.common.test_utils import get_shapes_from_data, convert_shapes_to_partial_shape +from .container import ClassProvider + +log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + +class ReshapeInputShape(ClassProvider): + """Reshape IE network modifier. + + Reshapes IE network on the same shapes of + the corresponding input data. + """ + + __action_name__ = "reshape_input_shape" + + def __init__(self, config): + self.path = config["input_path"] + + def apply(self, network, input_data): + shapes = get_shapes_from_data(input_data, api_version='2') + log.info("OV Model will be reshaped on {}".format(shapes)) + network.reshape(shapes) + return network + + +class ReshapeCurrentShape(ClassProvider): + """Reshape IE network modifier. 
+ + Reshapes IE network on the same shapes of + the corresponding input layers + """ + + __action_name__ = "reshape_current_shape" + + def __init__(self, config): + pass + + def apply(self, network, **kwargs): + shapes = {} + for input in network.input_info: + shapes[input] = network.input_info[input].input_data.shape + log.info("IE Network will be reshaped on {}".format(shapes)) + network.reshape(shapes) + return network + + +class Reshape(ClassProvider): + """Reshape IE network modifier. + + Reshapes IE network on shapes specified in config + + Config should have 'shapes' field with dictionary + where keys are input layers' names and values are + corresponding input shapes. + + Example: + shapes = {"Placeholder": (1, 224, 224, 3)} + """ + + __action_name__ = "reshape" + + def __init__(self, config): + self.shapes = config["shapes"] + + def apply(self, network, **kwargs): + log.info("OV Model will be reshaped on {}".format(self.shapes)) + self.shapes = convert_shapes_to_partial_shape(self.shapes) + network.reshape(align_input_names(self.shapes, network)) + return network + + +class SetBatchReshape(ClassProvider): + """Batch IE network modifier. + + Sets batch of IE network to BATCH value specified in config + + Config should have 'batch' field with 'int' value. + """ + + __action_name__ = "set_batch_using_reshape" + + def __init__(self, config): + self.batch = config["batch"] + self.target_layers = config.get('target_layers', None) + self.batch_dim = config.get('batch_dim', 0) + + def apply(self, network, **kwargs): + log.info("OV Model's batch will be set to {}".format(self.batch)) + input_shapes = {} + for network_input in network.inputs: + input_name = network_input.get_any_name() + if self.target_layers and input_name not in self.target_layers: + common_names = network_input.names.intersection(set(self.target_layers)) + if common_names: + input_name = common_names.pop() + input_shapes[input_name] = network_input.get_partial_shape() + + apply_to = self.target_layers if self.target_layers is not None else input_shapes.keys() + + reshaped = False + for layer in apply_to: + if input_shapes[layer][self.batch_dim] == self.batch: + log.info("For layer '{}' target shape {} " + "equals to initial shape, no reshape done".format(layer, input_shapes[layer])) + continue + input_shapes[layer][self.batch_dim] = self.batch + reshaped = True + if reshaped: + network.reshape(input_shapes) + return network + + +class AddOutputs(ClassProvider): + """Network outputs modifier. + + Adds additional outputs to the network allowing to get intermediate tensors. + + Config should have 'outputs' field with tuples ("node_name", output_port) or a single element "node_name". In the + latter case the output port is implicitly set to 0. 
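A hedged configuration sketch (node names are hypothetical) showing how these modifiers are typically chained through the Container from container.py; each top-level key selects a modifier by its __action_name__ and the value is that modifier's config:

network_modifiers = {
    "reshape": {"shapes": {"Placeholder": (1, 224, 224, 3)}},
    "set_batch_using_reshape": {"batch": 2, "batch_dim": 0},
    "add_outputs": {"outputs": [("conv5_3/relu", 0), "detection_out"]},
}
# Container(config=network_modifiers).execute(network=ov_model, input_data=feed_dict)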
+ """ + + __action_name__ = "add_outputs" + + def __init__(self, config): + self.outputs = config["outputs"] + assert self.outputs is not None, 'The "outputs" must be specified for the Network output modifier' + + def apply(self, network, **kwargs): + log.info("IE Network outputs will be expanded with the following ones: {}".format(self.outputs)) + network.add_outputs(self.outputs) + return network diff --git a/tests/e2e_tests/common/infer/provider.py b/tests/e2e_tests/common/infer/provider.py new file mode 100644 index 00000000000000..a571c2d2f0688d --- /dev/null +++ b/tests/e2e_tests/common/infer/provider.py @@ -0,0 +1,35 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import inspect +from e2e_tests.test_utils.test_utils import log_timestamp +from e2e_tests.common.common.base_provider import BaseProvider, BaseStepProvider + + +class ClassProvider(BaseProvider): + registry = {} + + @classmethod + def validate(cls): + methods = [ + f[0] for f in inspect.getmembers(cls, predicate=inspect.isfunction) + ] + if 'infer' not in methods: + raise AttributeError( + "Requested class {} registred as '{}' doesn't provide required method infer" + .format(cls.__name__, cls.__action_name__)) + + +class StepProvider(BaseStepProvider): + __step_name__ = "infer" + + def __init__(self, config): + action_name = next(iter(config)) + self.executor = ClassProvider.provide(action_name, config=config[action_name]) + + def execute(self, passthrough_data=None): + feed_dict = passthrough_data.strict_get('feed_dict', self) + self.executor.xml, self.executor.bin = passthrough_data.get('xml'), passthrough_data.get('bin') + with log_timestamp('Inference'): + passthrough_data['output'] = self.executor.infer(feed_dict) + return passthrough_data diff --git a/tests/e2e_tests/common/ir_provider/__init__.py b/tests/e2e_tests/common/ir_provider/__init__.py new file mode 100644 index 00000000000000..268933264c7ee9 --- /dev/null +++ b/tests/e2e_tests/common/ir_provider/__init__.py @@ -0,0 +1,4 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from . 
import model_optimizer_runner, pregenerated \ No newline at end of file diff --git a/tests/e2e_tests/common/ir_provider/model_optimizer_runner.py b/tests/e2e_tests/common/ir_provider/model_optimizer_runner.py new file mode 100644 index 00000000000000..d09dcf669b8af1 --- /dev/null +++ b/tests/e2e_tests/common/ir_provider/model_optimizer_runner.py @@ -0,0 +1,143 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from openvino.tools.mo.utils.cli_parser import parse_input_value +from openvino.tools.ovc.cli_parser import split_inputs + +from e2e_tests.test_utils.test_utils import log_timestamp +from e2e_tests.test_utils.path_utils import resolve_file_path +from .provider import ClassProvider +import sys +import logging as log +import os + + +class OVCMORunner(ClassProvider): + """OpenVINO converter runner.""" + __action_name__ = "get_ovc_model" + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + def __init__(self, config): + self.target_ir_name = config.get("target_ir_name") + self._config = config + self.xml = None + self.bin = None + self.prepared_model = None # dynamically set prepared model + self.args = self._build_arguments() + + def _build_arguments(self): + """Construct model optimizer arguments.""" + args = { + 'output_dir': self._config['mo_out'], + } + + if self._config['precision'] == 'FP32': + args['compress_to_fp16'] = False + else: + args['compress_to_fp16'] = True + + if self.target_ir_name is not None: + args.update({"model_name": self.target_ir_name}) + # if isinstance(self._config['model'], str): + # if os.path.splitext(self._config['model'])[1] == ".meta": + # args["input_meta_graph"] = args.pop("input_model") + # # If our model not a regular file but directory then remove + # # '--input_model' attr and add use '--saved_model_dir' + # if os.path.isdir(self._config['model']): + # args["saved_model_dir"] = args.pop("input_model") + + if 'proto' in self._config.keys(): + args.update({"input_proto": str(self._config['proto'])}) + + if 'fusing' in self._config.keys() and not self._config['fusing']: + args.update({"disable_fusing": None}) + + if "additional_args" in self._config: + if 'tensorflow_object' in self._config['additional_args']: + self._config['additional_args']['tensorflow_object_detection_api_pipeline_config'] = self._config[ + 'additional_args'].pop('tensorflow_object') + + for key, val in self._config["additional_args"].items(): + if key == 'batch': + val = int(val) + args.update({key: val}) + + return args + + def get_ir(self, passthrough_data): + from openvino import convert_model, save_model + from openvino.tools.mo.utils.cli_parser import input_shape_to_input_cut_info, input_to_input_cut_info + + ir_name = self.target_ir_name if self.target_ir_name else 'model' + xml_file = os.path.join(self.args['output_dir'], ir_name + '.xml') + bin_file = os.path.join(self.args['output_dir'], ir_name + '.bin') + compress_to_fp16 = self.args.pop('compress_to_fp16') + self.args.pop('output_dir') + + filtered_args = {} + args_to_pop = [] + for k in self.args: + if k in ['example_input', 'output']: + filtered_args[k] = self.args[k] + if k in ['saved_model_dir']: + filtered_args['input_model'] = self.args['saved_model_dir'] + args_to_pop.append('saved_model_dir') + if k in ['input_checkpoint']: + filtered_args['input_model'] = self.args['input_checkpoint'] + args_to_pop.append('input_checkpoint') + if k in ['input_meta_graph']: + filtered_args['input_model'] = self.args['input_meta_graph'] + 
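As a rough illustration (paths and values are hypothetical), the argument dictionary that _build_arguments above assembles for a non-FP32 run looks approximately like this:

config = {"mo_out": "/tmp/ir_out", "precision": "FP16",
          "target_ir_name": "resnet50", "additional_args": {"batch": "2"}}
# _build_arguments would produce roughly:
args = {"output_dir": "/tmp/ir_out",   # taken from 'mo_out'
        "compress_to_fp16": True,      # any precision other than FP32
        "model_name": "resnet50",      # from 'target_ir_name'
        "batch": 2}                    # 'batch' values are cast to int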
args_to_pop.append('input_meta_graph') + + if 'input' in self.args and 'input_shape' not in self.args: + inputs = [] + for input_value in split_inputs(self.args['input']): + # Parse string with parameters for single input + node_name, shape, value, data_type = parse_input_value(input_value) + inputs.append([attr for attr in [node_name, shape, value, data_type] if attr is not None]) + filtered_args['input'] = inputs + elif 'input_shape' in self.args and 'input' not in self.args: + if isinstance(self.args['input_shape'], str): + _, shape, _, _ = parse_input_value(self.args['input_shape']) + filtered_args['input'] = shape + else: + filtered_args['input'] = self.args['input_shape'] + elif 'input' in self.args and 'input_shape' in self.args: + filtered_args['input'] = input_to_input_cut_info(self.args['input']) + input_shape_to_input_cut_info(self.args['input_shape'], filtered_args['input']) + for idx in range(len(filtered_args['input'])): + if filtered_args['input'][idx].type: + filtered_args['input'][idx] = (filtered_args['input'][idx].name, filtered_args['input'][idx].shape, + filtered_args['input'][idx].type) + else: + filtered_args['input'][idx] = (filtered_args['input'][idx].name, filtered_args['input'][idx].shape) + + for key in args_to_pop: + self.args.pop(key) + + removed_keys = sorted(self.args.keys() - filtered_args.keys()) + log.info(f"Removed MO args: {removed_keys}") + removed_values = [self.args[k] for k in removed_keys] + log.info(f"Removed MO values: {removed_values}") + + with log_timestamp('Convert Model'): + for k, v in filtered_args.items(): + if k == 'example_input': + v = True + log.info(f'{k}={v}') + + ov_model = convert_model(self.prepared_model, + input=filtered_args.get('input'), + output=filtered_args.get('output'), + example_input=filtered_args.get('example_input'), + extension=filtered_args.get('extension'), + verbose=filtered_args.get('verbose'), + share_weights=filtered_args.get('share_weights', True)) + save_model(ov_model, xml_file, compress_to_fp16) + + self.xml = resolve_file_path(xml_file, as_str=True) + self.bin = resolve_file_path(bin_file, as_str=True) + log.info(f'XML file with compress_to_fp16={compress_to_fp16} was saved to: {self.xml}') + log.info(f'BIN file with compress_to_fp16={compress_to_fp16} was saved to: {self.bin}') + + return self.xml, self.bin diff --git a/tests/e2e_tests/common/ir_provider/pregenerated.py b/tests/e2e_tests/common/ir_provider/pregenerated.py new file mode 100644 index 00000000000000..83f035e0f438f4 --- /dev/null +++ b/tests/e2e_tests/common/ir_provider/pregenerated.py @@ -0,0 +1,24 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from e2e_tests.test_utils.path_utils import resolve_file_path +from .provider import ClassProvider +import logging as log +import sys + + +class Pregenerated(ClassProvider): + """Pregenerated IR provider.""" + __action_name__ = "pregenerated" + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + def __init__(self, config): + self.xml = resolve_file_path(config.get("xml")) if config.get("xml") else None + self.bin = resolve_file_path(config.get("bin")) if config.get("bin") else None + self.ov_model = config.get("ov_model") + self.mo_log = None + + def get_ir(self, data=None): + log.info("Reading ie IR from files:\n\t\tXML: {}\n\t\tBIN: {}".format(self.xml, self.bin)) + """Return existing IR.""" + return self.xml diff --git a/tests/e2e_tests/common/ir_provider/provider.py 
b/tests/e2e_tests/common/ir_provider/provider.py new file mode 100644 index 00000000000000..3a90091fdbb83d --- /dev/null +++ b/tests/e2e_tests/common/ir_provider/provider.py @@ -0,0 +1,37 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import inspect +from e2e_tests.common.common.base_provider import BaseProvider, BaseStepProvider + + +class ClassProvider(BaseProvider): + registry = {} + + @classmethod + def validate(cls): + methods = [ + f[0] for f in inspect.getmembers(cls, predicate=inspect.isfunction) + ] + if 'get_ir' not in methods: + raise AttributeError( + "Requested class {} registered as '{}' doesn't provide required method get_ir" + .format(cls.__name__, cls.__action_name__)) + + +class StepProvider(BaseStepProvider): + __step_name__ = "get_ir" + + def __init__(self, config): + action_name = next(iter(config)) + cfg = config[action_name] + self.executor = ClassProvider.provide(action_name, config=cfg) + + def execute(self, passthrough_data): + # this may be considered a WA. To properly remove prepared_model + # we need to refactor all the class providers and handle pytorch cases with care + self.executor.prepared_model = passthrough_data.get("model_obj") + data = passthrough_data.get('feed_dict') + passthrough_data['xml'], passthrough_data['bin'] = self.executor.get_ir(data) + # passthrough_data['mo_log'] = self.executor.mo_log + return passthrough_data diff --git a/tests/e2e_tests/common/logger.py b/tests/e2e_tests/common/logger.py new file mode 100644 index 00000000000000..a95c9f723a8d95 --- /dev/null +++ b/tests/e2e_tests/common/logger.py @@ -0,0 +1,299 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +import base64 +import inspect +import logging +import os +import re +import weakref +from datetime import datetime +from typing import cast, List, Union, Tuple, Generator + +from e2e_tests.common import config + +SEPARATOR = "=" * 20 +FIXTURE_SEPARATOR = "*" * 20 +UNDEFINED = "" +UNDEFINED_BASE64 = base64.b64encode(UNDEFINED.encode('utf-8')) +API = "api" +LOCALHOST = "localhost" + +ONE_K = 1024 +ONE_M = ONE_K * ONE_K + + +def get_xdist_worker_count() -> int: + return int(os.environ.get("PYTEST_XDIST_WORKER_COUNT", "1")) + + +log_username = f"- [{config.host_os_user}] " if config.log_username else "" +worker_count = get_xdist_worker_count() +worker_id = os.environ.get("PYTEST_XDIST_WORKER", "") +worker_string = f"[{worker_id}] " if worker_count > 0 else "" +logger_format = config.logger_format(worker_string, log_username) + + +class Chunks(Generator): + """ + generator yielding tuple: no of part, number of parts, and part of the input list + """ + + def __init__(self, seq: List[str], max_number_of_elements: int = 1000) -> None: + super().__init__() + self.seq = tuple(seq) + assert max_number_of_elements > 0, "Incorrect number of elements, should be more than zero" + self.chunk_len = max_number_of_elements + self.no_of_chunks = (len(self.seq) // self.chunk_len) + 1 + self.current_chunk = 0 + self.index_iterator = iter(range(0, len(self.seq), self.chunk_len)) + + def __next__(self) -> Tuple[int, int, list]: + return self.send(None) + + def __iter__(self) -> 'Chunks': + return self + + def send(self, ignored_value) -> Tuple[int, int, list]: + index = next(self.index_iterator) + return_chunk = self.current_chunk, self.no_of_chunks, list(self.seq[index:index + self.chunk_len]) + self.current_chunk += 1 + return return_chunk + + def throw(self, typ, val=None, tb=None): + raise StopIteration + + def 
close(self) -> None: + raise GeneratorExit + + +class SensitiveKeysStrippingFilter(logging.Filter): + instance = None + sensitive_pairs = None # type: dict + sensitive_values_to_be_masked = None # type: re + + def __new__(cls) -> 'SensitiveKeysStrippingFilter': + if cls.instance is None: + cls.instance = super().__new__(cls) + cls.sensitive_pairs = cls.gather_sensitive_pairs() + cls.sensitive_values_to_be_masked = list(cls.sensitive_pairs.values()) + return cls.instance + + @classmethod + def build_sensitive_values_regexp(cls) -> re: + return re.compile( + "|".join([r"{value}".format(value=var) + for var in cls.sensitive_pairs.values()])) + + @classmethod + def gather_sensitive_pairs(cls) -> dict: + return dict([(var, getattr(config, var, None)) + for var in dir(config) + if cls.is_matching_variable(var)]) + + @staticmethod + def is_matching_variable(var) -> bool: + if config.sensitive_keys_to_be_masked.match(var): + var_value = getattr(config, var, UNDEFINED) + if var_value is not UNDEFINED and \ + isinstance(var_value, str) and \ + len(var_value) > 0: + return True + return False + + def filter(self, record: logging.LogRecord) -> bool: + record.msg = self.strip_sensitive_data(record.msg) + record.args = self.filter_args(record.args) + return True + + def filter_args(self, args: Union[dict, tuple]) -> Union[dict, tuple]: + if not isinstance(args, (dict, tuple)): + return args + if isinstance(args, dict): + args = self.strip_sensitive_data(args) + else: + args = tuple(self.strip_sensitive_data(arg) for arg in args) + return args + + def strip_sensitive_data(self, data: Union[dict, str]) -> Union[dict, str]: + if config.strip_sensitive_data: + if isinstance(data, str) and len(data) > 0: + data = self.strip_sensitive_str_values(data) + elif isinstance(data, dict): + data = self.strip_sensitive_dict_values(data.copy()) + return data + + def strip_sensitive_dict_values(self, data: dict) -> dict: + for key, value in data.items(): + if value in self.sensitive_values_to_be_masked: + data[key] = "******" + return data + + def strip_sensitive_str_values(self, data: str) -> str: + stripped_data = data + for sensitive_value_to_be_masked in self.sensitive_values_to_be_masked: + stripped_data = stripped_data.replace(sensitive_value_to_be_masked, "******") + return stripped_data + + +class LoggerType(object): + """Logger types definitions""" + HTTP_REQUEST = "http_request" + HTTP_RESPONSE = "http_response" + REMOTE_LOGGER = "remote logger" + SHELL_COMMAND = "shell command" + STEP_LOGGER = "STEP" + FIXTURE_LOGGER = "FIXTURE" + FINALIZER_LOGGER = "FINALIZER" + + +class Logger(logging.Logger): + """src: https://stackoverflow.com/a/22586200""" + MIN_NUMBER_OF_LINES_TO_PRESENT_FINAL_MSG = 20 + VERBOSE = 5 + + def __init__(self, name, level=logging.NOTSET): + super().__init__(name, level) + # noinspection PyTypeChecker + self.last_record = None # type: weakref.ReferenceType + logging.addLevelName(self.VERBOSE, "VERBOSE") + + def makeRecord(self, name, level, fn, lno, msg, args, exc_info, + func=None, extra=None, sinfo: Union[None, bool] = None): + record = super().makeRecord(name, level, fn, lno, msg, args, exc_info, func, extra, sinfo) + self.last_record = weakref.ref(record) # type: weakref.ReferenceType + return record + + def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False, + list_of_strings: List[str] = None, + chunk_len: int = 1000, + chunk_msg: str = None, + final_msg: str = None): + super()._log(level, msg, args, exc_info, extra, stack_info) + self.log_list_of_strings(level, 
chunk_msg, args, exc_info, extra, + stack_info, list_of_strings, chunk_len, final_msg) + + def findCaller(self, stack_info: bool = False, stacklevel: int = 1): + last_record = self.last_record() if self.last_record is not None else None # type: logging.LogRecord + if last_record is not None: + return last_record.pathname, last_record.lineno, last_record.funcName, last_record.stack_info + else: + return super().findCaller(stack_info=stack_info) + + def log_list_of_strings(self, level, chunk_msg, args, exc_info=None, extra=None, stack_info=False, + list_of_strings: List[str] = None, + chunk_len: int = 1000, + final_msg: str = None): + fn, lno, func, sinfo = self.findCaller(stack_info=stack_info) + if list_of_strings is not None and len(list_of_strings): + chunks = Chunks(list_of_strings, max_number_of_elements=chunk_len) + if chunks.no_of_chunks > 1: + chunk_msg = chunk_msg.rstrip() if chunk_msg is not None else "Presenting chunk" + chunk_msg = " ".join([chunk_msg.rstrip(), "({index}/{no_of_chunks}):\n{chunk}\n"]) + else: + chunk_msg = "\n{chunk}\n" + list_chunk = [] + for chunk_number, no_of_chunks, list_chunk in chunks: + formatted_chunk_msg = chunk_msg.format(index=chunk_number, + no_of_chunks=no_of_chunks, + chunk="\n".join(list_chunk)) + chunk_record = self.makeRecord(self.name, level, fn, lno, formatted_chunk_msg, args, + exc_info, func, extra, sinfo) + self.handle(chunk_record) + else: + if len(list_chunk) > self.MIN_NUMBER_OF_LINES_TO_PRESENT_FINAL_MSG: + final_msg = final_msg.rstrip() if final_msg is not None else "End of presenting chunks" + if chunks.no_of_chunks > 1: + final_msg = " ".join([final_msg, "Presented {no_of_chunks} chunks.". + format(no_of_chunks=chunks.no_of_chunks)]) + final_record = self.makeRecord(self.name, level, fn, lno, final_msg, args, + exc_info, func, extra, sinfo) + self.handle(final_record) + + def verbose(self, msg, *args, **kwargs): + if self.isEnabledFor(self.VERBOSE): + self._log(self.VERBOSE, msg, args, **kwargs) + + +logging.setLoggerClass(Logger) +logging.addLevelName(Logger.VERBOSE, "VERBOSE") + +__LOGGING_LEVEL = config.logging_level + + +def get_logger(name) -> Logger: + logger = logging.getLogger(name) + logger.addFilter(SensitiveKeysStrippingFilter()) + logger.setLevel(__LOGGING_LEVEL) + return cast(Logger, logger) + + +def step(message): + caller = inspect.stack()[1][3] + _log_separator(logger_type=LoggerType.STEP_LOGGER, separator=SEPARATOR, caller=caller, message=message) + + +def log_fixture(message, separator=FIXTURE_SEPARATOR): + caller = inspect.stack()[1][3] + _log_separator(logger_type=LoggerType.FIXTURE_LOGGER, separator=separator, caller=caller, message=message) + + +def _log_separator(logger_type, separator, caller, message): + get_logger(logger_type).info("{0} {1}: {2} {0}".format(separator, caller, message)) + + +def line_trimmer(line: str, max_number_of_elements: int = 1 * ONE_K // 4): + if len(line) > max_number_of_elements: + line = "t: " + \ + line[:max_number_of_elements // 2] + \ + "[...]" + \ + line[-max_number_of_elements // 2:] + return line + + +def list_trimmer(seq: list, max_number_of_elements: int = 4 * ONE_K): + if len(seq) > max_number_of_elements: + first_element = ["Too long output was trimmed! 
Original len {}, showing first and last {} lines:" + .format(len(seq), max_number_of_elements // 2)] + seq = first_element + seq[:max_number_of_elements // 2] + ["", "[...]", ""] + seq[-max_number_of_elements // 2:] + return seq + + +def log_trimmer(logs: str): + logs_list = logs.split(sep="\n") + logs_list = [line_trimmer(line) for line in logs_list] + logs_trimmed = list_trimmer(seq=logs_list, max_number_of_elements=ONE_K) + logs = "\n".join(logs_trimmed) + return logs + + +def sanitize_node(name_or_node_id): + name_or_node_id = "__".join(name_or_node_id.split("/")) + name_or_node_id = "..".join(name_or_node_id.split("::")) + name_or_node_id = "-".join(name_or_node_id.split(" ")) + return name_or_node_id + + +class FileHandler(logging.FileHandler): + def __init__(self, item: Union["Item", str] = None, + mode='a', encoding=None, delay=False): + self.filename = item if isinstance(item, str) else self.log_file_name(item) + file_path = os.path.join(config.test_log_directory, self.filename) + if item is not None: + item._log_file_name = self.filename + super().__init__(file_path, mode, encoding, delay) + fmt = logging.Formatter(logger_format) + self.setFormatter(fmt) + + @staticmethod + def safe_node_id(item: "Item"): + return sanitize_node(item.nodeid) + + @classmethod + def log_file_name(cls, item: "Item" = None): + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + worker = f"_{worker_id}" if worker_count > 0 else "" + prefix = f"{cls.safe_node_id(item)}" if item is not None else "api_tests" + file_name = f"{prefix}{worker}_{timestamp}.log" + return file_name diff --git a/tests/e2e_tests/common/marks.py b/tests/e2e_tests/common/marks.py new file mode 100644 index 00000000000000..7a1bf17c50b947 --- /dev/null +++ b/tests/e2e_tests/common/marks.py @@ -0,0 +1,275 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +import re +from enum import Enum +from itertools import chain +from typing import Union + +from _pytest.nodes import Item + +from .config import repository_name +from .logger import get_logger + +RePattern = type(re.compile("")) +logger = get_logger(__name__) + + +class MarkMeta(str, Enum): + def __new__(cls, mark: str, description: str = None, *args): + obj = str.__new__(cls, mark) # noqa + obj._value_ = mark + obj.description = description + return obj + + def __init__(self, *args): + super(MarkMeta, self).__init__() + + def __hash__(self) -> int: + return hash(self.mark) + + def __format__(self, format_spec): + return self.mark + + def __repr__(self): + return self.mark + + def __str__(self): + return self.mark + + @classmethod + def get_by_name(cls, name): + return name + + @property + def mark(self): + return self._value_ + + @property + def marker_with_description(self): + return "{}{}".format(self.mark, + ": {}".format(self.description) if self.description is not None else "") + + def __eq__(self, o: object) -> bool: + if isinstance(o, str): + return self.mark.__eq__(o) + return super().__eq__(o) + + +class ConditionalMark(MarkMeta): + @classmethod + def get_conditional_marks_from_item(cls, name, item): + marks = list(filter(lambda x: x.name == name and x.args is not None, item.keywords.node.own_markers)) + return marks + + @classmethod + def _test_name_phrase_match_test_item(cls, test_name, item): + """ + Verify if current 'item' test_name match pytest Mark from test case + """ + if test_name is None: # no filtering -> any test_name will match + return True + _name = item.keywords.node.originalname + if isinstance(test_name, 
RePattern): + return bool(test_name.match(_name)) + elif isinstance(test_name, str): + return test_name == _name + else: + raise AttributeError(f"Unexpected conditional marker params {test_name} for {item}") + + @classmethod + def _params_phrase_match_item(cls, params, item): + """ + Verify if current 'item' parameter match pytest Mark from test case + """ + if params is None: # no filtering -> any param will match + return True + test_params = item.keywords.node.callspec.id + if isinstance(params, RePattern): + return bool(params.match(test_params)) + elif isinstance(params, str): + return params == test_params + else: + raise AttributeError(f"Unexpected conditional marker params {params} for {item}") + + @classmethod + def _process_single_entry(cls, entry, item): + """ + Check if mark 'condition' is meet and item parameters match re/str phrase. + Then return mark value + """ + value, condition, params, test_name = None, True, None, None + if isinstance(entry, str): + # Simple string do not have condition nor parameters. + value = entry + elif isinstance(entry, dict): + value = entry.get('value') # required + condition = entry.get('condition', True) + params = entry.get('params', None) + test_name = entry.get('test_name', None) + elif isinstance(entry, tuple): + value, *_optional = entry + if isinstance(value, list): + return cls._process_single_entry(value, item) + + if len(_optional) > 0: + condition = _optional[0] + if len(_optional) > 1: + params = _optional[1] + if len(_optional) > 2: + test_name = _optional[2] + elif isinstance(entry, list): + for _element in entry: + value = cls._process_single_entry(_element, item) + if value: # Return first match + return value + return None + else: + raise AttributeError(f"Unexpected conditional marker entry {entry}") + + if not condition: + return None + + if not cls._test_name_phrase_match_test_item(test_name, item): + return None + + return value if cls._params_phrase_match_item(params, item) else None + + @classmethod + def get_all_marks_values_from_item(cls, item, marks): + mark_values = [] + for mark in marks: + values = cls.get_all_marker_values_from_item(item, mark) + if values: + mark_values.extend(values) + return mark_values + + @classmethod + def get_all_marker_values_from_item(cls, item, mark, _args=None): + """ + Marker can be set as 'str', 'list', 'tuple', 'dict'. + Process it accordingly and list of values. 
+ """ + marker_values = [] + args = _args if _args else mark.args + if isinstance(args, list): + for entry in args: + value = cls._process_single_entry(entry, item) + if not value: + continue + marker_values.append(value) + elif isinstance(args, tuple): + value = cls._process_single_entry(args, item) + if value: + marker_values.append(value) + elif isinstance(args, str): + marker_values.append(args) + elif isinstance(args, dict): + for params, value in args.items(): + if not cls._params_phrase_match_item(params, item): + continue + if isinstance(value, list): + marker_values.extend(value) + else: + marker_values.append(value) + else: + raise AttributeError(f"Unrecognized conditional marker {mark}") + return marker_values + + @classmethod + def get_markers_values_from_item(cls, item, marks): + result = [] + for mark in marks: + result.extend(cls.get_all_marker_values_from_item(item, mark)) + return result + + @classmethod + def get_markers_values_via_conditional_marker(cls, item, name): + conditional_marks = cls.get_conditional_marks_from_item(name, item) + markers_values = cls.get_markers_values_from_item(item, conditional_marks) + return markers_values + + @classmethod + def get_mark_from_item(cls, item: Item, conditional_marker_name=None): + marks = cls.get_markers_values_via_conditional_marker(item, conditional_marker_name) + if not marks: + return cls.get_closest_mark(item) + + marks = marks[0] + return marks + + @classmethod + def get_closest_mark(cls, item: Item): + for mark in cls: # type: 'MarkRunType' + if item.get_closest_marker(mark.mark): + return mark + return None + + @classmethod + def get_by_name(cls, name): + mark = list(filter(lambda x: x.value == name, list(cls))) + return mark[0] + + +class MarkBugs(ConditionalMark): + @classmethod + def get_all_bug_marks_values_from_item(cls, item: Item): + conditional_marks = cls.get_conditional_marks_from_item("bugs", item) + bugs = cls.get_all_marks_values_from_item(item, conditional_marks) + return bugs + + +class MarkGeneral(MarkMeta): + COMPONENTS = "components" + REQIDS = "reqids", "Mark requirements tested" + + +class MarkRunType(ConditionalMark): + TEST_MARK_COMPONENT = "component", "run component tests", "component" + TEST_MARK_ON_COMMIT = "api_on_commit", "run api-on-commit tests", "api_on-commit" + TEST_MARK_REGRESSION = "api_regression", "run api-regression tests", "api_regression" + TEST_MARK_ENABLING = "api_enabling", "run api-enabling tests", "api_enabling" + TEST_MARK_MANUAL = "manual", "run api-manual tests", "api_manual" + TEST_MARK_OTHER = "api_other", "run api-other tests", "api_other" + TEST_MARK_STRESS_AND_LOAD = "api_stress_and_load", "run api-stress-and-load tests", "api_stress-and-load" + TEST_MARK_LONG = "api_long", "run api-long tests", "api_long" + TEST_MARK_PERFORMANCE = "api_performance", "run api-performance tests", "api_performance" + + def __init__(self, mark: str, description: str = None, run_type: str = None) -> None: + super().__init__(self, mark, description) + self.run_type = f"{repository_name}_{run_type}" if repository_name is not None else run_type + + @classmethod + def test_mark_to_test_run_type(cls, test_type_mark: Union['MarkRunType', str]): + if isinstance(test_type_mark, str): + return MarkRunType(test_type_mark).run_type + return test_type_mark.run_type + + @classmethod + def get_test_type_mark(cls, item: Item): + mark = cls.get_mark_from_item(item, "test_group") + if not mark and getattr(item, "parent", None): + mark = cls.get_mark_from_item(item.parent, "test_group") # try to deduce 
test type from parent + return mark + + @classmethod + def test_type_mark_to_int(cls, item): + mark = cls.get_test_type_mark(item) + if not mark: + return -1 + return list(cls).index(mark) + + +class MarksRegistry(tuple): + MARKERS = "markers" + MARK_ENUMS = [MarkGeneral, MarkRunType, MarkBugs] + + def __new__(cls) -> 'MarksRegistry': + # noinspection PyTypeChecker + return tuple.__new__(cls, [mark for mark in chain(*cls.MARK_ENUMS)]) + + @staticmethod + def register(pytest_config): + for mark in MarksRegistry(): + pytest_config.addinivalue_line(MarksRegistry.MARKERS, mark.marker_with_description) diff --git a/tests/e2e_tests/common/model_loader/__init__.py b/tests/e2e_tests/common/model_loader/__init__.py new file mode 100644 index 00000000000000..cb3dbc7f3f0cb6 --- /dev/null +++ b/tests/e2e_tests/common/model_loader/__init__.py @@ -0,0 +1,5 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from . import load_pytorch_model, tf_hub_model_loader +from .provider import StepProvider diff --git a/tests/e2e_tests/common/model_loader/load_pytorch_model.py b/tests/e2e_tests/common/model_loader/load_pytorch_model.py new file mode 100644 index 00000000000000..d88561e936b610 --- /dev/null +++ b/tests/e2e_tests/common/model_loader/load_pytorch_model.py @@ -0,0 +1,49 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +import logging as log +import os +import torch + +from e2e_tests.test_utils.pytorch_loaders import * +from e2e_tests.common.model_loader.provider import ClassProvider + + +class PyTorchModelLoader(ClassProvider): + """PyTorch models loader runner.""" + __action_name__ = "load_pytorch_model" + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + def __init__(self, config): + self._config = config + self.prepared_model = None + + def load_model(self, input_data): + os.environ['TORCH_HOME'] = self._config.pop('torch_model_zoo_path') + args = {k: v for k, v in self._config.items()} + module = args['import-module'] + try: + log.info('Preparing model for MO ...') + pytorch_loader = LoadPyTorchModel(module=module, + args=args, + inputs=input_data) + self.prepared_model = pytorch_loader.load_model() + if args['weights']: + self.prepared_model.load_state_dict(torch.load(args['weights'], map_location='cpu')) + except Exception as err: + raise Exception from err + + return self.prepared_model + + +class CustomPytorchModelLoader(ClassProvider): + __action_name__ = "custom_pytorch_model_loader" + + def __init__(self, config): + self.execution_function = config["execution_function"] + self.prepared_model = None + + def load_model(self, data): + self.prepared_model = self.execution_function(data) + return self.prepared_model diff --git a/tests/e2e_tests/common/model_loader/provider.py b/tests/e2e_tests/common/model_loader/provider.py new file mode 100644 index 00000000000000..3b938436a681e1 --- /dev/null +++ b/tests/e2e_tests/common/model_loader/provider.py @@ -0,0 +1,35 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import inspect + +from e2e_tests.common.common.base_provider import BaseProvider, BaseStepProvider + + +class ClassProvider(BaseProvider): + registry = {} + + @classmethod + def validate(cls): + methods = [ + f[0] for f in inspect.getmembers(cls, predicate=inspect.isfunction) + ] + if 'load_model' not in methods: + raise AttributeError( + "Requested class {} registered as '{}' doesn't provide required method load_model" + 
.format(cls.__name__, cls.__action_name__)) + + +class StepProvider(BaseStepProvider): + __step_name__ = "load_model" + + def __init__(self, config): + action_name = next(iter(config)) + cfg = config[action_name] + self.executor = ClassProvider.provide(action_name, config=cfg) + + def execute(self, passthrough_data): + data = passthrough_data.get('feed_dict') + passthrough_data['model_obj'] = self.executor.load_model(data) + passthrough_data['output'] = passthrough_data['model_obj'] + return passthrough_data diff --git a/tests/e2e_tests/common/model_loader/tf_hub_model_loader.py b/tests/e2e_tests/common/model_loader/tf_hub_model_loader.py new file mode 100644 index 00000000000000..a8f6af6e85916a --- /dev/null +++ b/tests/e2e_tests/common/model_loader/tf_hub_model_loader.py @@ -0,0 +1,34 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import logging as log +import sys +import tensorflow_hub as hub + +from e2e_tests.common.model_loader.provider import ClassProvider + + +class TFHubModelLoader(ClassProvider): + """TFHub models loader runner.""" + __action_name__ = "load_tf_hub_model" + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + def __init__(self, config): + self._config = config + self.prepared_model = None + + def load_model(self, input_data): + model_name = self._config['model_name'] + model_link = self._config['model_link'] + load = hub.load(model_link) + if 'serving_default' in list(load.signatures.keys()): + self.prepared_model = load.signatures['serving_default'] + elif 'default' in list(load.signatures.keys()): + self.prepared_model = load.signatures['default'] + else: + signature_keys = sorted(list(load.signatures.keys())) + assert len(signature_keys) > 0, "No signatures for a model {}, url {}".format(model_name, model_link) + self.prepared_model = load.signatures[signature_keys[0]] + self.prepared_model._backref_to_saved_model = load + return self.prepared_model + diff --git a/tests/e2e_tests/common/multiprocessing_utils.py b/tests/e2e_tests/common/multiprocessing_utils.py new file mode 100644 index 00000000000000..e248cfdaa83a4a --- /dev/null +++ b/tests/e2e_tests/common/multiprocessing_utils.py @@ -0,0 +1,104 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import logging as log +import os +import platform +import signal +import sys +import traceback +from logging.handlers import QueueHandler +from multiprocessing import Process, Queue, TimeoutError, ProcessError +from queue import Empty as QueueEmpty +from typing import Callable, Union + +if platform.system() == "Darwin": + # Fix for MacOS + import multiprocessing + multiprocessing.set_start_method("forkserver", True) + + +def _mp_wrapped_func(func: Callable, func_args: list, queue: Queue, logger_queue: Queue): + """ + Wraps callable object with exception handling. Current wrapper is a target for + `multiprocessing_run` function + :param func: see `multiprocessing_run` + :param func_args: see `multiprocessing_run` + :param queue: multiprocessing.Queue(). Used for getting callable object return values + :param logger_queue: multiprocessing.Queue(). 
Used for getting logs from child process in parent process + :return: + """ + + # Remove all handlers from root logger in child process in favor of `QueueHandler` + # to prevent double console logs in stdout + log.getLogger().handlers = [QueueHandler(logger_queue)] + + error_message = "" + res = None + try: + res = func(*func_args) + except: + ex_type, ex_value, tb = sys.exc_info() + error_message = "{tb}\n{ex_type}: {ex_value}".format(tb=''.join(traceback.format_tb(tb)), + ex_type=ex_type.__name__, ex_value=ex_value) + queue.put((error_message, res)) + + +def multiprocessing_run(func: Callable, func_args: list, func_log_name: str, timeout: Union[int, None] = None): + """ + Wraps callable object to a separate process using multiprocessing module + :param func: callable object + :param func_args: list of arguments for callable + :param func_log_name: name of callable used for logging + :param timeout: positive int to limit execution time + :return: return value (or values) from callable object + """ + queue = Queue() + logger_queue = Queue(-1) + process = Process(target=_mp_wrapped_func, args=(func, func_args, queue, logger_queue)) + process.start() + try: + error_message, *ret_args = queue.get(timeout=timeout) + except QueueEmpty: + raise TimeoutError("{func} running timed out!".format(func=func_log_name)) + finally: + queue.close() + + # Extract logs from Queue and pass to root logger + while not logger_queue.empty(): + rec = logger_queue.get() + log.getLogger().handle(rec) + logger_queue.close() + + if process.is_alive(): + process.terminate() + process.join() + else: + exit_signal = multiprocessing_exitcode_to_signal(process.exitcode) + if exit_signal: + raise ProcessError( + "{func} was killed with a signal {signal}".format(func=func_log_name, signal=exit_signal)) + + if error_message: + raise ProcessError("\n{func} running failed: \n{msg}".format(func=func_log_name, msg=error_message)) + + ret_args = ret_args[0] if len(ret_args) == 1 else ret_args # unwrap from list if only 1 item is returned + return ret_args + + +def multiprocessing_exitcode_to_signal(exitcode): + """ + Map multiprocessing exitcode to signals from "signal" module + :param exitcode: multiprocessing exitcode + :return: signal from "signal" if exitcode mapped on signal or None + """ + # Multiprocessing return negative values of signal of the process, but on Win they are positive. + # Bring the value to the positive format. + exit_code = exitcode if os.name == "nt" else -exitcode + if exit_code > 0: + code_map = {int(getattr(signal, sig)): str(getattr(signal, sig)) + for sig in dir(signal) if sig.startswith("SIG")} + exit_signal = code_map[exit_code] if exit_code in code_map else exit_code + else: + exit_signal = None + return exit_signal diff --git a/tests/e2e_tests/common/openvino_resources.py b/tests/e2e_tests/common/openvino_resources.py new file mode 100644 index 00000000000000..ea7e4d4820a383 --- /dev/null +++ b/tests/e2e_tests/common/openvino_resources.py @@ -0,0 +1,211 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +# pylint: disable=import-error,logging-fstring-interpolation,fixme + +""" +The module implements OpenVINOResources class which provide interface for getting paths to various +OpenVINO resources (tools, samples, etc) according to product installation layout. 
+""" + +import logging +import os +import subprocess + +from pathlib import Path +from distutils import spawn + +from e2e_tests.common.config import openvino_root_dir +from e2e_tests.common.sys_info_utils import os_type_is_windows + + +class OpenVINOResourceNotFound(Exception): + """OpenVINO resource not found exception""" + + +class OpenVINOResources: + """Class for getting paths to OpenVINO resources""" + + _resources = {} + _instance = None + + def __new__(cls, *_args, **_kwargs): + """Singleton""" + if not OpenVINOResources._instance: + OpenVINOResources._instance = super(OpenVINOResources, cls).__new__(cls) + return OpenVINOResources._instance + + def __init__(self): + if self._resources: + return + self._log = logging.getLogger(self.__class__.__name__) + + def _check_resource(self, resource_name, resource_path): + """Save resource with specified name, path to self._resources and return True if resource + path exists, return False otherwise""" + if resource_path: + resource_path = Path(resource_path) + if resource_path.exists(): + self._resources[resource_name] = resource_path + self._log.info(f"OpenVINO resource {resource_name} found: {resource_path}") + return True + + self._log.warning(f"OpenVINO resource {resource_name} not found: {resource_path}") + return False + + def _get_executable_from_os_path(self, resource_name, resource_filename): + """Find and return absolute path to resource_name executable from system os PATH""" + if self._resources.get(resource_name): + return self._resources[resource_name] + + if self._check_resource(resource_name, spawn.find_executable(str(resource_filename))): + return self._resources[resource_name] + + raise OpenVINOResourceNotFound(f"OpenVINO resource {resource_name} not found") + + @property + def setupvars(self): + """Return absolute path to OpenVINO setupvars.[bat|sh] script""" + resource_name = "setupvars" + + if self._resources.get(resource_name): + return self._resources[resource_name] + + setupvars = "setupvars.bat" if os_type_is_windows() else "setupvars.sh" + + if os.getenv("OPENVINO_ROOT_DIR"): + if self._check_resource( + resource_name, Path(os.getenv("OPENVINO_ROOT_DIR")) / setupvars + ): + return self._resources[resource_name] + + raise OpenVINOResourceNotFound( + f"OpenVINO resource {resource_name} not found, " + f"OPENVINO_ROOT_DIR environment variable is not set." + ) + + @property + def install_openvino_dependencies(self): + """Return absolute path to OpenVINO install_dependencies/install_openvino_dependencies.sh script""" + resource_name = "install_openvino_dependencies" + + if openvino_root_dir: + if self._check_resource( + resource_name, + Path(openvino_root_dir) + / "install_dependencies" + / "install_openvino_dependencies.sh", + ): + return self._resources[resource_name] + + raise OpenVINOResourceNotFound( + f"OpenVINO resource {resource_name} not found, " + f"OPENVINO_ROOT_DIR environment variable is not set." 
+ ) + + @property + def omz_pytorch_to_onnx_converter(self): + """Return absolute path to omz pytorch to onnx converter""" + resource_name = "model_loader" + + omz_root_path = self.omz_root + if self._check_resource( + resource_name, + omz_root_path + / "internal_scripts" + / "pytorch_to_onnx.py" + ): + return self._resources[resource_name] + + @property + def omz_root(self): + """Return absolute path to OMZ root directory""" + resource_name = "omz_root" + + if self._resources.get(resource_name): + return self._resources[resource_name] + + try: + # pylint: disable=import-outside-toplevel + + # Import only when really called to avoid import errors when OpenVINOResources is + # imported but accuracy checker tool is absent on the system. + from openvino.tools import accuracy_checker + + if self._check_resource( + resource_name, Path(accuracy_checker.__file__).parents[2] / "model_zoo" + ): + return self._resources[resource_name] + except ImportError as exc: # pylint: disable=unused-variable + if os.getenv("OMZ_ROOT"): + print("OMZ ROOT IS: {}".format(os.getenv("OMZ_ROOT"))) + if self._check_resource(resource_name, Path(os.getenv("OMZ_ROOT"))): + return self._resources[resource_name] + + raise OpenVINOResourceNotFound(f"OpenVINO resource {resource_name} not found") + + @property + def omz_info_dumper(self): + """Return absolute path to OMZ info_dumper tool""" + return self._get_executable_from_os_path("omz_info_dumper", "omz_info_dumper") + + @property + def omz_downloader(self): + """Return absolute path to OMZ downloader tool""" + return self._get_executable_from_os_path("omz_downloader", "omz_downloader") + + @property + def omz_converter(self): + """Return absolute path to OMZ converter tool""" + return self._get_executable_from_os_path("omz_converter", "omz_converter") + + @property + def omz_quantizer(self): + """Return absolute path to OMZ quantizer tool""" + return self._get_executable_from_os_path("omz_quantizer", "omz_quantizer") + + @property + def pot(self): + """Return absolute path to Post-training Optimization tool (pot)""" + return self._get_executable_from_os_path("pot", "pot") + + @property + def pot_speech_sample(self): + """Return absolute path to POT speech sample (gna_sample.py)""" + resource_name = "pot_speech_sample" + + if self._resources.get(resource_name): + return self._resources[resource_name] + + try: + # pylint: disable=import-outside-toplevel + + # Import only when really called to avoid import errors when OpenVINOResources is + # imported but pot tool is absent on the system. + from openvino import tools + except ImportError as exc: + raise OpenVINOResourceNotFound(f"OpenVINO resource {resource_name} not found") from exc + + if self._check_resource( + resource_name, + Path(tools.__file__).parent / "pot" / "api" / "samples" / "speech" / "gna_sample.py", + ): + return self._resources[resource_name] + + raise OpenVINOResourceNotFound(f"OpenVINO resource {resource_name} not found") + + def add_setupvars_cmd(self, cmd): + """Return final command line with setupvars script""" + + input_cmd = ( + subprocess.list2cmdline(list(map(str, cmd))) if isinstance(cmd, list) else str(cmd) + ) + input_cmd_escaped = input_cmd.replace('"', '\\"') + + output_cmd = ( + f"call {self.setupvars} && set && {input_cmd}" + if os_type_is_windows() + else f'bash -c ". 
{self.setupvars} && env && {input_cmd_escaped}"' + ) + return output_cmd diff --git a/tests/e2e_tests/common/parsers.py b/tests/e2e_tests/common/parsers.py new file mode 100644 index 00000000000000..696e4050263d77 --- /dev/null +++ b/tests/e2e_tests/common/parsers.py @@ -0,0 +1,41 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import os +import xml.etree.ElementTree + + +def mapping_parser(file): + """ + Parse mapping file if it exists + :param file: Name of mapping file + :return: Dictionary with framework layers as keys and IR layers as values + """ + mapping_dict = {} + if os.path.splitext(file)[1] == '.mapping' and os.path.isfile(file): + xml_tree = xml.etree.ElementTree.parse(file) + xml_root = xml_tree.getroot() + for child in xml_root: + framework_info = child.find('.//framework') + ir_info = child.find('.//IR') + if framework_info is None: + continue + framework_name = framework_info.attrib['name'] + ir_name = ir_info.attrib['name'] if ir_info is not None else None + mapping_dict[framework_name] = ir_name + else: + raise FileNotFoundError("Mapping file was not found at path {}!".format(os.path.dirname(file))) + return mapping_dict + + +def pipeline_cfg_to_string(cfg): + str = "" + for step, actions in cfg.items(): + str += "Step: {}\t\nActions:".format(step) + for action, params in actions.items(): + str += "\n\t\t{}".format(action) + str += "\n\t\tParameters:" + for key, val in params.items(): + str += "\n\t\t\t{}: {}".format(key, val) + str += "\n" + return str diff --git a/tests/e2e_tests/common/platforms.yml b/tests/e2e_tests/common/platforms.yml new file mode 100644 index 00000000000000..f5283aae9da99b --- /dev/null +++ b/tests/e2e_tests/common/platforms.yml @@ -0,0 +1,138 @@ +# CPU / GPU +apl: + CPU: + description: 'ApolloLake' + sockets: 1 + numa_nodes: 1 +cfl: + CPU: + description: 'CoffeeLake' + sockets: 1 + numa_nodes: 1 + GPU: + description: 'GPU (GEN9)' +clx: + CPU: + description: 'CascadeLake/8280' + sockets: 2 + numa_nodes: 2 +clx-ap: + CPU: + description: 'CascadeLake' + sockets: 2 + numa_nodes: 4 +cslx: + CPU: + description: 'CascadeLake/10980' + sockets: 1 + numa_nodes: 1 +dg1: + CPU: + description: 'CoffeeLake/9900' + sockets: 1 + numa_nodes: 1 + GPU: + description: 'dGPU (DG1)' +cpx: + CPU: + description: 'CooperLake' + sockets: 4 + numa_nodes: 4 +halo: + CPU: + description: 'Skylake/8160' + sockets: 1 + numa_nodes: 1 +iclu: + CPU: + description: 'IceLake' + sockets: 1 + numa_nodes: 1 + GPU: + description: 'GPU (GEN11)' +skl: + CPU: + description: 'Skylake' + sockets: 1 + numa_nodes: 1 +sklx: + CPU: + description: 'Skylake/8180' + sockets: 2 + numa_nodes: 2 +skx-avx512: + CPU: + description: 'Skylake' + sockets: 1 + numa_nodes: 1 +skl-e: + CPU: + description: 'Skylake' + sockets: 1 + numa_nodes: 1 +tglu: + CPU: + description: 'TigerLake' + sockets: 1 + numa_nodes: 1 + GPU: + description: 'GPU (GEN12)' +whl: + CPU: + description: 'WhiskyLake' + sockets: 1 + numa_nodes: 1 +epyc: + CPU: + description: 'AMD EPYC 7601' + sockets: 2 + numa_nodes: 8 + + +# VPU +myriad: + MYRIAD: + description: 'Myriad 2 Stick' + 'HETERO:MYRIAD,CPU': + description: 'Myriad 2 Stick' +myriad-evm: + MYRIAD: + description: 'Myriad 2 Board' + 'HETERO:MYRIAD,CPU': + description: 'Myriad 2 Board' +myriadx: + MYRIAD: + description: 'Myriad X Stick' + 'HETERO:MYRIAD,CPU': + description: 'Myriad X Stick' +myriadx-evm: + MYRIAD: + description: 'Myriad X Board' + 'HETERO:MYRIAD,CPU': + description: 'Myriad X Board' +myriadx-pc: + MYRIAD: + description: 
'Myriad X Board 2085' + 'HETERO:MYRIAD,CPU': + description: 'Myriad X Board 2085' +hddl: + HDDL: + description: 'HDDL-R' + + +# VCAA +vcaa: + HDDL: + description: 'Harker Heights PCI-e board CPU/GPU/HDDL' + + +# FPGA +fpgadcp: + 'HETERO:FPGA,CPU': + description: 'Rush Creek' +hddlf: + 'HETERO:FPGA,CPU': + description: 'PyramidLake' +hddlf_SG2: + 'HETERO:FPGA,CPU': + description: 'PyramidLake SG2' diff --git a/tests/e2e_tests/common/plugins/__init__.py b/tests/e2e_tests/common/plugins/__init__.py new file mode 100644 index 00000000000000..8ba81a92b19c53 --- /dev/null +++ b/tests/e2e_tests/common/plugins/__init__.py @@ -0,0 +1,3 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + diff --git a/tests/e2e_tests/common/plugins/common/__init__.py b/tests/e2e_tests/common/plugins/common/__init__.py new file mode 100644 index 00000000000000..8ba81a92b19c53 --- /dev/null +++ b/tests/e2e_tests/common/plugins/common/__init__.py @@ -0,0 +1,3 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + diff --git a/tests/e2e_tests/common/plugins/common/base_conftest.py b/tests/e2e_tests/common/plugins/common/base_conftest.py new file mode 100644 index 00000000000000..348e0928d46380 --- /dev/null +++ b/tests/e2e_tests/common/plugins/common/base_conftest.py @@ -0,0 +1,121 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Local pytest plugins shared code.""" +import importlib.util +import inspect +import os +from fnmatch import fnmatch +from glob import glob +from inspect import getsourcefile + +from _pytest.mark import MarkDecorator + +from e2e_tests.common.pytest_utils import mark as Mark + +from e2e_tests.test_utils.test_utils import BrokenTestException + + +def apply_glob(paths, file_ext="py"): + """ + Apply glob to paths list. + + If path is file and matches pattern *., add it. + + If path is directory, search for pattern /**/*. recursively. + + If path contains special characters (*, ?, [, ], !), + pass path to glob and add resolved values that match *.. + + :param paths: list of paths + :param file_ext: file extension to filter by (i.e. if "py", only .py + files are returned) + :return: resolved paths + """ + file_pattern = '*.{ext}'.format(ext=file_ext) + globbed_paths = [] + for path in paths: + # resolve files + if os.path.isfile(path): + if fnmatch(path, file_pattern): + globbed_paths.append(path) + # resolve directories + elif os.path.isdir(path): + globbed_paths.extend( + glob( + '{dir}/**/{file}'.format(dir=path, file=file_pattern), + recursive=True)) + # resolve patterns + elif any(special in path for special in ['*', '?', '[', ']', '!']): + resolved = glob(path, recursive=True) + globbed_paths.extend( + [entry for entry in resolved if fnmatch(entry, file_pattern)]) + return list(set(globbed_paths)) + + +def find_tests(modules, attributes): + """ + Find tests given list of modules where to look for. + + If class has all attributes specified, append it to found tests. 
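+
+    Modules that fail to import are not collected as tests; they are returned
+    separately as broken modules so the caller can report the import errors.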
+ + :param modules: .py files with test classes + :param attributes: class attributes that each test class must have + :return: found test classes + """ + modules = apply_glob(modules) + tests = [] + broken_modules = [] + + for module in modules: + name = os.path.splitext(os.path.basename(module))[0] + spec = importlib.util.spec_from_file_location(name, module) + try: + loaded_module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(loaded_module) + except Exception as e: + broken_modules.append((module, str(e))) + continue + classes = inspect.getmembers(loaded_module, predicate=inspect.isclass) + for cls in classes: + if all(getattr(cls[1], attr, False) for attr in attributes): + setattr(cls[1], "definition_path", module) + tests.append(cls[1]) + + return tests, broken_modules + + +def set_pytest_marks(_test, _object, _runner, log): + """ Set pytest markers from object to the test according to test runner. """ + _err = False + if hasattr(_object, '__pytest_marks__'): + for mark in _object.__pytest_marks__: + if isinstance(mark, MarkDecorator): + _test.add_marker(mark) + continue + if not isinstance(mark, Mark): + _err = True + log.error("Current mark '{}' for instance '{}' from '{}' isn't wrapped in 'mark' from '{}'" + .format(mark, str(_object), _object.definition_path, getsourcefile(Mark))) + continue + if mark.target_runner != "all" and mark.target_runner != _runner: + continue + if mark.is_simple_mark: + mark_to_add = str(mark.pytest_mark) + else: + try: + mark_to_add, reason = mark.pytest_mark + except ValueError as ve: + _err = True + log.exception("Error with marks for {}".format(str(_object)), exc_info=ve) + continue + if mark_to_add is None: # skip None values + continue + if not reason: + _err = True + log.error("Mark '{mark}' exists in instance '{instance}' without specified reason" + .format(mark=mark_to_add, instance=str(_object))) + continue + _test.add_marker(mark_to_add) + if _err: + raise BrokenTestException diff --git a/tests/e2e_tests/common/plugins/common/conftest.py b/tests/e2e_tests/common/plugins/common/conftest.py new file mode 100644 index 00000000000000..81333a14b34ca1 --- /dev/null +++ b/tests/e2e_tests/common/plugins/common/conftest.py @@ -0,0 +1,611 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +""" +Basic high-level plugin file for pytest. + +See [Writing plugins](https://docs.pytest.org/en/latest/writing_plugins.html) +for more information. + +This plugin adds the following command-line options: + +* `--modules` - Paths to modules to be run by pytest (these can contain tests, + references, etc.). Format: Unix style pathname patterns or .py files. +* `--env_conf` - Path to environment configuration file. Used to initialize test + environment. Format: yaml file. +* `--test_conf` - Path to test configuration file. Used to parameterize tests. + Format: yaml file. +* `--dry_run` - Specifies that reference collection should not store collected + results to filesystem. +* `--bitstream` - Path to bitstream to ran tests with. +* `--tf_models_version` - TensorFlow models version. 
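+
+Example invocation (illustrative; the values shown are just the declared defaults of
+the corresponding options):
+
+    pytest --modules pipelines --env_conf env_config_local.yml --test_conf test_config_local.yml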
+""" +import json +import logging as log +import os +import platform +import re +import time +from contextlib import contextmanager +from inspect import getsourcefile +from pathlib import Path +import shutil + +# pylint:disable=import-error +import pytest +from jsonschema import validate, ValidationError + +from e2e_tests.test_utils.test_utils import get_framework_from_model_ex +from e2e_tests.test_utils.env_tools import Environment + + +@contextmanager +def import_from(path): + """ Set import preference to path""" + os.sys.path.insert(0, os.path.realpath(path)) + yield + os.sys.path.remove(os.path.realpath(path)) + + +def pytest_addoption(parser): + """Specify command-line options for all plugins""" + if getattr(parser, "after_preparse", False): + return + parser.addoption( + "--modules", + nargs='+', + help="Path to test modules", + default=["pipelines"] + ) + parser.addoption( + "--env_conf", + action="store", + help="Path to environment configuration file", + default="env_config_local.yml" + ) + parser.addoption( + "--test_conf", + action="store", + help="Path to test configuration file", + default="test_config_local.yml" + ) + parser.addoption( + "--dry_run", + action="store_true", + help="Dry run reference collection: not saving to filesystem", + default=False + ) + parser.addoption( + "--collect_output", + action="store", + help="Path to dry run output file", + default=None + ) + parser.addoption( + "--base_rules_conf", + action="store", + help="Path to base test rules configuration file", + default="base_test_rules.yml" + ) + parser.addoption( + "--reshape_rules_conf", + action="store", + help="Path to reshape test rules configuration file", + default="reshape_test_rules.yml" + ) + parser.addoption( + "--dynamism_rules_conf", + action="store", + help="Path to dynamism test rules configuration file", + default="dynamism_test_rules.yml" + ) + parser.addoption( + "--bitstream", + action="store", + help="Bitstream path; run tests for models supported by this bitstream", + default="" + ) + parser.addoption( + "--pregen_irs", + type=Path, + help="Name of IR's mapping file (CSV-formatted) to use pre-generated IRs in tests." + " File and pre-generated IRs will be located in `pregen_irs_path` defined in environment config", + default=None + ) + parser.addoption( + "--ir_gen_time_csv_name", + action="store", + help="Name for csv file with IR generation time", + default=False + ) + parser.addoption( + "--load_net_to_plug_time_csv_name", + action="store", + help="Name for csv file with load net to plugin time", + default=False + ) + parser.addoption( + "--mem_usage_mo_csv_name", + action="store", + help="Name for csv file with MO memory usage information", + default=False + ) + parser.addoption( + "--mem_usage_ie_csv_name", + action="store", + help="Name for csv file with IE memory usage information", + default=False + ) + parser.addoption( + "--gpu_throughput_mode", + action="store_true", + help="Enable GPU_THROUGHPUT_STREAMS mode for multi_request tests", + default=False + ) + parser.addoption( + "--cpu_throughput_mode", + action="store_true", + help="Enable GPU_THROUGHPUT_STREAMS mode for multi_request tests", + default=False + ) + parser.addoption( + "--tf_models_version", + action="store", + help="Specify TensorFlow models version", + default=None + ) + parser.addoption( + "--dynamism_type", + action="store", + help="This option is used in dynamism tests. 
Possible types: negative_ones, range_values", + default=None + ) + parser.addoption( + "--skip_mo_args", + help="List of args to remove from MO command line", + required=False + ) + parser.addoption( + "--dynamic_inference", + help="Enable dynamic inference mode", + action="store_true", + default=False + ) + parser.addoption( + "--db_url", + type=str, + help="Url to send post request to DataBase. http:///api/v1/e2e/push-2-db-facade", + action="store", + default=None + ) + parser.addoption( + '--infer_binary_path', + type=Path, + help='Path to timetest_infer/memtest_infer binary file', + default=None + ) + parser.addoption( + "--consecutive_infer", + action="store_true", + help="This option is used in dynamism tests. Specify if values from input_descriptor should be used", + default=False + ) + parser.addoption( + "--skip_ir_generation", + action="store_true", + help="Load model to IE plugin as is (uses ONNX or PDPD Importer)", + default=False + ) + parser.addoption( + '--inference_precision_hint', + help='Inference Precision hint for device', + required=False + ) + parser.addoption( + "--convert_pytorch_to_onnx", + action="store_true", + help="Whether or not use pytorch to onnx OMZ converter", + default=False + ) + + +@pytest.fixture(scope="session") +def modules(request): + """Fixture function for command-line option.""" + return request.config.getoption('modules') + + +@pytest.fixture(scope="session") +def env_conf(request): + """Fixture function for command-line option.""" + return request.config.getoption('env_conf') + + +@pytest.fixture(scope="session") +def test_conf(request): + """Fixture function for command-line option.""" + return request.config.getoption('test_conf') + + +@pytest.fixture(scope="session") +def dry_run(request): + """Fixture function for command-line option.""" + return request.config.getoption('dry_run') + + +@pytest.fixture(scope="session") +def base_rules_conf(request): + """Fixture function for command-line option.""" + return request.config.getoption('base_rules_conf') + + +@pytest.fixture(scope="session") +def dynamism_rules_conf(request): + """Fixture function for command-line option.""" + return request.config.getoption('dynamism_rules_conf') + + +@pytest.fixture(scope="session") +def reshape_rules_conf(request): + """Fixture function for command-line option.""" + return request.config.getoption('reshape_rules_conf') + + +@pytest.fixture(scope="session") +def bitstream(request): + """Fixture function for command-line option.""" + return request.config.getoption('bitstream') + + +@pytest.fixture(scope="session") +def pregen_irs(request): + """Fixture function for command-line option.""" + path = request.config.getoption('pregen_irs') + if path: + # Create sub-folders and file before tests to make execution via pytest-xdist safer + path = Path(Environment.env['pregen_irs_path']) / path + path.parent.mkdir(parents=True, exist_ok=True) + path.touch(exist_ok=True) + return path + + +@pytest.fixture(scope="session") +def ir_gen_time_csv_name(request): + """Fixture function for command-line option.""" + return request.config.getoption('ir_gen_time_csv_name') + + +@pytest.fixture(scope="session") +def load_net_to_plug_time_csv_name(request): + """Fixture function for command-line option.""" + return request.config.getoption('load_net_to_plug_time_csv_name') + + +@pytest.fixture(scope="session") +def mem_usage_mo_csv_name(request): + """Fixture function for command-line option.""" + return request.config.getoption('mem_usage_mo_csv_name') + + 
+@pytest.fixture(scope="session") +def mem_usage_ie_csv_name(request): + """Fixture function for command-line option.""" + return request.config.getoption('mem_usage_ie_csv_name') + + +@pytest.fixture(scope="session") +def gpu_throughput_mode(request): + """Fixture function for command-line option.""" + if request.config.getoption('gpu_throughput_mode') and request.config.getoption('cpu_throughput_mode'): + raise ValueError("gpu_throughput_mode and cpu_throughput_mode options can't be specified simultaneously") + return request.config.getoption('gpu_throughput_mode') + + +@pytest.fixture(scope="session") +def cpu_throughput_mode(request): + """Fixture function for command-line option.""" + if request.config.getoption('gpu_throughput_mode') and request.config.getoption('cpu_throughput_mode'): + raise ValueError("gpu_throughput_mode and cpu_throughput_mode options can't be specified simultaneously") + return request.config.getoption('cpu_throughput_mode') + + +@pytest.fixture(scope="session") +def dynamism_type(request): + """Fixture function for command-line option.""" + return request.config.getoption('dynamism_type') + + +@pytest.fixture(scope="session") +def infer_binary_path(request): + """Fixture function for command-line option.""" + return request.config.getoption('infer_binary_path') + + +@pytest.fixture(scope="session") +def skip_mo_args(request): + """Fixture function for command-line option.""" + return request.config.getoption('skip_mo_args') + + +@pytest.fixture(scope="session") +def dynamic_inference(request): + """Fixture function for command-line option.""" + return request.config.getoption('dynamic_inference') + + +@pytest.fixture(scope="session") +def consecutive_infer(request): + """Fixture function for command-line option.""" + return request.config.getoption('consecutive_infer') + + +@pytest.fixture(scope="session") +def skip_ir_generation(request): + """Fixture function for command-line option.""" + return request.config.getoption('skip_ir_generation') + + +@pytest.fixture(scope="session") +def inference_precision_hint(request): + """Fixture function for command-line option.""" + return request.config.getoption('inference_precision_hint') + + +@pytest.fixture(scope="session") +def convert_pytorch_to_onnx(request): + """Fixture function for command-line option.""" + return request.config.getoption('convert_pytorch_to_onnx') + + +def pytest_collection_finish(session): + """ Pytest hook for test collection. + Dump list of tests to 'dry_run.csv' file. + :param session: session object + :return: None + """ + if session.config.getoption('collect_output'): + with import_from(getsourcefile(lambda: 0) + '/../../../../common'): + from metrics_utils import write_csv + + collect_output = session.config.getoption('collect_output') or \ + Environment.abs_path('logs_dir', 'dry_run.csv') + collect_only = session.config.getoption('collectonly') + for item in session.items: + # Extract test name + match = re.search(r'\[((\w|\.|-)+)\]', item.name) + + if match and match.group(1): + test_name = match.group(1) + # Write csv + if collect_only: + session_items_params = item._fixtureinfo.name2fixturedefs.get('instance')[0].params + write_csv({'test_filter': test_name}, collect_output, ',') + else: + log.error('Unable to extract test name from string "{}"'.format(item.name)) + + +@pytest.fixture(scope="function") +def prepare_test_info(request, instance): + """ + Fixture for preparing and validating data to submit to a database. 
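+
+    The collected record is validated against a JSON schema and posted to the URL
+    passed via --db_url after the test finishes; without --db_url the upload step
+    is skipped.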
+ """ + setattr(request.node._request, 'test_info', {}) + + test_id = getattr(instance, 'test_id') + network_name = test_id + + # add test info + info = { + # results will be added immediately before uploading to DB in `pytest_runtest_makereport` + 'insertTime': 0, # Current date when call upload to DataBase + 'topLevelLink': '', + 'lowLevelLink': os.getenv('RUN_DISPLAY_URL', 'Local run'), + 'subset': os.getenv('model_type', 'Not set or precommit'), + 'platform': os.getenv('node_selector', 'Undefined'), + 'os': os.getenv('os', 'Undefined'), + 'framework': '', + 'network': network_name, + 'inputsize': '', + 'dynamismType': '', + # TODO: remove 'fusing' key, when this will dropped in DataBase + 'fusing': False, + 'device': getattr(instance, 'device'), + 'precision': getattr(instance, 'precision'), + 'model': '', + 'result': '', + 'duration': 0, + 'links': '', + 'log': '', + 'moTime': 0, + 'moMemory': 0, + 'links2JiraTickets': [], + 'pytestEntrypoint': '', + 'ext': '' + } + request.node._request.test_info.update(info) + + yield request.node._request.test_info + if not request.config.getoption('db_url'): + return + + request.node._request.test_info.update({ + 'insertTime': time.time(), + 'topLevelLink': get_ie_version(), + 'moTime': get_mo_time(request.node._request.test_info['log']), + 'moMemory': get_mo_memory(request.node._request.test_info['log']), + 'model': get_model_path(request.node._request.test_info['log']) + }) + request.node._request.test_info.update({ + 'framework': get_framework_from_model_ex(instance.definition_path) + }) + # TODO: remove 'fusing' key, when this will dropped in DataBase + schema = """ + { + "type": "object", + "properties": { + "insertTime": {"type": "number"}, + "topLevelLink": {"type": "string"}, + "lowLevelLink": {"type": "string"}, + "subset": {"type": "string"}, + "platform": {"type": "string"}, + "os": {"type": "string"}, + "framework": {"type": "string"}, + "network": {"type": "string"}, + "batch": {"type": "integer"}, + "device": {"type": "string"}, + "fusing": {"type": "boolean"}, + "precision": {"type": "string"}, + "result": {"type": "string"}, + "duration": {"type": "number"}, + "links": {"type": "string"}, + "log": {"type": "string"}, + "model": {"type": "string"}, + "moTime": {"type": "number"}, + "moMemory": {"type": "number"}, + "links2JiraTickets": {"type": "array"}, + "pytestEntrypoint": {"type": "string"}, + "ext": {"type": "string"} + }, + "required": ["insertTime", "topLevelLink", "lowLevelLink", "subset", "platform", + "os", "framework", "network", "batch", "device", "precision", + "result", "duration", "links", "log", "model", "moTime", "moMemory", + "links2JiraTickets", "pytestEntrypoint", "ext" ], + "additionalProperties": true + } + """ + + schema = json.loads(schema) + try: + validate(instance=request.node._request.test_info, schema=schema) + except ValidationError: + raise + + upload_db(data=request.node._request.test_info, url=request.config.getoption('db_url')) + + +def upload_db(data, url): + from requests import post + from requests.structures import CaseInsensitiveDict + + headers = CaseInsensitiveDict() + headers["accept"] = "application/json" + headers["Content-Type"] = "application/json" + + resp = post(url, headers=headers, data=json.dumps({'data': [data]})) + + if resp.status_code == 200: + log.info(f'Data successfully uploaded to DB: {url}') + else: + log.error(f'Upload data failed. 
DB return: code - {resp.status_code}\n' + f'Message - {resp.text}') + + +def get_ie_version(): + import openvino.runtime as rt + version = rt.get_version() + return version if version else "Not_found" + + +def get_mo_time(test_log): + pattern_time = r'Total execution time:\s*(\d+\.?\d*)\s*seconds' + mo_time = re.search(pattern_time, test_log) + return float(mo_time.group(1)) if mo_time else 0 + + +def get_mo_memory(test_log): + pattern_memory = r'Memory consumed:\s*(\d+)\s*MB.' + memory = re.search(pattern_memory, test_log) + return float(memory.group(1)) if memory else 0 + + +def get_model_path(test_log): + pattern_path = r'Input model was copied from \s*(\S+)' + model_path = re.search(pattern_path, test_log) + return model_path.group(1) if model_path else 'Model was not found! Please contact with QA team' + + +def set_path_for_pytorch_files(instance, final_path): + instance.ie_pipeline['prepare_model']['prepare_model_for_mo']['torch_model_zoo_path'] = final_path + # if pytorch weights is required for tests we should use new path also for them + weights_path = instance.ie_pipeline['prepare_model']['prepare_model_for_mo'].get('weights') + if weights_path: + weights_path = Path(weights_path) + copied_weights_path = os.path.join(final_path, weights_path.parents[1].name, + weights_path.parents[0].name, weights_path.name) + instance.ie_pipeline['prepare_model']['prepare_model_for_mo']['weights'] = copied_weights_path + return instance + + +@pytest.fixture(scope="function") +def copy_input_files(instance): + """ + Fixture for coping model from shared folder to localhost. + """ + pass + # def wait_copy_finished(path_to_local_inputs, timeout=60): + # isCopied = False + # while timeout > 0: + # if os.path.exists(os.path.join(path_to_local_inputs, 'copy_complete')): + # isCopied = True + # break + # else: + # time.sleep(1) + # timeout -= 1 + # return isCopied + # if 'get_ovc_model' not in instance.ie_pipeline.get('get_ir', "None"): + # return + # # define value to copy + # prefix = os.path.join(instance.environment['input_model_dir'], '') + # if not os.path.exists(prefix): + # os.mkdir(prefix) + # if instance.ie_pipeline.get('load_pytorch_model') or instance.ie_pipeline.get('pytorch_to_onnx'): + # if instance.ie_pipeline.get('load_pytorch_model'): + # if instance.ie_pipeline['load_pytorch_model'].get('custom_pytorch_model_loader'): + # # it's hard to find out what to copy because it could be anything in that case + # model = None + # else: + # model = instance.ie_pipeline['load_pytorch_model']['load_pytorch_model'].get('model-path') + # if instance.ie_pipeline.get('pytorch_to_onnx'): + # model = instance.ie_pipeline['pytorch_to_onnx']['convert_pytorch_to_onnx'].get('model-path') + # # in that case we load model during the test so there is nothing to copy + # if not model: + # return + # else: + # if isinstance(instance.ie_pipeline['get_ir']['get_ovc_model']['model'], str): + # model = Path(instance.ie_pipeline['get_ir']['get_ovc_model']['model']) + # else: + # return + # model = Path(model) + # if os.path.isfile(model): + # input_path = os.path.join(prefix, model.parents[1].name, model.parents[0].name) + # model_path = model.parent + # result_path = os.path.join(input_path, model.name) + # else: + # input_path = os.path.join(prefix, model.parent.name, model.name) + # model_path = model + # result_path = input_path + # + # # copy stage + # tries = 2 + # with log_timestamp('copy model'): + # for i in range(tries): + # try: + # shutil.copytree(model_path, input_path) + # open(os.path.join(input_path, 
'copy_complete'), 'a').close() + # if instance.ie_pipeline.get('prepare_model'): + # instance = set_path_for_pytorch_files(instance, result_path) + # else: + # instance.ie_pipeline['get_ir']['get_ovc_model']['model'] = result_path + # except FileExistsError: + # if wait_copy_finished(input_path): + # if instance.ie_pipeline.get('prepare_model'): + # instance = set_path_for_pytorch_files(instance, result_path) + # else: + # instance.ie_pipeline['get_ir']['get_ovc_model']['model'] = result_path + # except BaseException: + # if i < tries - 1: + # continue + # else: + # raise + # break + # log.info(f'Input model was copied from {model} to {input_path}') diff --git a/tests/e2e_tests/common/plugins/e2e_test/__init__.py b/tests/e2e_tests/common/plugins/e2e_test/__init__.py new file mode 100644 index 00000000000000..8ba81a92b19c53 --- /dev/null +++ b/tests/e2e_tests/common/plugins/e2e_test/__init__.py @@ -0,0 +1,3 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + diff --git a/tests/e2e_tests/common/plugins/e2e_test/conftest.py b/tests/e2e_tests/common/plugins/e2e_test/conftest.py new file mode 100644 index 00000000000000..754b7f3d910842 --- /dev/null +++ b/tests/e2e_tests/common/plugins/e2e_test/conftest.py @@ -0,0 +1,410 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Local pytest plugin for tests execution.""" + +import inspect +import itertools +import logging as log +# pylint:disable=import-error +import re +import os +import sys +import traceback +from copy import copy +from pathlib import Path + +import pytest +import yaml +from _pytest.runner import show_test_item, call_runtest_hook, check_interactive_exception + +import e2e_tests.common.plugins.common.base_conftest as base + +from e2e_tests.test_utils.path_utils import DirLockingHandler +from e2e_tests.test_utils.test_utils import class_factory, BrokenTest, BrokenTestException +from e2e_tests.common import hook_utils +from e2e_tests.common.env_utils import fix_env_conf +from e2e_tests.common.logger import get_logger +from e2e_tests.common.marks import MarkRunType, MarkGeneral +from e2e_tests.test_utils.env_tools import Environment + +logger = get_logger(__name__) + + +def __to_list(value): + """Wrap non-list value in list.""" + if isinstance(value, list): + return value + return [value] + + +def set_env(metafunc): + """Setup test environment.""" + with open(metafunc.config.getoption('env_conf'), "r") as env_conf: + Environment.env = fix_env_conf(yaml.load(env_conf, Loader=yaml.FullLoader), + root_path=str(metafunc.config.rootdir)) + + with open(metafunc.config.getoption('test_conf'), "r") as test_conf: + Environment.tconf = yaml.load(test_conf, Loader=yaml.FullLoader) + + with open(metafunc.config.getoption('base_rules_conf'), "r") as base_rules_conf: + Environment.base_rules = unwrap_rules(yaml.load(base_rules_conf, Loader=yaml.FullLoader)) + + +def unwrap_rules(rules_config): + """Unwrap all rule values in rules config into a cartesian product. 
+ + Example: {device: GPU, precision: [FP32, FP16]} => [{device: GPU, precision: + FP32}, {device: GPU, precision: FP16}] + """ + if not rules_config: + return [] + for i, rules_dict in enumerate(rules_config): + unwrapped_rules = [] + for rule in rules_dict['rules']: + keys = rule.keys() + vals = [] + for value in rule.values(): + vals.append([v for v in __to_list(value)]) + for rule_set in itertools.product(*vals): + unwrapped_rules.append(dict(zip(keys, rule_set))) + rules_config[i]['rules'] = unwrapped_rules + return rules_config + + +def satisfies_rules(parameter_set, rules, filters, can_partially_match=False): + """Check whether parameter_set satisfies rules. + + If there are no rules for such parameter_set, parameter_set is considered + satisfactory (satisfies_rules returns True). + + By default (can_partially_match is False), parameters are filtered if rule + value exactly matches the parameter value (e.g. 'CPU' == 'CPU'). + + If can_partially_match is True, rule value may be a substring of a parameter + value (e.g. 'CP' is substring of 'CPU'). Partial matching is useful when + multiple models with similar name must be filtered, for example: MobileNet + and MobileNet_v2. + """ + + def equal(a, b): + """ + Check if a equals b + or a match the rule 'not b' + """ + if str(a).startswith('not'): + return a.replace('not ', '') != b + return a == b + + def substr(a, b): + """Check if a is substring of b""" + return a in b + + satisfies = True + # filter rules by non-matchable attributes + match = substr if can_partially_match else equal + applicable_rules = rules + for key in filters: + applicable_rules = list(filter(lambda rule: match(rule[key], parameter_set[key]), applicable_rules)) + # if there are no rules left, consider parameter_set satisfactory + if not applicable_rules: + return True + # check whether parameter_set satisfies rules + rule_satisfactions = [] + for rule in applicable_rules: + common_keys = (set(parameter_set.keys()) & set(rule.keys())) - set(filters) + if not common_keys: + continue + # all parameters must match for current rule to be satisfied by + # parameter_set + rule_satisfactions.append( + all(equal(rule[k], parameter_set[k]) for k in common_keys)) + # there must be at least one match (True value) to consider parameter_set + # satisfactory + return satisfies & any(rule_satisfactions) + + +def satisfies_all_rules(values_set, rules_config, can_partially_match=False): + """Check whether values_set satisfies all rules in rules_config. + + This function calls satisfies_rules for each suitable pair of + rules and filters in rules configuration file. + """ + satisfies = True + for rules_dict in rules_config: + rules = __to_list(rules_dict['rules']) + filters = __to_list(rules_dict['filter_by']) + # if key doesn't exist in the values_set, consider rules/filters do + # not apply to these values + if any(key not in values_set for key in filters): + continue + satisfies &= satisfies_rules(values_set, rules, filters, + can_partially_match) + return satisfies + + +def read_test_config(required_args, test_config, rules_config=None): + """Read test configuration file and return cartesian product of found + parameters (filtered and full). 
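+
+    :return: tuple of (list of required-argument combinations that satisfy the
+        rules, dict with the remaining additional arguments)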
+ """ + + def prepare_test_params(keys, values): + params = [] + parameters = itertools.product(*values) + for parameter_set in parameters: + named_params = dict(zip(keys, parameter_set)) + if satisfies_all_rules(named_params, rules_config): + params.append(named_params) + return params + + # sort dictionary items to enforce same order in different python runs + keys = list(test_config.keys()) + vals = list(test_config.values()) + required_args_ind = [i for i, key in enumerate(keys) if key in required_args] + req_keys = [keys[i] for i in required_args_ind] + req_vals = [vals[i] for i in required_args_ind] + req_params = prepare_test_params(req_keys, req_vals) + + addit_args_ind = set(range(len(keys))) - set(required_args_ind) + addit_keys = [keys[i] for i in addit_args_ind] + addit_vals = [vals[i] for i in addit_args_ind] + addit_args = dict(zip(addit_keys, addit_vals)) + + return req_params, addit_args + + +def pytest_generate_tests(metafunc): + """Pytest hook for test generation. + + Generate parameterized tests from discovered modules and test config + parameters. + """ + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.DEBUG, stream=sys.stdout) + set_env(metafunc) + modules = metafunc.config.getoption('modules') + test_classes, broken_modules = base.find_tests(modules, attributes=['__is_test_config__']) + for module in broken_modules: + log.error("Broken module: {}. Import failed with error: {}".format(module[0], module[1])) + + test_cases = [] + test_ids = [] + cpu_throughput_mode = metafunc.config.getoption("cpu_throughput_mode") + gpu_throughput_mode = metafunc.config.getoption("gpu_throughput_mode") + skip_ir_generation = metafunc.config.getoption("skip_ir_generation") + + for test in test_classes: + setattr(test, 'convert_pytorch_to_onnx', metafunc.config.getoption('convert_pytorch_to_onnx')) + required_args = list(inspect.signature(test.__init__).parameters.keys())[1:] + required_args.extend(getattr(metafunc, 'test_add_args_to_parametrize', [])) + params_list, addit_params_dict = read_test_config(required_args, Environment.tconf, Environment.base_rules) + for _params in params_list: + params = copy(_params) + if cpu_throughput_mode and "CPU" not in params["device"]: + continue + if gpu_throughput_mode and "GPU" not in params["device"]: + continue + if not gpu_throughput_mode: + params.pop("gpu_streams", None) + if not cpu_throughput_mode: + params.pop("cpu_streams", None) + + name = test.__name__ + test_id = "{}_{}".format(name, "_".join("{}_{}".format(key, val) for (key, val) in sorted(params.items()))) + + params.update({"skip_ir_generation": skip_ir_generation}) + + try: + test_case = test(**params, **addit_params_dict, **{"required_params": params}, test_id=test_id) + + except Exception as e: + tb = traceback.format_exc() + broken_test = class_factory(cls_name=name, cls_kwargs={"__name__": name, **params, **addit_params_dict, + "required_params": params}, + BaseClass=BrokenTest) + + test_case = broken_test(test_id=test_id, exception=e, + fail_message="Test {} is broken and fails " + "with traceback {}".format(name, tb)) + + params_for_satisfaction = {"model": name, **params} + if satisfies_all_rules(params_for_satisfaction, Environment.base_rules, can_partially_match=False) \ + and not getattr(test_case, "__do_not_run__", False): + test_ids.append(test_id) + test_cases.append(test_case) + + if test_cases: + metafunc.parametrize("instance", test_cases, ids=test_ids) + + +def pytest_collection_modifyitems(session, config, items): + """ + Pytest hook for items 
collection. Adds pytest markers to constructed tests. + + Markers are: + * Test instance name + * "Raw" __pytest_marks__ discover in test instances + * IR generation step parameters (framework, precision) + * Inference step parameters (inference type, batch, device) + """ + + for i in list(items): + if not hasattr(i, 'callspec'): + items.remove(i) + + items.sort(key=lambda item: item.callspec.params['instance'].__class__.__name__) + + logger.info("Preparing tests for test session in the following folder: {}".format(session.startdir)) + + deselected = [] + all_components = {} + all_requirements = {} + required_marker_ids = hook_utils.get_required_marker_ids_for_test_run() + + pytorch_original_tests = [] + for i in items: + test_name = i.name.replace(i.originalname, '').replace('[', '').lower() + pytorch_original_tests.append(test_name.startswith('pytorch_')) + + # this WA required because of: 1. pytorch leaks 2. e2e lack of possibility to put every test in multiprocessing + # on Win and MacOS + pytorch_group_marked = 0 + # if number inside the range will be changed there should be according changes in pytest.ini file + group_names = [f'Pytorch_group_{j}' for j in range(7)] + bucket_size = sum(pytorch_original_tests) // len(group_names) + current_group_idx = 0 + + for num, test in enumerate(items): + instance = test.callspec.params['instance'] + target_test_runner = test.originalname + try: + if pytorch_original_tests[num]: + test.add_marker(group_names[current_group_idx]) + pytorch_group_marked += 1 + if pytorch_group_marked % bucket_size == 0 and pytorch_group_marked < bucket_size * len(group_names): + current_group_idx += 1 + + base.set_pytest_marks(_test=test, _object=instance, _runner=target_test_runner, log=log) + + ie_pipeline = getattr(instance, 'ie_pipeline', {}) + ir_gen = ie_pipeline.get('get_ir', {}) + if ir_gen: + test.add_marker(ir_gen.get('precision', 'FP32')) + # TODO: Handle marks setting from infer step correctly. 
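# --- Standalone sketch (assumed counts, not the plugin code itself) of the
# --- PyTorch test bucketing above: tests are walked in collection order and the
# --- group index advances every `bucket_size` tests, so each Pytorch_group_<n>
# --- mark receives roughly the same number of tests, with any remainder
# --- falling into the last group.
def assign_groups(num_pytorch_tests, num_groups=7):
    # guard against fewer tests than groups (safeguard added in this sketch only)
    bucket_size = max(num_pytorch_tests // num_groups, 1)
    groups, marked, current = [], 0, 0
    for _ in range(num_pytorch_tests):
        groups.append(f'Pytorch_group_{current}')
        marked += 1
        if marked % bucket_size == 0 and marked < bucket_size * num_groups:
            current += 1
    return groups

print(assign_groups(10))  # groups 0..6, extra tests land in Pytorch_group_6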
+ # TODO: Currently 'network_modifiers' added as mark which is useless + # infer_step = next(iter(ie_pipeline.get('infer', {}).values())) + # for name, value in infer_step.items(): + # mark = '{name}:{value}'.format(name=name, value=value) + # # treat bools as flags + # if isinstance(value, bool) and value is True: + # mark = str(name) + # # pass pytest markers and strings "as is" + # elif isinstance(value, (type(pytest.mark.Marker), str)): + # mark = value + # test.add_marker(mark) + except BrokenTestException as e: + test.add_marker("broken_test") + deselected.append(test) + continue + + test_type = MarkRunType.get_test_type_mark(test) + hook_utils.update_components(test) + if hook_utils.deselect(test, test_type, required_marker_ids): + deselected.append(test) + continue + hook_utils.update_markers(test, test_type, all_components, MarkGeneral.COMPONENTS.mark) + hook_utils.update_markers(test, test_type, all_requirements, MarkGeneral.REQIDS.mark) + + if deselected: + hook_utils.deselect_items(items, config, deselected) + + # sort items so that we have the sequence of tests being executed as in MarkRunType: + items[:] = sorted(items, key=lambda element: MarkRunType.test_type_mark_to_int(element)) + + +def call_and_report(item, when, log=True, **kwds): + import logging as lg + lg.basicConfig(format="[ %(levelname)s ] %(message)s", level=lg.DEBUG, stream=sys.stdout) + call = call_runtest_hook(item, when, **kwds) + + hook = item.ihook + report = hook.pytest_runtest_makereport(item=item, call=call) + + if when == "call" and hasattr(report, "wasxfail"): + regexp_marks = [m for m in item.own_markers if hasattr(m, "regexps")] + failed_msgs = {} + pytest_html = item.config.pluginmanager.getplugin('html') + extra = getattr(report, 'extra') + for m in regexp_marks: + matches = [] + xfail_reason = m.kwargs.get('reason', "UNDEFINED") # TODO: update for non-xfail marks + for pattern in m.regexps: + regexp = re.compile(pattern) + matches.append(regexp.search(report.caplog) is not None or + regexp.search(report.longreprtext) is not None) + + if (m.match_mode == "all" and not all(matches)) or (m.match_mode == "any" and not any(matches)): + failed_msgs[xfail_reason] = \ + "Some of regexps '{}' for xfail mark with reason '{}' doesn't match the test log! 
" \ + "Test will be forced to fail!".format(', '.join(m.regexps), xfail_reason) + elif (m.match_mode == "all" and all(matches)) or (m.match_mode == "any" and any(matches)): + jira_link = "https://jira.devtools.intel.com/browse/{}".format(xfail_reason) + extra.append(pytest_html.extras.url(jira_link, name=xfail_reason)) + if getattr(item._request, 'test_info', None): + item._request.test_info.update({"links2JiraTickets": [xfail_reason]}) + break + else: + jira_links = [] + for ticket_num, msg in failed_msgs.items(): + lg.error(msg) + jira_link = "https://jira.devtools.intel.com/browse/{}".format(ticket_num) + extra.append(pytest_html.extras.url(jira_link, name=ticket_num)) + jira_links.append(ticket_num) + report.outcome = "failed" + if getattr(item._request, 'test_info', None): + item._request.test_info.update({"links2JiraTickets": jira_links}) + if hasattr(report, "wasxfail"): + del report.wasxfail + report.extra = extra + + if log: + hook.pytest_runtest_logreport(report=report) + if check_interactive_exception(call, report): + hook.pytest_exception_interact(node=item, call=call, report=report) + + return report + + +@pytest.mark.tryfirst +def pytest_runtest_protocol(item, nextitem): + item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) + # copy of _pytest.runner.runtestprotocol function. Need to use local implementation of call_and_report + log = True + hasrequest = hasattr(item, "_request") + if hasrequest and not item._request: + item._initrequest() + rep = call_and_report(item, "setup", log) + reports = [rep] + if rep.passed: + if item.config.option.setupshow: + show_test_item(item) + if not item.config.option.setuponly: + reports.append(call_and_report(item, "call", log)) + reports.append(call_and_report(item, "teardown", log, nextitem=nextitem)) + # after all teardown hooks have been called + # want funcargs and request info to go away + if hasrequest: + item._request = False + item.funcargs = None + item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location) + return True + + +def pytest_sessionfinish(session, exitstatus): + for dir in Environment.locked_dirs: + dir_locker = DirLockingHandler(dir) + dir_locker.unlock() + + if session.config.option.pregen_irs: + path = (Path(Environment.env['pregen_irs_path']) / session.config.option.pregen_irs).with_suffix('.lock') + if path.exists(): + os.remove(path) diff --git a/tests/e2e_tests/common/plugins/ref_collect/__init__.py b/tests/e2e_tests/common/plugins/ref_collect/__init__.py new file mode 100644 index 00000000000000..8ba81a92b19c53 --- /dev/null +++ b/tests/e2e_tests/common/plugins/ref_collect/__init__.py @@ -0,0 +1,3 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + diff --git a/tests/e2e_tests/common/plugins/ref_collect/conftest.py b/tests/e2e_tests/common/plugins/ref_collect/conftest.py new file mode 100644 index 00000000000000..9131f6432e9b90 --- /dev/null +++ b/tests/e2e_tests/common/plugins/ref_collect/conftest.py @@ -0,0 +1,56 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Local pytest plugin for reference collection.""" +import logging as log + +import yaml + +import e2e_tests.common.plugins.common.base_conftest as base + +from e2e_tests.common.env_utils import fix_env_conf +from e2e_tests.test_utils.env_tools import Environment + + +def set_env(metafunc): + """Setup test environment.""" + with open(metafunc.config.getoption('env_conf'), "r") as f: + Environment.env = fix_env_conf(yaml.load(f, 
Loader=yaml.FullLoader), + root_path=str(metafunc.config.rootdir)) + + +def pytest_generate_tests(metafunc): + """Pytest hook for test generation. + + Generate parameterized tests from discovered modules. + """ + set_env(metafunc) + test_classes, broken_modules = base.find_tests(metafunc.config.getoption('modules'), + attributes=['ref_collection', '__is_test_config__']) + test_case = None + for module in broken_modules: + log.error("Broken module: {}. Import failed with error: {}".format(module[0], module[1])) + test_cases = [] + test_ids = [] + for test in test_classes: + # TODO: Add broken tests handling like in e2e_tests/conftest.py when `test` instance creation will be added + name = test.__name__ + skip_ir_generation = metafunc.config.getoption("skip_ir_generation") + try: + test_case = test(test_id=name, batch=1, device="CPU", precision="FP32", + sequence_length=1, qb=8, device_mode="GNA_AUTO", + skip_ir_generation=skip_ir_generation).ref_collection + except Exception as e: + log.warning(f"Test with name {name} failed to add in row with exception {e}") + + test_ids.append(name) + test_cases.append(test_case) + metafunc.parametrize("reference", test_cases, ids=test_ids) + + +def pytest_collection_modifyitems(items): + """ + Pytest hook for items collection + """ + # Sort test cases to support tests' run via pytest-xdist + items.sort(key=lambda item: item.callspec.params['reference'].__class__.__name__) diff --git a/tests/e2e_tests/common/plugins/reshape_tests/__init__.py b/tests/e2e_tests/common/plugins/reshape_tests/__init__.py new file mode 100644 index 00000000000000..8ba81a92b19c53 --- /dev/null +++ b/tests/e2e_tests/common/plugins/reshape_tests/__init__.py @@ -0,0 +1,3 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + diff --git a/tests/e2e_tests/common/plugins/reshape_tests/conftest.py b/tests/e2e_tests/common/plugins/reshape_tests/conftest.py new file mode 100644 index 00000000000000..a199004c7b1ce9 --- /dev/null +++ b/tests/e2e_tests/common/plugins/reshape_tests/conftest.py @@ -0,0 +1,216 @@ +import inspect +import itertools +import logging as log +import os +import sys +import traceback +from contextlib import contextmanager +from copy import copy, deepcopy +from types import SimpleNamespace + +import pytest +import yaml + +import e2e_tests.common.plugins.common.base_conftest as base +from e2e_tests.test_utils.reshape_tests_utils import should_run_reshape, get_reshape_configurations, \ + get_reshape_pipeline_pairs, batch_was_changed +from e2e_tests.test_utils.test_utils import class_factory, BrokenTest, BrokenTestException +from e2e_tests.common.env_utils import fix_env_conf +from e2e_tests.common.plugins.e2e_test.conftest import satisfies_all_rules, unwrap_rules +from e2e_tests.test_utils.env_tools import Environment + + +@contextmanager +def import_from(path): + """ Set import preference to path""" + os.sys.path.insert(0, os.path.realpath(path)) + yield + os.sys.path.remove(os.path.realpath(path)) + + +def set_env_for_reshape(metafunc): + """Setup test environment.""" + with open(metafunc.config.getoption('env_conf'), "r") as env_conf: + Environment.env = fix_env_conf(yaml.load(env_conf, Loader=yaml.FullLoader), + root_path=str(metafunc.config.rootdir)) + + with open(metafunc.config.getoption('test_conf'), "r") as test_conf: + Environment.tconf = yaml.load(test_conf, Loader=yaml.FullLoader) + + with open(metafunc.config.getoption('reshape_rules_conf'), "r") as reshape_rules_conf: + Environment.reshape_rules = 
unwrap_rules(yaml.load(reshape_rules_conf, Loader=yaml.FullLoader)) + + with open(metafunc.config.getoption('dynamism_rules_conf'), "r") as dynamism_rules_conf: + Environment.dynamism_rules = unwrap_rules(yaml.load(dynamism_rules_conf, Loader=yaml.FullLoader)) + + +def read_reshape_test_config(required_args, test_config, reshape_rules_config=None): + """Read test configuration file and return cartesian product of found + parameters (filtered and full). + """ + + def prepare_test_params(keys, values): + params = [] + parameters = itertools.product(*values) + for parameter_set in parameters: + named_params = dict(zip(keys, parameter_set)) + if satisfies_all_rules(named_params, reshape_rules_config): + params.append(named_params) + return params + + # sort dictionary items to enforce same order in different python runs + keys = list(test_config.keys()) + vals = list(test_config.values()) + required_args_ind = [i for i, key in enumerate(keys) if key in required_args] + req_keys = [keys[i] for i in required_args_ind] + req_vals = [vals[i] for i in required_args_ind] + req_params = prepare_test_params(req_keys, req_vals) + + addit_args_ind = set(range(len(keys))) - set(required_args_ind) + addit_keys = [keys[i] for i in addit_args_ind] + addit_vals = [vals[i] for i in addit_args_ind] + addit_args = dict(zip(addit_keys, addit_vals)) + + return req_params, addit_args + + +def pytest_generate_tests(metafunc): + """Pytest hook for test generation. + + Generate parameterized tests from discovered modules and test config + parameters. + """ + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.DEBUG, stream=sys.stdout) + set_env_for_reshape(metafunc) + reshape_test_classes, broken_modules = base.find_tests(metafunc.config.getoption('modules'), + attributes=['__is_test_config__']) + for module in broken_modules: + log.error("Broken module: {}. 
Import failed with error: {}".format(module[0], module[1])) + + reshape_test_cases = [] + reshape_test_ids = [] + reshape_configurations_list = [] + dynamism_type = metafunc.config.getoption('dynamism_type') + consecutive_infer = metafunc.config.getoption('consecutive_infer') + skip_ir_generation = metafunc.config.getoption('skip_ir_generation') + + # batch was set explicitly because reshape and dynamism tests do not use this parameter, + # but it is required in e2e + if len(Environment.tconf['batch']) > 1 or Environment.tconf['batch'][0] != 1: + Environment.tconf['batch'] = [1] + log.warning("batch was set explicitly to '1' because reshape and dynamism tests do not use this parameter," + " but it is required to be in e2e") + + for reshape_test in reshape_test_classes: + required_args = [arg for arg in inspect.signature(reshape_test.__init__).parameters.keys()] + required_args.extend(getattr(metafunc, 'test_add_args_to_parametrize', [])) + rules = Environment.dynamism_rules if dynamism_type == "negative_ones" or dynamism_type == "range_values" \ + else Environment.reshape_rules + + params_list, addit_params_dict = read_reshape_test_config(required_args, Environment.tconf, rules) + + for _params in params_list: + params = copy(_params) + + name = reshape_test.__name__ + test_id = "{}_{}".format(name, "_".join( + "{}_{}".format(key, val) for (key, val) in sorted(params.items()) if key not in ['batch'])) + params.update({"skip_ir_generation": skip_ir_generation}) + try: + reshape_test_case = reshape_test(**params, **addit_params_dict, **{"required_params": params}, + test_id=test_id) + if not should_run_reshape(reshape_test_case): + break + configurations = get_reshape_configurations(reshape_test_case, dynamism_type) + + except Exception as e: + configurations = [SimpleNamespace(shapes={}, changed_dims={}, layout={}, default_shapes={})] + tb = traceback.format_exc() + broken_test = class_factory(cls_name=name, cls_kwargs={"__name__": name, **params, **addit_params_dict, + "required_params": params}, BaseClass=BrokenTest) + reshape_test_case = broken_test(test_id=test_id, exception=e, + fail_message="Test {} is broken and fails " + "with traceback {}".format(name, tb)) + + if not getattr(reshape_test_case, "__do_not_run__", False): + if configurations: + for configuration in configurations: + configuration.skip_ir_generation = skip_ir_generation + params_for_satisfaction = {"model": name, **params} + + if dynamism_type == "negative_ones" or dynamism_type == "range_values": + reshape_test_id = test_id + "_".join( + "_{}_{}".format(k, v[0:]) for (k, v) in configuration.shapes.items()) + if satisfies_all_rules(params_for_satisfaction, rules, can_partially_match=False): + reshape_test_ids.append(reshape_test_id) + reshape_test_cases.append(reshape_test_case) + configuration.consecutive_infer = consecutive_infer + configuration.dynamism_type = dynamism_type + reshape_configurations_list.append(configuration) + else: + requested_reshape_pairs = get_reshape_pipeline_pairs(reshape_test_case) + for reshape_pair in requested_reshape_pairs: + configuration = deepcopy(configuration) + reshape_test_id = test_id + "_".join( + "_{}_{}".format(k, v[0:]) for (k, v) in configuration.shapes.items()) + # check if there a point to run IE_SBS pipeline + if 'IE_SBS' in reshape_pair: + batch = batch_was_changed(configuration.shapes, configuration.changed_dims, + configuration.layout, configuration.default_shapes) + if not batch: + continue + + configuration.reshape_pair = reshape_pair + reshape_test_id = 
reshape_test_id + "{}".format(reshape_pair) + if satisfies_all_rules(params_for_satisfaction, rules, can_partially_match=False): + reshape_test_ids.append(reshape_test_id) + reshape_test_cases.append(reshape_test_case) + reshape_configurations_list.append(configuration) + else: + pass + + if reshape_test_cases: + pairs_of_shape_and_test_case = list(zip(reshape_test_cases, reshape_configurations_list)) + metafunc.parametrize(argnames='instance,configuration', + argvalues=pairs_of_shape_and_test_case, + ids=reshape_test_ids) + + +def pytest_collection_modifyitems(items): + """ Pytest hook for items collection. """ + + for i in list(items): + if not hasattr(i, 'callspec'): + items.remove(i) + + items.sort(key=lambda item: (item.callspec.params['instance'].batch, + item.callspec.params['instance'].__class__.__name__)) + + pytorch_original_tests = [] + for i in items: + test_name = i.name.replace(i.originalname, '').replace('[', '').lower() + pytorch_original_tests.append(test_name.startswith('pytorch')) + + # this WA required because of: 1. pytorch leaks 2. e2e lack of possibility to put every test in multiprocessing + # on Win and MacOS + pytorch_group_marked = 0 + # if number inside the range will be changed there should be according changes in pytest.ini file + group_names = [f'Pytorch_group_{j}' for j in range(7)] + bucket_size = sum(pytorch_original_tests) // len(group_names) + current_group_idx = 0 + + for num, test in enumerate(items): + instance = test.callspec.params['instance'] + target_test_runner = test.originalname + + try: + if pytorch_original_tests[num]: + test.add_marker(group_names[current_group_idx]) + pytorch_group_marked += 1 + if pytorch_group_marked % bucket_size == 0 and pytorch_group_marked < bucket_size * len(group_names): + current_group_idx += 1 + + base.set_pytest_marks(_test=test, _object=instance, _runner=target_test_runner, log=log) + except BrokenTestException as e: + test.add_marker("broken_test") + continue diff --git a/tests/e2e_tests/common/postprocessors/YOLO.py b/tests/e2e_tests/common/postprocessors/YOLO.py new file mode 100644 index 00000000000000..c54bfd15244629 --- /dev/null +++ b/tests/e2e_tests/common/postprocessors/YOLO.py @@ -0,0 +1,310 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import math + +import numpy as np + +from .provider import ClassProvider + +PRECOMPUTED_ANCHORS = { + 'yolo_v2': [1.3221, 1.73145, 3.19275, 4.00944, 5.05587, 8.09892, 9.47112, 4.84053, 11.2364, 10.0071], + 'tiny_yolo_v2': [1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52], + # TODO Understand why for tiny used 'yolo_v3' anchors + 'yolo_v3': [ + 10.0, 13.0, 16.0, 30.0, 33.0, 23.0, 30.0, 61.0, 62.0, 45.0, 59.0, 119.0, 116.0, 90.0, 156.0, 198.0, 373.0, 326.0 + ] +} + + +class YOLOV1Parser(ClassProvider): + __action_name__ = "parse_yolo_V1_region" + + def __init__(self, config): + self.classes = config['classes'] + self.coords = config['coords'] + self.num = config['num'] + self.grid = config['grid'] + + def apply(self, prediction): + probability_size = 980 + confidence_size = 98 + boxes_size = 392 + + cells_x, cells_y = self.grid + classes = self.classes + objects_per_cell = self.num + + parsed_result = {} + for layer, layer_data in prediction.items(): + parsed_result[layer] = [] + for b in range(layer_data.shape[0]): + batch_data = [] + data = layer_data[b] + assert probability_size + confidence_size + boxes_size == data.shape[0], "Wrong input data shape" + + prob, scale, boxes = np.split(data, [probability_size, 
probability_size + confidence_size]) + + prob = np.reshape(prob, (cells_y, cells_x, classes)) + scale = np.reshape(scale, (cells_y, cells_x, objects_per_cell)) + boxes = np.reshape(boxes, (cells_y, cells_x, objects_per_cell, 4)) + + probabilities = np.zeros((cells_y, cells_x, objects_per_cell, classes + 4)) + for cls in range(classes): + probabilities[:, :, 0, cls] = np.multiply(prob[:, :, cls], scale[:, :, 0]) + probabilities[:, :, 1, cls] = np.multiply(prob[:, :, cls], scale[:, :, 1]) + + for i, j, k in np.ndindex((cells_x, cells_y, objects_per_cell)): + box = boxes[j, i, k] + box = [(box[0] + i) / float(cells_x), (box[1] + j) / float(cells_y), box[2] ** 2, box[3] ** 2] + + label = np.argmax(probabilities[j, i, k, :classes]) + score = probabilities[j, i, k, label] + x_min = box[0] - box[2] / 2.0 + y_min = box[1] - box[3] / 2.0 + x_max = box[0] + box[2] / 2.0 + y_max = box[1] + box[3] / 2.0 + + batch_data.append({"class": label, "xmin": x_min, "ymin": y_min, + "xmax": x_max, "ymax": y_max, "prob": score}) + parsed_result[layer].append(batch_data) + + return parsed_result + + +class YOLOV2Parser(ClassProvider): + __action_name__ = "parse_yolo_V2_region" + + def __init__(self, config): + self.classes = config['classes'] + self.coords = config['coords'] + self.num = config['num'] + self.grid = config['grid'] + self.anchors = config.get('anchors', PRECOMPUTED_ANCHORS["yolo_v2"]) + self.scale_threshold = config.get('scale_threshold', 0.001) + + @staticmethod + def _entry_index(w, h, n_coords, n_classes, pos, entry): + row = pos // (w * h) + col = pos % (w * h) + return row * w * h * (n_classes + n_coords + 1) + entry * w * h + col + + @staticmethod + def get_anchors_offset(x): + return int(6 * (2 - (math.log2(x / 13)))) + + def apply(self, data): + parsed_result = {"yolo_v2_parsed": []} + batches = max([l_data.shape[0] for l, l_data in data.items()]) + for b in range(batches): + parsed_result["yolo_v2_parsed"].append([]) + for layer, layer_data in data.items(): + for b in range(layer_data.shape[0]): + detections = layer_data[b] + parsed = self._parse_yolo_v2_results(detections) + parsed_result["yolo_v2_parsed"][b].extend(parsed) + + return parsed_result + + def _parse_yolo_v2_results(self, predictions): + cells_x, cells_y = self.grid + result = [] + + for y, x, n in np.ndindex((cells_y, cells_x, self.num)): + index = n * cells_y * cells_x + y * cells_x + x + + box_index = self._entry_index(cells_x, cells_y, self.coords, self.classes, index, 0) + obj_index = self._entry_index(cells_x, cells_y, self.coords, self.classes, index, self.coords) + + scale = predictions[obj_index] + + box = [ + (x + predictions[box_index + 0 * (cells_y * cells_x)]) / cells_x, + (y + predictions[box_index + 1 * (cells_y * cells_x)]) / cells_y, + np.exp(predictions[box_index + 2 * (cells_y * cells_x)]) * self.anchors[2 * n + 0] / cells_x, + np.exp(predictions[box_index + 3 * (cells_y * cells_x)]) * self.anchors[2 * n + 1] / cells_y + ] + + classes_prob = np.empty(self.classes) + for cls in range(self.classes): + cls_index = self._entry_index(cells_x, cells_y, self.coords, self.classes, index, + self.coords + 1 + cls) + classes_prob[cls] = predictions[cls_index] + + classes_prob = classes_prob * scale + + label = np.argmax(classes_prob) + score = classes_prob[label] + x_min = box[0] - box[2] / 2.0 + y_min = box[1] - box[3] / 2.0 + x_max = box[0] + box[2] / 2.0 + y_max = box[1] + box[3] / 2.0 + + result.append({"class": label, "xmin": x_min, "ymin": y_min, + "xmax": x_max, "ymax": y_max, "prob": score}) + return result + 
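# --- Tiny illustration with assumed values (not part of the test suite) of the
# --- yolo_v2 box decoding performed in _parse_yolo_v2_results above: raw
# --- predictions are offset by the grid-cell position, and width/height are
# --- scaled by the precomputed anchors relative to the grid size.
import numpy as np

def decode_box(tx, ty, tw, th, col, row, anchor_w, anchor_h, grid=13):
    x_center = (col + tx) / grid
    y_center = (row + ty) / grid
    w = np.exp(tw) * anchor_w / grid
    h = np.exp(th) * anchor_h / grid
    return {"xmin": x_center - w / 2, "ymin": y_center - h / 2,
            "xmax": x_center + w / 2, "ymax": y_center + h / 2}

# cell (6, 6) with the first yolo_v2 anchor pair (1.3221 x 1.73145)
print(decode_box(0.5, 0.5, 0.0, 0.0, col=6, row=6, anchor_w=1.3221, anchor_h=1.73145))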
+ +class YOLOV3Parser(ClassProvider): + __action_name__ = "parse_yolo_V3_region" + + def __init__(self, config): + self.classes = config['classes'] + self.coords = config['coords'] + self.masks_length = config['masks_length'] + self.input_w = config['input_w'] + self.input_h = config['input_h'] + self.scale_threshold = config.get('scale_threshold', 0.001) + self.anchors = PRECOMPUTED_ANCHORS["yolo_v3"] + + @staticmethod + def _entry_index(side, coord, classes, location, entry): + side_power_2 = side ** 2 + n = location // side_power_2 + loc = location % side_power_2 + return int(side_power_2 * (n * (coord + classes + 1) + entry) + loc) + + @staticmethod + def get_anchors_offset(x): + return int(6 * (2 - (math.log2(x / 13)))) + + def _parse_yolo_v3_results(self, prediction): + cells_x, cells_y = prediction.shape[1:] + + assert cells_y == cells_x, "Incorrect YOLO Region! Grid size sides are not equal" + side = cells_x + predictions = prediction.flatten() + parsed_result = [] + + side_square = cells_x * cells_y + + for i in range(side_square): + row = i // side + col = i % side + for n in range(self.masks_length): + obj_index = self._entry_index(side, self.coords, self.classes, n * side_square + i, + self.coords) + scale = predictions[obj_index] + if scale < self.scale_threshold: + continue + box_index = self._entry_index(side, self.coords, self.classes, n * side_square + i, 0) + x = (col + predictions[box_index + 0 * side_square]) / side + y = (row + predictions[box_index + 1 * side_square]) / side + # Value for exp is very big number in some cases so following construction is using here + try: + w_exp = math.exp(predictions[box_index + 2 * side_square]) + h_exp = math.exp(predictions[box_index + 3 * side_square]) + except OverflowError: + continue + w = w_exp * self.anchors[self.get_anchors_offset(side) + 2 * n] / self.input_w + h = h_exp * self.anchors[self.get_anchors_offset(side) + 2 * n + 1] / self.input_h + + for cls_id in range(self.classes): + class_index = self._entry_index(side, self.coords, self.classes, n * side_square + i, + self.coords + 1 + cls_id) + confidence = scale * predictions[class_index] + + x_min = x - w / 2 + y_min = y - h / 2 + x_max = x_min + w + y_max = y_min + h + + parsed_result.append({"class": cls_id, "xmin": x_min, "ymin": y_min, + "xmax": x_max, "ymax": y_max, "prob": confidence}) + + return parsed_result + + def apply(self, data): + result = {"yolo_v3_parsed": []} + batches = max([l_data.shape[0] for l, l_data in data.items()]) + for b in range(batches): + result["yolo_v3_parsed"].append([]) + for layer, layer_data in data.items(): + for b in range(layer_data.shape[0]): + detections = layer_data[b] + parsed = self._parse_yolo_v3_results(detections) + result["yolo_v3_parsed"][b].extend(parsed) + + return result + + +def logistic_activate(x): + return 1. / (1. 
+ math.exp(-x)) + + +class YOLORegion(ClassProvider): + __action_name__ = "yolo_region" + + def __init__(self, config): + self.classes = config.get('classes') + self.coords = config.get('coords') + self.grid = config.get('grid') + self.masks_length = config.get('masks_length', 3) + self.do_softmax = bool(config.get("do_softmax", True)) + self.num = config.get('num') if self.do_softmax else self.masks_length + + @staticmethod + def _entry_index(width, height, coords, classes, outputs, batch, location, entry): + n = location // (width * height) + loc = location % (width * height) + return batch * outputs + n * width * height * (coords + classes + 1) + entry * width * height + loc + @staticmethod + def _logistic_activate(x): + return 1. / (1. + math.exp(-x)) + @staticmethod + def _softmax(data, B, C, H, W): + dest_data = data.copy() + for b in range(B): + for i in range(H * W): + max_val = data[b * C * H * W + i] + for c in range(C): + val = data[b * C * H * W + c * H * W + i] + max_val = max(val, max_val) + exp_sum = 0 + for c in range(C): + dest_data[b * C * H * W + c * H * W + i] = math.exp(data[b * C * H * W + c * H * W + i] - max_val) + exp_sum += dest_data[b * C * H * W + c * H * W + i] + for c in range(C): + dest_data[b * C * H * W + c * H * W + i] = dest_data[b * C * H * W + c * H * W + i] / exp_sum + return dest_data + + def apply(self, data): + for layer, layer_data in data.items(): + + B, C, IH, IW = layer_data.shape + assert IH == IW, "Incorrect data layout! Input data should be in 'NCHW' format" + + if self.do_softmax: + end_index = IW * IH + else: + end_index = IW * IH * (self.classes + 1) + + inputs_size = IH * IW * self.num * (self.classes + self.coords + 1) + + dst_data = layer_data.flatten() + for b in range(B): + for n in range(self.num): + index = self._entry_index(width=IW, height=IH, coords=self.coords, classes=self.classes, + location=n * IW * IH, entry=0, outputs=inputs_size, batch=b) + for i in range(index, index + 2 * IW * IH): + dst_data[i] = self._logistic_activate(dst_data[i]) + + index = self._entry_index(width=IW, height=IH, coords=self.coords, classes=self.classes, + location=n * IW * IH, entry=self.coords, outputs=inputs_size, batch=b) + + for i in range(index, index + end_index): + dst_data[i] = self._logistic_activate(dst_data[i]) + + if self.do_softmax: + index = self._entry_index(IW, IH, self.coords, self.classes, inputs_size, 0, 0, self.coords + 1) + batch_offset = inputs_size // self.num + for b in range(B * self.num): + dst_data[index + b * batch_offset:] = self._softmax(data=dst_data[index + b * batch_offset:], + B=1, C=self.classes, H=IH, W=IW) + + if self.do_softmax: + data[layer] = dst_data.reshape((B, -1)) + else: + data[layer] = dst_data.reshape((B, C, IH, IW)) + + return data diff --git a/tests/e2e_tests/common/postprocessors/__init__.py b/tests/e2e_tests/common/postprocessors/__init__.py new file mode 100644 index 00000000000000..1f6dcc363a3def --- /dev/null +++ b/tests/e2e_tests/common/postprocessors/__init__.py @@ -0,0 +1,13 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from . import YOLO +from . import classification +from . import common +from . import ctc +from . import filters +from . import image_modifications +from . import mask_rcnn +from . import object_detection +from . 
import semantic_segmentation +from .provider import StepProvider diff --git a/tests/e2e_tests/common/postprocessors/classification.py b/tests/e2e_tests/common/postprocessors/classification.py new file mode 100644 index 00000000000000..6087ea1262870c --- /dev/null +++ b/tests/e2e_tests/common/postprocessors/classification.py @@ -0,0 +1,37 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Classification postprocessor.""" +from .provider import ClassProvider +import numpy as np +from collections import OrderedDict + + +class ParseClassification(ClassProvider): + """Classification parser.""" + __action_name__ = "parse_classification" + + def __init__(self, config): + self.target_layers = config.get("target_layers") + self.labels_offset = config.get("labels_offset", 0) + + def apply(self, data): + """Parse classification data applying optional labels offset.""" + predictions = {} + apply_to = self.target_layers if self.target_layers else data.keys() + for layer in apply_to: + value = data[layer] + predictions[layer] = [] + for batch in range(value.shape[0]): + # exclude values at the beginning with labels_offset + # squeeze data for such shape of ie results like: (1, 1000, 1, 1). In general shape: (1, 1000) + prediction = value[batch][self.labels_offset:] + prediction = np.squeeze(prediction) if prediction.ndim > 1 else prediction + assert prediction.ndim == 1,\ + "1D data expected, got data of shape {} for layer {}, batch {}".format( + prediction.shape, layer, batch) + predictions[layer].append( + OrderedDict( + zip(np.argsort(prediction)[::-1], + np.sort(prediction)[::-1]))) + return predictions diff --git a/tests/e2e_tests/common/postprocessors/common.py b/tests/e2e_tests/common/postprocessors/common.py new file mode 100644 index 00000000000000..642ba321361fb7 --- /dev/null +++ b/tests/e2e_tests/common/postprocessors/common.py @@ -0,0 +1,229 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Common postprocessors.""" +import numpy as np + +from e2e_tests.common.preprocessors.preprocessors import SliceData, Normalize, CustomPreproc, RemoveLayersFromInputData, \ + RenameInputs, Squeeze +from .provider import ClassProvider + + +class Squeeze(ClassProvider, Squeeze): + """Squeezing postprocessor. + + Implementation duplicates Squeeze preprocessor. + """ + __action_name__ = "squeeze" + pass + + +class AlignWithBatch(ClassProvider): + """Batch alignment postprocessor. + + Duplicates 1-batch data BATCH number of times. + """ + __action_name__ = "align_with_batch" + + def __init__(self, config): + self.batch = config['batch'] + self.batch_dim = config.get('batch_dim', 0) + self.expand_dims = config.get('expand_dims', False) + self.target_layers = config.get('target_layers', None) + self.axis = config.get('axis', [self.batch_dim]) + + def apply(self, data): + """Apply batch alignment (duplication) to data.""" + apply_to = self.target_layers if self.target_layers else data.keys() + for layer in apply_to: + if self.expand_dims: + data[layer] = np.expand_dims(data[layer], axis=self.batch_dim) + for axis in self.axis: + data[layer] = np.repeat(data[layer], self.batch, axis=axis) + return data + + +class FilterTorchData(ClassProvider): + """Batch alignment postprocessor. + + Filters torch outputs and converts them to numpy format. 
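# --- Standalone example with toy scores (not a real model output) of the
# --- ordering produced by ParseClassification above: class indices sorted by
# --- descending score, so the first key is the top-1 prediction.
from collections import OrderedDict
import numpy as np

scores = np.array([0.05, 0.7, 0.25])  # pretend probabilities for 3 classes
ranked = OrderedDict(zip(np.argsort(scores)[::-1], np.sort(scores)[::-1]))
print(ranked)              # OrderedDict([(1, 0.7), (2, 0.25), (0, 0.05)])
print(next(iter(ranked)))  # top-1 class index -> 1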
+ """ + __action_name__ = "filter_torch_data" + + def __init__(self, config): + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + apply_to = self.target_layers if self.target_layers else data.keys() + filtered_data = {} + for layer in apply_to: + filtered_data[layer] = data[layer].detach().numpy() + return filtered_data + + +class PermuteShape(ClassProvider): + """Shape permutation postprocessor. + + Permutes data shape using ORDER value. + """ + __action_name__ = "permute_shape" + + def __init__(self, config): + self.order = config["order"] + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + """Apply np.transpose to data.""" + apply_to = self.target_layers if self.target_layers else data.keys() + for layer in apply_to: + data[layer] = np.transpose(data[layer], self.order) + return data + + +class RemoveLayer(ClassProvider): + """Layer removal postprocessor. + + Removes layer from data dictionary by name. + """ + __action_name__ = "remove_layer" + + def __init__(self, config): + self.target_layers = config.get('layers_to_remove', None) + + def apply(self, data): + if self.target_layers: + for layer in self.target_layers: + data.pop(layer) + return data + + +class SliceData(ClassProvider, SliceData): + """Slice postprocessor. + + Implementation duplicates SliceData preprocessor + """ + + pass + + +class Normalize(ClassProvider, Normalize): + """Normalize postprocessor. + + Implementation duplicates Normalize preprocessor + """ + pass + + +class ExpandDims(ClassProvider): + """Dimension expanding postprocessor. + + Expands dimension by axis. + """ + __action_name__ = "expand_dims" + + def __init__(self, config): + self.target_layers = config.get('target_layers') + self.axis = config.get('axis') + + def apply(self, data): + self.target_layers = self.target_layers if self.target_layers else data.keys() + for layer in self.target_layers: + data[layer] = np.expand_dims(data[layer], axis=self.axis) + return data + + +class RemoveZeros(ClassProvider): + """Removing zeros postprocessor. + + Removes all-zero elements by axis. + """ + __action_name__ = "remove_zeros" + + def __init__(self, config): + self.target_layers = config.get('target_layers') + self.axis = config.get('axis') + + def apply(self, data): + self.target_layers = self.target_layers if self.target_layers else data.keys() + for layer in self.target_layers: + data[layer] = data[layer][np.any(data[layer], axis=self.axis)] + return data + + +class Clip(ClassProvider): + """Removing zeros postprocessor. + + Removes all-zero elements by axis. + """ + __action_name__ = "clip" + + def __init__(self, config): + self.target_layers = config.get('target_layers') + self.min = config.get('min') + self.max = config.get('max') + + def apply(self, data): + self.target_layers = self.target_layers if self.target_layers else data.keys() + for layer in self.target_layers: + data[layer] = np.clip(data[layer], self.min, self.max) + return data + + +class CustomPostproc(ClassProvider, CustomPreproc): + """Custom postprocessor. + + Implementation duplicates CustomPreproc preprocessor + """ + __action_name__ = "custom_postprocessor" + + pass + + +class RemoveLayersFromData(ClassProvider, RemoveLayersFromInputData): + """Updating data postprocessor. 
+ + Removes layers from data + Use case: if reference results contain extra outputs for comparison, + it can be removed from data through this postprocessor + """ + __action_name__ = "remove_layers_from_data" + + pass + + +class RenameOutputs(ClassProvider, RenameInputs): + __action_name__ = "rename_outputs" + pass + + +class ConvertNamesToIndices(ClassProvider): + """Converts input names to indices""" + __action_name__ = "names_to_indices" + + def __init__(self, config): + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + converted = {} + apply_to = self.target_layers if self.target_layers is not None else data.keys() + for i, layer in enumerate(apply_to): + converted[i] = data[layer] + return converted + + +class AssignIndices(ClassProvider): + """Assigns indices for tensors""" + __action_name__ = "assign_indices" + + def __init__(self, config): + pass + + @staticmethod + def apply(data): + import torch + if isinstance(data, torch.Tensor): + data = [data] + converted = {} + for i in range(len(data)): + converted[i] = data[i] + return converted diff --git a/tests/e2e_tests/common/postprocessors/ctc.py b/tests/e2e_tests/common/postprocessors/ctc.py new file mode 100644 index 00000000000000..30f13b301bc66c --- /dev/null +++ b/tests/e2e_tests/common/postprocessors/ctc.py @@ -0,0 +1,59 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""CTC output postprocessor""" +import string + +import numpy as np +from tensorflow.keras import backend as k + +from .provider import ClassProvider + + +class ParseCTCOutput(ClassProvider): + """Transforms CTC output to dictionary {"predictions": predicted_strings, ""probs": corresponding_probabilities}""" + __action_name__ = "ctc_decode" + + def __init__(self, config): + self.top_paths = config.get("top_paths") + self.beam_width = config.get("beam_width") + + def ctc_decode(self, data): + """ + Parse CTC output + Source: + https://intel-my.sharepoint.com/:u:/r/personal/abdulmecit_gungor_intel_com/Documents/Perpetuuiti/OCR-HandWritten/src/network/model.py?csf=1&web=1&e=ZGZ8nO + """ + predicts, probabilities = [], [] + input_length = len(max(data, key=len)) + data_len = np.asarray([input_length for _ in range(len(data))]) + decode, logs = k.ctc_decode(data, data_len, greedy=False, beam_width=self.beam_width, top_paths=self.top_paths) + probabilities.extend([np.exp(x) for x in logs]) + decode = [[[int(p) for p in x if p != -1] for x in y] for y in decode] + predicts.extend(np.swapaxes(decode, 0, 1)) + + return predicts, probabilities + + @staticmethod + def to_text(text, chars): + """Decode vector to text""" + pad_tk, unk_tk = "¶", "¤" + chars = pad_tk + unk_tk + chars + decoded = "".join([chars[int(char)] for char in text if char > -1]) + return decoded + + def apply(self, data: dict): + predicts, probs = [], [] + assert len(data.keys()) == 1, \ + "Expected 1 output layer, but got {} layers".format(len(data.keys())) + + layer = iter(data.keys()) + data = data[next(layer)] + for b in range(len(data)): + cur_predicts, cur_probs = self.ctc_decode(data) + charset_base = string.printable[:95] + cur_predicts = [[self.to_text(x, charset_base) for x in y] for y in cur_predicts] + predicts.extend(cur_predicts) + probs.extend(cur_probs) + decoded_output = {"predictions": predicts, "probs": probs} + return decoded_output diff --git a/tests/e2e_tests/common/postprocessors/filters.py b/tests/e2e_tests/common/postprocessors/filters.py new file mode 100644 index 00000000000000..2eca93318b304a --- 
/dev/null +++ b/tests/e2e_tests/common/postprocessors/filters.py @@ -0,0 +1,89 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +from .provider import ClassProvider + + +class FilterByLabels(ClassProvider): + __action_name__ = 'classes_filter' + + def __init__(self, config): + self.classes = config.get("classes", []) + + def apply(self, data): + filtered = {} + for layer, layer_data in data.items(): + filtered[layer] = [] + for batch_data in layer_data: + batch_filtered = [] + for i, detection in enumerate(batch_data): + if detection["class"] not in self.classes: + batch_filtered.append(detection) + filtered[layer].append(batch_filtered) + + return filtered + + +class FilterByMinProbability(ClassProvider): + __action_name__ = 'prob_filter' + + def __init__(self, config): + self.threshold = config.get("threshold", 0.1) + + def apply(self, data): + filtered = {} + for layer, layer_data in data.items(): + filtered[layer] = [] + for batch_data in layer_data: + batch_filtered = [] + for i, detection in enumerate(batch_data): + if detection["prob"] > self.threshold: + batch_filtered.append(detection) + filtered[layer].append(batch_filtered) + + return filtered + + +class NMS(ClassProvider): + __action_name__ = "nms" + + def __init__(self, config): + self.overlap_threshold = config.get("overlap_threshold", 0.5) + + def apply(self, data): + + filtered = {} + for layer, layer_data in data.items(): + filtered[layer] = [] + for batch_data in layer_data: + xmins = np.array([det["xmin"] for det in batch_data]) + xmaxs = np.array([det["xmax"] for det in batch_data]) + ymins = np.array([det["ymin"] for det in batch_data]) + ymaxs = np.array([det["ymax"] for det in batch_data]) + probs = np.array([det["prob"] for det in batch_data]) + + areas = (xmaxs - xmins + 1) * (ymaxs - ymins + 1) + order = probs.argsort()[::-1] + + keep = [] + while order.size > 0: + i = order[0] + keep.append(i) + + xx1 = np.maximum(xmins[i], xmins[order[1:]]) + yy1 = np.maximum(ymins[i], ymins[order[1:]]) + xx2 = np.minimum(xmaxs[i], xmaxs[order[1:]]) + yy2 = np.minimum(ymaxs[i], ymaxs[order[1:]]) + + w = np.maximum(0.0, xx2 - xx1 + 1) + h = np.maximum(0.0, yy2 - yy1 + 1) + intersection = w * h + + union = (areas[i] + areas[order[1:]] - intersection) + overlap = np.divide(intersection, union, out=np.zeros_like(intersection, dtype=float), where=union != 0) + + order = order[np.where(overlap <= self.overlap_threshold)[0] + 1] + filtered[layer].append([batch_data[i] for i in keep]) + + return filtered diff --git a/tests/e2e_tests/common/postprocessors/image_modifications.py b/tests/e2e_tests/common/postprocessors/image_modifications.py new file mode 100644 index 00000000000000..3e4904eae98b72 --- /dev/null +++ b/tests/e2e_tests/common/postprocessors/image_modifications.py @@ -0,0 +1,30 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Postprocessor for image modification tasks such as super-resolution, style transfer. 
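# --- Compact, self-contained IoU example with toy boxes (illustrative helper,
# --- not the filter class) mirroring the greedy suppression in NMS above: keep
# --- the highest-scoring box and drop any remaining box whose overlap with it
# --- exceeds the threshold.
import numpy as np

def iou(a, b):
    # boxes as (xmin, ymin, xmax, ymax); +1 pixel convention as in the code above
    xx1, yy1 = max(a[0], b[0]), max(a[1], b[1])
    xx2, yy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, xx2 - xx1 + 1) * max(0.0, yy2 - yy1 + 1)
    area_a = (a[2] - a[0] + 1) * (a[3] - a[1] + 1)
    area_b = (b[2] - b[0] + 1) * (b[3] - b[1] + 1)
    return inter / (area_a + area_b - inter)

box_a = (10, 10, 50, 50)
box_b = (12, 12, 52, 52)   # heavily overlapping with box_a
print(iou(box_a, box_b))   # ~0.83 -> suppressed at overlap_threshold=0.5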
+ It takes normalized image and converts it back to colored picture""" +from .provider import ClassProvider +import numpy as np +import logging as log + + +class ParseImageModification(ClassProvider): + """Image modification parser""" + __action_name__ = "parse_image_modification" + + def __init__(self, config): + self.target_layers = config.get("target_layers", None) + + def apply(self, data): + """Parse image modification data.""" + target_layers = self.target_layers if self.target_layers else data.keys() + postprocessed = False + for layer in target_layers: + for batch_num in range(len(data[layer])): + data[layer][batch_num][data[layer][batch_num] > 1] = 1 + data[layer][batch_num][data[layer][batch_num] < 0] = 0 + data[layer][batch_num] = data[layer][batch_num]*255 + postprocessed = True + if postprocessed == False: + log.info("Postprocessor {} has nothing to process".format(str(self.__action_name__))) + return data diff --git a/tests/e2e_tests/common/postprocessors/mask_rcnn.py b/tests/e2e_tests/common/postprocessors/mask_rcnn.py new file mode 100644 index 00000000000000..74b2efaa51ffa5 --- /dev/null +++ b/tests/e2e_tests/common/postprocessors/mask_rcnn.py @@ -0,0 +1,69 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Mask RCNN postprocessor""" +import logging as log +import sys + +import cv2 +import numpy as np + +from .provider import ClassProvider + + +class ParseMaskRCNN(ClassProvider): + """Semantic segmentation parser + returns new "score" layer, combined from "tf_detections" and "detection_masks". + For each detected picture it provides matrix of (num of classes + 1, h, w) shape. + Each submatrix matrix[i] of image size contains probability for each pixel to be classified as i class.""" + __action_name__ = "parse_mask_rcnn_tf" + log.basicConfig( + format="[ %(levelname)s ] %(message)s", + level=log.INFO, + stream=sys.stdout) + + def __init__(self, config): + self.target_layers = config.get("target_layers", None) + self.h = config.get("h") + self.w = config.get("w") + self.num_classes = config.get("num_classes") + + def unmold_mask(self, mask: np.ndarray, bbox: list): + """Converts a mask generated by Mask RCNN to a format similar + to its original shape. + mask: [height, width] of type float. A small, typically 28x28 mask. + bbox: [y1, x1, y2, x2]. The box to fit the mask in. + Returns a binary mask with the same size as the original image. + """ + y1, x1, y2, x2 = bbox + mask = cv2.resize(mask, (x2 - x1, y2 - y1)) + # Put the mask in the right location. 
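# --- Illustrative standalone version with toy sizes (not the class method) of
# --- the "resize then paste" step performed right here: a small 28x28 mask is
# --- resized to the detection box and written into a zero canvas of the full
# --- image size.
import numpy as np
import cv2

small_mask = np.random.rand(28, 28).astype(np.float32)
y1, x1, y2, x2 = 40, 60, 120, 180           # assumed box in pixel coordinates
canvas = np.zeros((240, 320), dtype=np.float32)
canvas[y1:y2, x1:x2] = cv2.resize(small_mask, (x2 - x1, y2 - y1))
print(canvas.shape, canvas[y1:y2, x1:x2].shape)   # (240, 320) (80, 120)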
+ full_mask = np.zeros((self.w, self.h)) + full_mask[y1:y2, x1:x2] = mask + return full_mask + + def apply(self, data): + log.info("Applying {} postprocessor...".format(self.__action_name__)) + """Parse Mask RCNN data.""" + predictions = {'score': []} + do_data = data["tf_detections"] + masks = np.zeros(shape=(self.num_classes+1, self.h, self.w)) + masks_data = data["detection_masks"] + for batch in range(len(do_data)): + for cur_bounding_box in range(len(do_data[batch])): + label = int(do_data[batch][cur_bounding_box]['class']) - 1 + x1 = int(min(max(0, do_data[batch][cur_bounding_box]['xmin'] * self.w), self.w)) + y1 = int(min(max(0, do_data[batch][cur_bounding_box]['ymin'] * self.h), self.h)) + x2 = int(min(max(0, do_data[batch][cur_bounding_box]['xmax'] * self.w), self.w)) + y2 = int(min(max(0, do_data[batch][cur_bounding_box]['ymax'] * self.h), self.h)) + num_detected_masks_per_image = int(masks_data.shape[0]/len(do_data)) + current_mask_index = batch * num_detected_masks_per_image + cur_bounding_box + # Shape of TF masks output blob is 3, IE - 4 + mask = masks_data[current_mask_index][label] if len(masks_data.shape) > 3 else masks_data[current_mask_index] + mask = self.unmold_mask(mask, [y1, x1, y2, x2]) + masks[label] = mask + predictions['score'].append(np.array(masks)) + for layer in data.keys(): + if layer not in ["tf_detections", "detection_masks"]: + predictions[layer] = data[layer] + return predictions diff --git a/tests/e2e_tests/common/postprocessors/object_detection.py b/tests/e2e_tests/common/postprocessors/object_detection.py new file mode 100644 index 00000000000000..615b37e1253f52 --- /dev/null +++ b/tests/e2e_tests/common/postprocessors/object_detection.py @@ -0,0 +1,226 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Object detection postprocessor.""" +import logging as log + +import numpy as np + +from .provider import ClassProvider + + +class ParseBeforeODParser(ClassProvider): + """Prepare the pipeline output to right state before parse_object_detection postprocessing using. + e.g. (2,100,7) where all information from 2 batches contain in first element with shape (100, 7), + other 'strings' in this element and second element is zeroes. + Output transform to reference-like state: (n, 7) shape where 'n' is number of detections + stop element('-1') """ + + __action_name__ = "parse_before_OD" + + def __init__(self, config): + self.target_layers = config.get("target_layers", None) + pass + + def apply(self, data): + """Parse data""" + predictions = {} + postprocessed = False + target_layers = self.target_layers if self.target_layers else data.keys() + for layer in target_layers: + layer_data = np.squeeze(data[layer]) + assert layer_data.shape[-1] == 7, "Wrong data for postprocessing! Last dimension must be equal 7." 
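# --- Toy end-to-end illustration with fabricated numbers of the detection
# --- layout the parsers in this file expect: each row is
# ---   [image_id, class, prob, xmin, ymin, xmax, ymax]
# --- and a row starting with -1 terminates the list of detections.
import numpy as np

blob = np.array([[0, 15, 0.92, 0.10, 0.20, 0.40, 0.60],
                 [0,  7, 0.55, 0.50, 0.55, 0.80, 0.90],
                 [-1, 0, 0.00, 0.00, 0.00, 0.00, 0.00]])
keys = ['class', 'prob', 'xmin', 'ymin', 'xmax', 'ymax']
per_image = {}
for row in blob:
    if row[0] == -1:
        break
    per_image.setdefault(int(row[0]), []).append(dict(zip(keys, row[1:])))
print(per_image[0])   # two box dicts for image 0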
+ if len(layer_data.shape) > 2: + layer_data = np.reshape(layer_data, (-1, 7)) + predictions[layer] = layer_data + postprocessed = True + if not postprocessed: + log.info("Postprocessor {} has nothing to process.".format(str(self.__action_name__))) + return predictions + + +class ParseObjectDetection(ClassProvider): + """Object detection parser.""" + __action_name__ = "parse_object_detection" + + def __init__(self, config): + self.target_layers = config.get("target_layers", None) + pass + + def apply(self, data): + """Parse object detection data.""" + predictions = {} + postprocessed = False + target_layers = self.target_layers if self.target_layers else data.keys() + dict_keys = ['class', 'prob', 'xmin', 'ymin', 'xmax', 'ymax'] + for layer in target_layers: + predictions[layer] = [] + layer_data = np.squeeze(data[layer]) + # 1 detection leads to 0-d array after squeeze, which is not iterable + if layer_data.ndim == 1: + layer_data = np.expand_dims(layer_data, axis=0) + assert len(layer_data.shape) <= 2, "Wrong data for postprocessing! Data length must be equal 2." + for obj in layer_data: + if type(obj) == np.float64: + log.debug(f"{obj} has type np.float64") + break + elif obj[0] == -1: + log.debug(f"First item of {obj} == -1") + break + assert len(obj) == 7, "Wrong data for postprocessing! Data length for one detection must be equal 7." + while obj[0] > len(predictions[layer]) - 1: + predictions[layer].append([]) + box = dict(zip(dict_keys, obj[1:])) + predictions[layer][int(obj[0])].append(box) + postprocessed = True + for layer in data.keys() - target_layers: + predictions[layer] = data[layer] + if postprocessed == False: + log.info("Postprocessor {} has nothing to process".format(str(self.__action_name__))) + return predictions + + +class ParseObjectDetectionTF(ClassProvider): + """TF models yield 4-tensor format that needs to be converted into common format""" + __action_name__ = "tf_to_common_od_format" + + def __init__(self, config): + self.target_layers = ['num_detections', 'detection_classes', + 'detection_scores', 'detection_boxes'] + + def apply(self, data: dict): + predictions = [] + num_batches = len(data['detection_boxes']) + for b in range(num_batches): + predictions.append([]) + num_detections = int(data['num_detections'][b]) + detection_classes = data['detection_classes'][b] + detection_scores = data['detection_scores'][b] + detection_boxes = data['detection_boxes'][b] + for i in range(num_detections): + obj = [ + b, detection_classes[i], detection_scores[i], + detection_boxes[i][1], detection_boxes[i][0], + detection_boxes[i][3], detection_boxes[i][2] + ] + predictions[b].append(obj) + predictions = np.asarray(predictions) + if predictions.size != 0: + predictions = np.reshape(predictions, newshape=(1, 1, predictions.shape[0] * predictions.shape[1], + predictions.shape[2])) + else: + log.error("Provided data doesn't contain any detected objects!") + parsed_data = {'tf_detections': predictions} + for layer, blob in data.items(): + if layer not in self.target_layers: + parsed_data.update({layer: blob}) + return parsed_data + + +class ParseObjectDetectionMaskRCNN(ClassProvider): + """TF models yield 4-tensor format that needs to be converted into common format""" + __action_name__ = "parse_object_detection_mask_rcnn" + + def __init__(self, config): + self.target_layers = ['num_detections', 'detection_classes', + 'detection_scores', 'detection_boxes'] + + def apply(self, data: dict): + predictions = [] + num_batches = len(data['detection_boxes']) + for b in 
range(num_batches): + predictions.append([]) + num_detections = int(data['num_detections'][b]) + detection_classes = data['detection_classes'][b] + detection_scores = data['detection_scores'][b] + detection_boxes = data['detection_boxes'][b] + for i in range(num_detections): + obj = [ + b, detection_classes[i], detection_scores[i], + detection_boxes[i][1], detection_boxes[i][0], + detection_boxes[i][3], detection_boxes[i][2] + ] + predictions[b].append(obj) + parsed_data = {'tf_detections': np.array(predictions)} + for layer, blob in data.items(): + if layer not in self.target_layers: + parsed_data.update({layer: blob}) + return parsed_data + + +class AlignWithBatch(ClassProvider): + """Batch alignment preprocessor. + + Duplicates 1-batch data BATCH number of times. + """ + __action_name__ = "align_with_batch_od" + + def __init__(self, config): + self.batch = config["batch"] + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + """Apply batch alignment (duplication) to data.""" + apply_to = self.target_layers if self.target_layers is not None else data.keys() + for layer in apply_to: + + container = np.zeros(shape=(1, 1, data[layer].shape[2] * self.batch + 1, data[layer].shape[3])) + detections_counter = 0 + + for b in range(self.batch): + for box in data[layer][0][0]: + if box[0] == -1: + break + box[0] = b + container[0][0][detections_counter] = box + detections_counter += 1 + else: + container[0][0][detections_counter] = [-1, 0, 0, 0, 0, 0, 0] # Add 'stop' entry + + data[layer] = container + + return data + + +class ClipBoxes(ClassProvider): + """ + Clip boxes coordinates to target height and width + """ + __action_name__ = "clip_boxes" + + def __init__(self, config): + self.normalized_boxes = config.get("normalized_boxes", True) + self.max_h = 1 if self.normalized_boxes else config.get("max_h") + self.max_w = 1 if self.normalized_boxes else config.get("max_w") + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + apply_to = self.target_layers if self.target_layers is not None else data.keys() + for layer in apply_to: + for b in range(len(data[layer])): + for i, box in enumerate(data[layer][b]): + data[layer][b][i].update({"xmax": min(box["xmax"], self.max_w) if box["xmax"] > 0 else 0, + "xmin": max(box["xmin"], 0), + "ymax": min(box["ymax"], self.max_h) if box["ymax"] > 0 else 0, + "ymin": max(box["ymin"], 0) + }) + return data + + +class AddClass(ClassProvider): + """Adding class values postprocessor. + + Adds class key and its value to detection dictionaries. 
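# --- Minimal example on a toy detection dict of the coordinate clamping
# --- ClipBoxes performs above for normalized boxes: coordinates are pinned
# --- into [0, 1] so boxes never extend past the image border.
box = {"xmin": -0.05, "ymin": 0.10, "xmax": 1.20, "ymax": 0.95}
clipped = {"xmin": max(box["xmin"], 0),
           "ymin": max(box["ymin"], 0),
           "xmax": min(box["xmax"], 1) if box["xmax"] > 0 else 0,
           "ymax": min(box["ymax"], 1) if box["ymax"] > 0 else 0}
print(clipped)   # {'xmin': 0, 'ymin': 0.1, 'xmax': 1, 'ymax': 0.95}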
+ """ + __action_name__ = "add_class" + + def __init__(self, config): + self.target_layers = config.get('target_layers', None) + self.class_value = config.get('class_value', 0) + + def apply(self, data): + apply_to = self.target_layers if self.target_layers else data.keys() + for layer in apply_to: + for batch_num in range(len(data[layer])): + for i in range(len(data[layer][batch_num])): + data[layer][batch_num][i]['class'] = self.class_value + return data diff --git a/tests/e2e_tests/common/postprocessors/provider.py b/tests/e2e_tests/common/postprocessors/provider.py new file mode 100644 index 00000000000000..1cf26df5074be6 --- /dev/null +++ b/tests/e2e_tests/common/postprocessors/provider.py @@ -0,0 +1,48 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import inspect + +import torch + +from e2e_tests.common.common.base_provider import BaseProvider, BaseStepProvider + + +class ClassProvider(BaseProvider): + registry = {} + + @classmethod + def validate(cls): + methods = [ + f[0] for f in inspect.getmembers(cls, predicate=inspect.isfunction) + ] + if 'apply' not in methods: + raise AttributeError( + "Requested class {} registred as '{}' doesn't provide required method 'apply'" + .format(cls.__name__, cls.__action_name__)) + + +class StepProvider(BaseStepProvider): + __step_name__ = "postprocessor" + + def __init__(self, config): + self.executors = [] + for name, params in config.items(): + self.executors.append(ClassProvider.provide(name, params)) + + def execute(self, passthrough_data): + data = passthrough_data.strict_get('output', self) + if isinstance(data, list): + # case when input is torch tensor without names + if isinstance(data[0], torch.Tensor): + for executor in self.executors: + data = executor.apply(data) + # case of dynamism tests with --consecutive_infer key (list of two inputs) + else: + for executor in self.executors: + data = list(map(executor.apply, data)) + else: + for executor in self.executors: + data = executor.apply(data) + passthrough_data['output'] = data + return passthrough_data diff --git a/tests/e2e_tests/common/postprocessors/semantic_segmentation.py b/tests/e2e_tests/common/postprocessors/semantic_segmentation.py new file mode 100644 index 00000000000000..60238737f92c1b --- /dev/null +++ b/tests/e2e_tests/common/postprocessors/semantic_segmentation.py @@ -0,0 +1,31 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Semantic segmentation postprocessor""" +from .provider import ClassProvider +import numpy as np +import logging as log + + +class ParseSemanticSegmentation(ClassProvider): + """Semantic segmentation parser""" + __action_name__ = "parse_semantic_segmentation" + + def __init__(self, config): + self.target_layers = config.get("target_layers", None) + + def apply(self, data): + """Parse semantic segmentation data.""" + predictions = {} + postprocessed = False + target_layers = self.target_layers if self.target_layers else data.keys() + for layer in target_layers: + predictions[layer] = [] + for batch in data[layer]: + predictions[layer].append(np.argmax(np.array(batch), axis=0)) + postprocessed = True + for layer in data.keys() - target_layers: + predictions[layer] = data[layer] + if postprocessed == False: + log.info("Postprocessor {} has nothing to process".format(str(self.__action_name__))) + return predictions diff --git a/tests/e2e_tests/common/preprocessors/__init__.py b/tests/e2e_tests/common/preprocessors/__init__.py new file mode 100644 index 
00000000000000..f8ce0aaf2d4d80 --- /dev/null +++ b/tests/e2e_tests/common/preprocessors/__init__.py @@ -0,0 +1,6 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from . import preprocessors +from . import transformers +from .provider import StepProvider diff --git a/tests/e2e_tests/common/preprocessors/preprocessors.py b/tests/e2e_tests/common/preprocessors/preprocessors.py new file mode 100644 index 00000000000000..772865855ca359 --- /dev/null +++ b/tests/e2e_tests/common/preprocessors/preprocessors.py @@ -0,0 +1,501 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Data preprocessors applied to target layers in given data dictionary.""" +import logging as log +import sys + +# pylint:disable=no-member +import cv2 +import numpy as np + +from e2e_tests.test_utils.path_utils import resolve_file_path +from .provider import ClassProvider + +log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + +class Squeeze(ClassProvider): + """Squeezing preprocessor. + + Removes single-dimensional entries from the shape of an array. + """ + __action_name__ = "squeeze" + + def __init__(self, config): + self.axis = config.get('axis', None) + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + """Apply np.squeeze to data.""" + apply_to = self.target_layers if self.target_layers is not None else data.keys() + for layer in apply_to: + if self.axis is not None: + # If an axis is selected with shape entry greater than one, an error is raised + assert np.all(np.array(data[layer].shape).take(self.axis) == 1), \ + 'Squeeze preprocessor error: Can not squeeze data for layer {} with shape {} by axis {}' \ + ''.format(layer, data[layer].shape, self.axis) + data[layer] = np.squeeze(data[layer], axis=self.axis) + return data + + +class Resize(ClassProvider): + """Resize preprocessor. + + Resizes data to HEIGHTxWIDTH using optional interpolation MODE. + """ + __action_name__ = "resize" + resize_interp_map = { + "nearest": cv2.INTER_NEAREST, + "linear": cv2.INTER_LINEAR, + "area": cv2.INTER_AREA, + "cubic": cv2.INTER_CUBIC, + "lanczos": cv2.INTER_LANCZOS4, + } + + def __init__(self, config): + self.height = int(config["height"]) + self.width = int(config["width"]) + self.target_layers = config.get('target_layers', None) + self.interpolation = Resize.resize_interp_map[config.get( + 'mode', 'linear')] + + def apply(self, data): + """Resize data with opencv resize.""" + apply_to = self.target_layers if self.target_layers is not None else data.keys() + log.info( + "Resize input data for layers {} to size ({}, {}) ...".format(', '.join('"{}"'.format(l) for l in apply_to), + self.width, + self.height)) + for layer in apply_to: + data[layer] = cv2.resize(data[layer], (self.width, self.height), interpolation=self.interpolation) + return data + + +class PermuteShape(ClassProvider): + """Shape permutation preprocessor. + + Permutes data shape using ORDER value. 
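+ Example: order (2, 0, 1) rearranges HWC image data into CHW layout.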
+ """ + __action_name__ = "permute_shape" + + def __init__(self, config): + self.order = config["order"] + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + """Apply np.transpose to data.""" + apply_to = self.target_layers if self.target_layers is not None else data.keys() + log.info( + "Permute input data for layers {} to order {} ...".format(', '.join('"{}"'.format(l) for l in apply_to), + self.order)) + for layer in apply_to: + data[layer] = np.transpose(data[layer], self.order) + return data + + +class AlignWithBatch(ClassProvider): + """Batch alignment preprocessor. + + Aligns batch dimension in input data + with BATCH value specified in test. + + Models 0-th dimension for batch and + duplicates input data while size of batch + dimension in input data won't be equal with BATCH. + """ + __action_name__ = "align_with_batch" + + def __init__(self, config): + self.batch = config["batch"] + self.batch_dim = config.get('batch_dim', 0) + self.expand_dims = config.get('expand_dims', True) + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + """Apply batch alignment (duplication) to data.""" + apply_to = self.target_layers if self.target_layers is not None else data.keys() + log.info("Align batch data for layers {} to batch {} ...".format(', '.join( + '"{}"'.format(l) for l in apply_to), self.batch)) + for layer in apply_to: + if self.expand_dims: + data[layer] = np.expand_dims(data[layer], axis=self.batch_dim) + data[layer] = np.repeat(data[layer], self.batch, axis=self.batch_dim) + return data + + +class SubtractMeanValues(ClassProvider): + """Mean subtraction preprocessor.""" + __action_name__ = "subtract_mean_values" + + def __init__(self, config): + self.mean_values = config["mean_values"] + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + """Subtract mean values from data.""" + apply_to = self.target_layers if self.target_layers is not None else data.keys() + log.info("Subtract mean values {} from input data for layers {} ...".format(self.mean_values, ', '.join( + '"{}"'.format(l) for l in apply_to))) + for layer in apply_to: + data[layer] = data[layer] - self.mean_values + return data + + +class SubtractMeanValuesFile(ClassProvider): + """Mean file (image) subtraction preprocessor.""" + __action_name__ = "subtract_mean_values_file" + + def __init__(self, config): + self.mean_file = resolve_file_path(config['mean_file'], as_str=True) + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + """Subtract mean image from data.""" + means = None + with np.load(self.mean_file) as content: + means = content['means'] + apply_to = self.target_layers if self.target_layers is not None else data.keys() + log.info( + "Subtract mean file {} from input data for layers {} ...".format(self.mean_file, ', '.join( + '"{}"'.format(l) for l in apply_to))) + for layer in apply_to: + data_shape = data[layer].shape + if len(data_shape) != 3: + raise ValueError('data layer {l} has unexpected shape {s}, ' + 'expected shape of length 3.'.format(l=layer, s=data_shape)) + mean_values = means[:data_shape[0], :data_shape[1], :] + data[layer] = data[layer] - mean_values + return data + + +class Normalize(ClassProvider): + """Normalization preprocessor.""" + __action_name__ = "normalize" + + def __init__(self, config): + self.factor = config["factor"] + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + """Normalize data by factor.""" + apply_to = 
self.target_layers if self.target_layers is not None else data.keys() + log.info("Normalize input data for layers {} with normalization factor {}...".format( + ', '.join('"{}"'.format(l) for l in apply_to), + self.factor)) + for layer in apply_to: + data[layer] = data[layer] / self.factor + return data + + +class ExpandDims(ClassProvider): + """Expand dims preprocessor.""" + __action_name__ = "expand_dims" + + def __init__(self, config): + self.target_layers = config.get('target_layers', None) + self.axis = config.get('axis', 0) + + def apply(self, data): + apply_to = self.target_layers if self.target_layers is not None else data.keys() + log.info("Expanding dims for layers {}...".format(', '.join('"{}"'.format(l) for l in apply_to))) + for layer in apply_to: + data[layer] = np.expand_dims(data[layer], axis=self.axis) + return data + + +class Scale(ClassProvider): + """Scale preprocessor.""" + __action_name__ = "scale" + + def __init__(self, config): + self.factor = config["factor"] + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + """Scale data by factor.""" + apply_to = self.target_layers if self.target_layers is not None else data.keys() + log.info("Scale input data for layers {} with scaling factor {} ...". + format(', '.join('"{}"'.format(l) for l in apply_to), self.factor)) + for layer in apply_to: + data[layer] = data[layer] * self.factor + return data + + +class ReverseChannels(ClassProvider): + """Channel reverse preprocessor. + + Reverses channels in data (i.e. RGB image -> BGR image). + """ + __action_name__ = "reverse_channels" + + def __init__(self, config): + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + """Apply cv2.cvtColor to data.""" + + def convert(data): + """OpenCV color conversion""" + # cvtColor doesn't seem to be supported for float64 + # converting to float32 first, then applying color convert + # return in original type + orig_type = data.dtype + return cv2.cvtColor(data.astype(np.float32), cv2.COLOR_RGB2BGR).astype(orig_type) + + apply_to = self.target_layers if self.target_layers is not None else data.keys() + log.info("Convert colors from RGB to BGR for input data for layers {} ...".format( + ', '.join('"{}"'.format(l) for l in apply_to))) + for layer in apply_to: + if len(data[layer].shape) != 3: + raise ValueError( + 'data layer {l} has unexpected shape {s}, ''expected shape of length 3.'.format(l=layer, s=data[ + layer].shape)) + data[layer] = convert(data[layer]) + return data + + +class RenameInputs(ClassProvider): + """Input renaming preprocessor.""" + __action_name__ = "rename_inputs" + + def __init__(self, config): + self.input_pairs = config.get('rename_input_pairs', []) + + def apply(self, data): + """Rename data keys.""" + if self.input_pairs: + log.info("Rename input data keys according to pairs {}...".format(self.input_pairs)) + for old_name, new_name in self.input_pairs: + data[new_name] = data.pop(old_name) + return data + + +class RemoveLayersFromInputData(ClassProvider): + """Updating input data preprocessor. 
+ + Removes input layers from input data + Use case: if some input layer was frozen with a value during model conversion + to IR, this layer needs to be removed from the input data read from file + """ + __action_name__ = "remove_layers_from_input_data" + + def __init__(self, config): + self.target_layers = config.get('target_layers', []) + + def apply(self, data): + """Remove layers from input data.""" + for layer in self.target_layers: + data.pop(layer) + return data + + +class AddLayersToInputData(ClassProvider): + """Updating input data preprocessor. + + Adds input layers to input data + Use case: if some input layer depends on the height or width of another input layer, + it needs to be filled with a value dynamically rather than hardcoded. + """ + __action_name__ = "add_layer_to_input_data" + + def __init__(self, config): + self.layer_data = config["layer_data"] + + def apply(self, data): + """Add layer to input data.""" + for key in self.layer_data.keys(): + log.info("Adding layer to input data: layer {}...".format(key)) + data.update({key: np.array(self.layer_data[key])}) + return data + + +class CopyDataFromLayer(ClassProvider): + """Copying data from one layer to another""" + __action_name__ = "copy_data_from_layer" + + def __init__(self, config): + self.source_target_map = config.get("source_target_map", {}) + + def apply(self, data): + """Copy data from source layers to target layers.""" + for source, target in self.source_target_map.items(): + data = AddLayersToInputData({"layer_data": {target: data[source]}}).apply(data) + return data + + +class RewriteSeqLenValue(ClassProvider): + """Sequence_length rewriting preprocessor. + + Changes the sequence_length value in input_data to the SEQUENCE_LENGTH value from the test. + """ + __action_name__ = "rewrite_seqlen_value" + + def __init__(self, config): + self.sequence_length = config["sequence_length"] + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + """Apply rewrite of sequence_length value in input data.""" + apply_to = self.target_layers if self.target_layers is not None else data.keys() + for layer in apply_to: + data[layer] = np.array([self.sequence_length]) + return data + + +class SliceData(ClassProvider): + """Slice preprocessor. + + Updates input data through slicing + Use case: align the size of a dimension with the value specified + in the test (e.g. align with batch) + + Config should have a 'slice' field with a 'slice' or + 'tuple of slices' type of value. 
+ + Examples how to model some types of slices: + slice(0, 5, 1) = slice(0, 5, 1) + [:1, 3:5:2] = (slice(None, 1, None), slice(3, 5, 2)) + """ + + __action_name__ = "slice_data" + + def __init__(self, config): + self.slice = config["slice"] + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + apply_to = self.target_layers if self.target_layers is not None else data.keys() + for layer in apply_to: + data[layer] = data[layer][self.slice] + return data + + +class CastDataType(ClassProvider): + """Converts data type preprocessor""" + __action_name__ = "cast_data_type" + + def __init__(self, config): + self.target_data_type = config.get('target_data_type', "float32") + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + """Converts data type to specified 'target_data_type' for provided numpy.ndarray""" + apply_to = self.target_layers if self.target_layers is not None else data.keys() + log.info("Converting layers {} data to type {}...".format(', '.join('"{}"'.format(l) for l in apply_to), + self.target_data_type)) + for layer in apply_to: + data[layer] = data[layer].astype(self.target_data_type) + return data + + +class CustomPreproc(ClassProvider): + __action_name__ = "custom_preproc" + + def __init__(self, config): + self.execution_function = config["execution_function"] + + def apply(self, data): + return self.execution_function(data) + + +class DynamismPreproc(CustomPreproc): + """Implementation duplicates CustomPreproc preprocessor.""" + __action_name__ = "dynamism_preproc" + + pass + + +class Grayscale(ClassProvider): + """Convert image to grayscale preprocessor.""" + __action_name__ = "grayscale" + + def __init__(self, config): + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + """Scale data by factor.""" + apply_to = self.target_layers if self.target_layers is not None else data.keys() + log.info("Converting layers {} to grayscale ...".format(', '.join('"{}"'.format(l) for l in apply_to))) + for layer in apply_to: + data[layer] = cv2.cvtColor(data[layer], cv2.COLOR_BGR2GRAY) + data[layer] = np.expand_dims(data[layer], axis=2) + return data + + +class AlignWithBatchDifferently(ClassProvider): + """Align with batch preprocessor which allows expand dims for chosen layers""" + # TODO: Replace with more accurate solution + __action_name__ = "align_with_batch_dif" + + def __init__(self, config): + self.batch = config["batch"] + self.batch_dim = config.get('batch_dim', 0) + self.expand_dims = config.get('expand_dims', True) + self.layers_to_expand = config.get('layers_to_expand', None) + self.layers_not_to_expand = config.get('layers_not_to_expand', None) + + def apply(self, data): + if self.layers_to_expand: + data_preproc = AlignWithBatch({"batch": self.batch, "target_layers": self.layers_to_expand}) + data = data_preproc.apply(data) + else: + log.warning("No layers specified to be aligned with batch using dimension expanding.") + if self.layers_not_to_expand: + data_preproc = AlignWithBatch( + {"batch": self.batch, "expand_dims": False, "target_layers": self.layers_not_to_expand}) + data = data_preproc.apply(data) + else: + log.warning("No layers specified to be aligned with batch using no dimension expanding.") + return data + + +class ConvertToTorchTensor(ClassProvider): + """Convert arrays to torch.Tensor format.""" + __action_name__ = "convert_to_torch" + + def __init__(self, config): + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + import torch + 
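# Note: torch.from_numpy() shares memory with the source ndarray, so no copy is made here. +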
apply_to = self.target_layers if self.target_layers is not None else data.keys() + log.info("Converting layers {} to torch.Tensor format ...".format(', '.join('"{}"'.format(l) for l in apply_to))) + for layer in apply_to: + data[layer] = torch.from_numpy(data[layer]) + return data + + +class ConvertNamesToIndices(ClassProvider): + """Converts input names to indices""" + __action_name__ = "names_to_indices" + + def __init__(self, config): + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + converted = {} + apply_to = self.target_layers if self.target_layers is not None else data.keys() + log.info("Converting names {} to indices ...".format(', '.join('"{}"'.format(l) for l in apply_to))) + for i, layer in enumerate(apply_to): + converted[i] = data[layer] + return converted + + +class AssignIndices(ClassProvider): + """Assigns indices for tensors""" + __action_name__ = "assign_indices" + + def __init__(self, config): + pass + + @staticmethod + def apply(data): + import torch + if isinstance(data, (torch.Tensor, np.ndarray)): + data = [data] + converted = {} + for i in range(len(data)): + converted[i] = data[i] + return converted + diff --git a/tests/e2e_tests/common/preprocessors/provider.py b/tests/e2e_tests/common/preprocessors/provider.py new file mode 100644 index 00000000000000..7774bd7fabae7d --- /dev/null +++ b/tests/e2e_tests/common/preprocessors/provider.py @@ -0,0 +1,50 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import inspect + +import numpy as np +import torch + +from e2e_tests.common.common.base_provider import BaseProvider, BaseStepProvider + + +class ClassProvider(BaseProvider): + registry = {} + + @classmethod + def validate(cls): + methods = [ + f[0] for f in inspect.getmembers(cls, predicate=inspect.isfunction) + ] + if 'apply' not in methods: + raise AttributeError( + "Requested class {} registered as '{}' doesn't provide required method 'apply'" + .format(cls.__name__, cls.__action_name__)) + + +class StepProvider(BaseStepProvider): + __step_name__ = "preprocess" + + def __init__(self, config): + + self.executors = [] + for name, params in config.items(): + self.executors.append(ClassProvider.provide(name, params)) + + def execute(self, passthrough_data): + data = passthrough_data.strict_get('feed_dict', self) + if isinstance(data, list): + # case when input is torch tensor without names + if isinstance(data[0], (torch.Tensor, np.ndarray)): + for executor in self.executors: + data = executor.apply(data) + # case of dynamism tests with --consecutive_infer key (list of two inputs) + else: + for executor in self.executors: + data = list(map(executor.apply, data)) + else: + for executor in self.executors: + data = executor.apply(data) + passthrough_data["feed_dict"] = data + return passthrough_data diff --git a/tests/e2e_tests/common/preprocessors/transformers.py b/tests/e2e_tests/common/preprocessors/transformers.py new file mode 100644 index 00000000000000..d386de78feb96f --- /dev/null +++ b/tests/e2e_tests/common/preprocessors/transformers.py @@ -0,0 +1,187 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from .provider import ClassProvider +import cv2 +from random import randint +import numpy as np +import logging as log +import sys + +log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + +class CVFlip(ClassProvider): + """ + Flip an input image using OpenCV. 
Suitable for 2D and 3D input data + """ + __action_name__ = "cv2_flip" + + def __init__(self, config): + """ + :param config: dictionary with transformer configuration. + Mandatory config case: + should have either 'random_flip_mode' key - in this case OpenCV flipCode will be randomly selected + or have 'flip_mode' config key - allowed values: 0 - flip around the x-axis, 1 - flip around the y-axis, -1 - flip around both axes + Optional config keys: + 'target_layers' - defines for which layer from input data to apply transformation + """ + if config.get("random_flip_mode", False): + self.flip_mode = randint(-1, 1) + else: + self.flip_mode = config.get("flip_mode", 0) + + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + apply_to = self.target_layers if self.target_layers is not None else data.keys() + log.info("Applying {} transformer for layers {} with flip mode {}...".format(self.__action_name__, + ", ".join(apply_to), + self.flip_mode)) + for layer in apply_to: + try: + assert len(data[layer].shape) == 3 or len(data[layer].shape) == 2, \ + "Only 3D and 2D input data can be flipped" + data[layer] = cv2.flip(src=data[layer], flipCode=self.flip_mode) + except: + log.error("Failed to process data for layer {}! Data processing skipped".format(layer)) + continue + return data + + +class NumpyFlip(ClassProvider): + """ + Flip an input image using numpy. Suitable for data with arbitrary shape + """ + __action_name__ = "np_flip" + + def __init__(self, config): + """ + :param config: dictionary with transformer configuration. + Optional config keys: + 'axis' - defines the ndarray axis along which to flip the data. If not defined, flipping will be performed + along all ndarray axes, + 'target_layers' - optional config key which defines to which layer from input data to apply transformation + """ + self.axis = config.get("axis") + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + apply_to = self.target_layers if self.target_layers is not None else data.keys() + log.info("Applying {} transformer for layers {} and axis {}...".format(self.__action_name__, + ", ".join(apply_to), + self.axis)) + for layer in apply_to: + data[layer] = np.flip(data[layer], axis=self.axis) + return data + + +class Crop(ClassProvider): + """ + Crop an input image. Suitable only for 3D and 2D input data + """ + __action_name__ = "crop" + + def __init__(self, config): + """ + :param config: dictionary with transformer configuration. 
+ Mandatory config case: + should have either 'random_crop' key - in this case crop range will be defined randomly with a guarantee that 20% + of pixels along each side will be kept + or have 'x_crop' and 'y_crop' config keys - tuples containing two int values defining crop region along x/y axis + Optional config keys: + 'target_layers' - defines for which layer from input data to apply transformation + """ + self.random_crop = config.get("random_crop", False) + self.restore_initial_size = config.get("restore_initial_size", True) + if not self.random_crop: + self.x_crop = config['x_crop'] + self.y_crop = config['y_crop'] + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + apply_to = self.target_layers if self.target_layers is not None else data.keys() + + for layer in apply_to: + try: + assert len(data[layer].shape) == 3 or len(data[layer].shape) == 2, \ + "Only 3D and 2D input data can be cropped" + h, w = data[layer].shape[:2] + if self.random_crop: + # h or w // 10 required to guarantee that we will not crop whole image + # and keep at least 20 % of pixels along each side + x_start = randint(0, w // 2 - w // 10) + x_end = randint(w // 2 + w // 10, w) + self.x_crop = (x_start, x_end) + y_start = randint(0, h // 2 - h // 10) + y_end = randint(h // 2 + h // 10, h) + self.y_crop = (y_start, y_end) + log.info("Crop data for layer {}. Crop region is: y crop range {}, x crop range {}...".format(layer, + self.y_crop, + self.x_crop)) + data[layer] = data[layer][self.y_crop[0]:self.y_crop[1], self.x_crop[0]:self.x_crop[1]] + if self.restore_initial_size: + data[layer] = cv2.resize(data[layer], (w, h)) + except: + log.error("Failed to process data for layer {}! Data processing skipped".format(layer)) + continue + return data + + +class InvertData(ClassProvider): + """ + Invert input data relative to the array max value. After transformation each n-th array element + will have a value equal to (array_max_value - n-th ndarray element) + """ + __action_name__ = "invert_data" + + def __init__(self, config): + """ + :param config: dictionary with transformer configuration. + 'target_layers' - defines for which layer from input data to apply transformation + """ + self.target_layers = config.get('target_layers', None) + + def apply(self, data): + apply_to = self.target_layers if self.target_layers is not None else data.keys() + log.info("Invert colors for layer {}...".format(", ".join(apply_to))) + for layer in apply_to: + data[layer] = np.full(fill_value=data[layer].max(), shape=data[layer].shape) - data[layer] + return data + + +class AddGaussianNoise(ClassProvider): + """ + Adds random data with Gaussian distribution to the input data + """ + __action_name__ = "add_gaussian_noise" + + def __init__(self, config): + """ + :param config: dictionary with transformer configuration. 
+ Mandatory config case: + should have either 'auto_range' key - in this case mean value and standard deviation will be calculated + automatically basing on input data range + or have 'mean' and 'sigma' config key - numeric values for mean and standard deviation accordingly + Optional config keys: + 'target_layers' - defines for which layer from input data to apply transformation + """ + + self.target_layers = config.get('target_layers', None) + self.auto_range = config.get("auto_range", False) + if not self.auto_range: + self.mean = config.get("mean", 0) + self.sigma = config.get("sigma", 0.01) + + def apply(self, data): + apply_to = self.target_layers if self.target_layers is not None else data.keys() + for layer in apply_to: + if self.auto_range: + self.mean = np.mean(data[layer]) / 5 + self.sigma = np.std(data[layer]) / 10 + log.info("Add gaussian noise for input data for layer {} with mean={} and std={}".format(layer, + self.mean, + self.sigma)) + noise = np.random.normal(loc=self.mean, scale=self.sigma, size=data[layer].shape) + data[layer] = np.clip((data[layer] + noise), a_min=0, a_max=255).astype(np.uint8) + return data diff --git a/tests/e2e_tests/common/pytest_utils.py b/tests/e2e_tests/common/pytest_utils.py new file mode 100644 index 00000000000000..d574907272475a --- /dev/null +++ b/tests/e2e_tests/common/pytest_utils.py @@ -0,0 +1,115 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Pytest utility functions.""" +# pylint:disable=import-error +import pytest +from multiprocessing import TimeoutError +from collections import namedtuple +from _pytest.mark import Mark, MarkDecorator + +"""Mark generator to specify pytest marks in tests in common format +:param target_runner: name of the runner for which required to specify pytest mark (e.g. "test_run") +:param pytest_mark: pytest mark (e.g. "skip", "xfail") or any another mark (e.g. "caffe2") +:param is_simple_mark: bool value to split pytest marks and another marks +""" +mark = namedtuple("mark", ("pytest_mark", "target_runner", "is_simple_mark")) +# default values for "target_runner" and "is_simple_mark" fields respectively +mark.__new__.__defaults__ = ("all", False) + + +class XFailMarkWrapper(Mark): + def __init__(self, regexps: list, match_mode: str = "any", *args, **kwargs): + """ + Class constructs 'xfail'-like mark with additional fields + :param regexps: regexp to search in test logs + :param match_mode: 'any' or + :param args: + :param kwargs: + """ + super().__init__('xfail', *args, **kwargs) + object.__setattr__(self, "regexps", regexps) + object.__setattr__(self, "match_mode", match_mode) + + +def skip(reason): + """Generate skip marker. + + :param reason: reason why marker is generated + + :return: pytest marker + """ + return pytest.mark.skipif(True, reason=reason), reason + + +def skip_if(expr, reason): + """Generate skip marker if expr returns True. + + :param expr: expression to be tested + + :param reason: reason why marker is generated + + :return: pytest marker + """ + if expr: + return skip(reason) + else: + return None, None + + +def xfail(reason, regexps="", match_mode="any"): + """Generate xfail marker. 
+ + :param reason: reason why marker is generated + :param regexps: list of regular expressions for matching xfail reason on test's status + :param match_mode: defines that "all" or "any" specified regexps should be matched + + :return: pytest marker + """ + regexps = [regexps] if not isinstance(regexps, list) else regexps + mark = XFailMarkWrapper(regexps=regexps, match_mode=match_mode, + args=(True,), kwargs={"reason": reason, "strict": True}) + return MarkDecorator(mark=mark), reason + + +def xfail_if(expr, reason, regexps="", match_mode="any"): + """Generate xfail marker if expr returns True. + + :param expr: expression to be tested + :param reason: reason why marker is generated + :param regexps: see "xfail" function defined above + :param match_mode: see "xfail" function defined above + + :return: pytest marker + """ + if expr: + return xfail(reason, regexps, match_mode) + else: + return None, None + + +def timeout(seconds, reason): + """Generate timeout marker. + + :param seconds: number of seconds until timeout is reached + + :param reason: reason why marker is generated + + :return: pytest marker + """ + return pytest.mark.timeout(seconds), reason + + +def warning(reason): + """Generate warning marker. + + :param reason: reason why marker is generated + + :return: pytest marker + """ + return pytest.mark.warning(reason), reason + + +def timeout_error_filter(err, *args): + """Filter function for pytest flaky plugin for restarting only test where TimeoutError was raised""" + return issubclass(err[0], TimeoutError) diff --git a/tests/e2e_tests/common/readers/__init__.py b/tests/e2e_tests/common/readers/__init__.py new file mode 100644 index 00000000000000..7e9f9ba36f4579 --- /dev/null +++ b/tests/e2e_tests/common/readers/__init__.py @@ -0,0 +1,5 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from . import readers +from .provider import StepProvider diff --git a/tests/e2e_tests/common/readers/provider.py b/tests/e2e_tests/common/readers/provider.py new file mode 100644 index 00000000000000..f69e49a0dd95b8 --- /dev/null +++ b/tests/e2e_tests/common/readers/provider.py @@ -0,0 +1,38 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import inspect + +from e2e_tests.common.common.base_provider import BaseProvider, BaseStepProvider + + +class ClassProvider(BaseProvider): + registry = {} + + @classmethod + def validate(cls): + methods = [ + f[0] for f in inspect.getmembers(cls, predicate=inspect.isfunction) + ] + if 'read' not in methods: + raise AttributeError( + "Requested class {} registred as '{}' doesn't provide required method read" + .format(cls.__name__, cls.__action_name__)) + + +class StepProvider(BaseStepProvider): + """ + Read network input data from the file. 
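+ A typical config (hypothetical path) maps a single reader name to its parameters, e.g. {"npz": {"path": "inputs.npz"}}; the first config key selects the reader class to instantiate.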
+ """ + __step_name__ = "read_input" + + def __init__(self, config): + action_name = next(iter(config)) + cfg = config[action_name] + self.executor = ClassProvider.provide(action_name, config=cfg) + + def execute(self, passthrough_data): + model_object = passthrough_data.get('model_obj') + passthrough_data["feed_dict"] = self.executor.read(model_object) if model_object else self.executor.read() + passthrough_data['output'] = passthrough_data["feed_dict"] + return passthrough_data diff --git a/tests/e2e_tests/common/readers/readers.py b/tests/e2e_tests/common/readers/readers.py new file mode 100644 index 00000000000000..57e3171bbc99f0 --- /dev/null +++ b/tests/e2e_tests/common/readers/readers.py @@ -0,0 +1,170 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""File readers.""" +# pylint:disable=no-member +import numpy as np +import cv2 +import logging as log +import sys +from copy import deepcopy + +from e2e_tests.test_utils.path_utils import resolve_file_path +from e2e_tests.test_utils.tf_hub_utils import prepare_inputs, get_inputs_info +from e2e_tests.common.readers.provider import ClassProvider + +try: + from onnx import TensorProto, numpy_helper + + onnx_not_installed = False +except ImportError: + onnx_not_installed = True + + +class NPZReader(ClassProvider): + """ + Read input data from .npz file - most preferable input reading method. + Config should have 'path' field (absolute or relative to 'input_data' + defined in env_config.yml). File should store zipped dictionary of input + layer names as keys, and appropriate input data. + """ + __action_name__ = "npz" + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + def __init__(self, config): + """Initialization method. + + :param config: configuration dict. must have 'path' key + """ + self.input_path = resolve_file_path(config['path'], as_str=True) + + def read(self): + """Return file content.""" + log.info("Reading input file from {} ...".format(self.input_path)) + return dict(np.load(self.input_path, allow_pickle=True)) + + +class NPYReader(ClassProvider): + """ + Read input data from .npy file. Config should have 'path' field with mapping + (dictionary) of input layer name and corresponding .npy file. + """ + __action_name__ = "npy" + + def __init__(self, config): + """Initialization method. + :param config: configuration dict. must have 'path' key + """ + self.inputs_map = config['inputs_map'] + + def read(self): + """Return file content.""" + input_data = {} + for input, path in self.inputs_map.items(): + log.info("Reading input file from {} for input '{}' ...".format(path, input)) + input_data[input] = np.load(path, allow_pickle=True) + return input_data + + +class ImageReader(ClassProvider): + """ + Read input data from image file. Config should have 'inputs_map' field with mapping + (dictionary) of input layer name and corresponding image path. + """ + __action_name__ = "img" + + def __init__(self, config): + """Initialization method. + :param config: configuration dict. must have 'path' key + """ + self.inputs_map = config['inputs_map'] + + def read(self): + """Return image data.""" + input_data = {} + for input, path in self.inputs_map.items(): + log.info("Reading input file from {} for input '{}' ...".format(path, input)) + input_data[input] = cv2.imread(path) + return input_data + + +class ProtobufReader(ClassProvider): + """ + Read input data in protobuf format. 
Config should have 'path' field (absolute or relative to 'input_data' + defined in env_config.yml). File should store data encoded with protobuf.""" + __action_name__ = "pb" + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + def __init__(self, config): + """Initialization method. + :param config: configuration dict. must have 'path' key + """ + self.inputs_map = config['inputs_map'] + + def read(self): + if onnx_not_installed: + raise RuntimeError("ONNX module not available") + input_data = {} + tensor = TensorProto() + for input_name, input_path in self.inputs_map.items(): + log.info("Reading input file for input {} from {} ...".format(input_name, input_path)) + with open(input_path, 'rb') as f: + tensor.ParseFromString(f.read()) + input_data[input_name] = numpy_helper.to_array(tensor) + return input_data + + +class ExternalData(ClassProvider): + __action_name__ = "external_data" + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + def __init__(self, config): + self.data = deepcopy(config['data']) + assert isinstance(self.data, (dict, list)), \ + "External input data specified in config key 'data' have to be a " \ + "dictionary or list of dictionaries with input layer names as keys and numpy.ndarrays with " \ + "input data as values" + + def read(self): + return self.data + + +class TorchReader(ClassProvider): + """ + Read input data from .pt or .pth file. + All content in file should be stored in list. + Config should have 'path' field (absolute or relative to 'input_data' + defined in env_config.yml). + """ + __action_name__ = "pt" + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + def __init__(self, config): + """Initialization method. + + :param config: configuration dict. must have 'path' key + """ + self.input_path = resolve_file_path(config['path'], as_str=True) + + def read(self): + """Return file content.""" + import torch + log.info("Reading input file from {} ...".format(self.input_path)) + return torch.load(self.input_path) + + +class TFHubInputsGenerator(ClassProvider): + """ + Generates random inputs depending on model's input type + """ + __action_name__ = "generate_tf_hub_inputs" + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + def __init__(self, config=None): + """Initialization method. + """ + self.config = config + + def read(self, tf_hub_model): + """Return file content.""" + return prepare_inputs(get_inputs_info(tf_hub_model)) diff --git a/tests/e2e_tests/common/ref_collector/__init__.py b/tests/e2e_tests/common/ref_collector/__init__.py new file mode 100644 index 00000000000000..8ba81a92b19c53 --- /dev/null +++ b/tests/e2e_tests/common/ref_collector/__init__.py @@ -0,0 +1,3 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + diff --git a/tests/e2e_tests/common/ref_collector/dummy_ref_collector.py b/tests/e2e_tests/common/ref_collector/dummy_ref_collector.py new file mode 100644 index 00000000000000..f1a9046b500203 --- /dev/null +++ b/tests/e2e_tests/common/ref_collector/dummy_ref_collector.py @@ -0,0 +1,25 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +""" +Dummy reference collector to be used when real collector is unavailable. + +Example usage: User do not have tensorflow installed and want to run pytorch-only +tests. 
The TensorFlow reference collector is substituted by the dummy so that no +error occurs during pytest test collection/run. If the user tries to run +TensorFlow-related tests, the execution fails with the specified error. +""" +from e2e_tests.common.ref_collector.provider import ClassProvider + + +def use_dummy(name, error_message): + class DummyRefCollector(ClassProvider): + __action_name__ = name + + def __init__(self, *args, **kwargs): + raise ValueError(error_message) + + def get_refs(self, *args, **kwargs): + pass + + return DummyRefCollector diff --git a/tests/e2e_tests/common/ref_collector/precollected.py b/tests/e2e_tests/common/ref_collector/precollected.py new file mode 100644 index 00000000000000..7e6d618fb5a32b --- /dev/null +++ b/tests/e2e_tests/common/ref_collector/precollected.py @@ -0,0 +1,48 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import torch + +from e2e_tests.common.ref_collector.provider import ClassProvider +from e2e_tests.test_utils.path_utils import resolve_file_path +import numpy as np +import logging as log +import sys + + +class PrecollectedRefs(ClassProvider): + """Precollected reference provider.""" + __action_name__ = "precollected" + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + def __init__(self, config): + self.path = resolve_file_path(config['path'], as_str=True) + + def get_refs(self, **kwargs): + """Return existing reference results.""" + log.info("Reading references from path {}".format(self.path)) + return dict(np.load(self.path, allow_pickle=True)) + + +class PrecollectedTorchRefs(ClassProvider): + """Precollected reference provider.""" + __action_name__ = "torch_precollected" + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + def __init__(self, config): + self.path = resolve_file_path(config['path'], as_str=True) + + def get_refs(self, **kwargs): + """Return existing reference results.""" + log.info("Reading references from path {}".format(self.path)) + return torch.load(self.path) + + +class CustomRefCollector(ClassProvider): + __action_name__ = "custom_ref_collector" + + def __init__(self, config): + self.execution_function = config["execution_function"] + + def get_refs(self, **kwargs): + return self.execution_function() diff --git a/tests/e2e_tests/common/ref_collector/provider.py b/tests/e2e_tests/common/ref_collector/provider.py new file mode 100644 index 00000000000000..3eae1e0bdc81fd --- /dev/null +++ b/tests/e2e_tests/common/ref_collector/provider.py @@ -0,0 +1,34 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import inspect +from e2e_tests.common.common.base_provider import BaseProvider, BaseStepProvider + + +class ClassProvider(BaseProvider): + registry = {} + + @classmethod + def validate(cls): + methods = [ + f[0] for f in inspect.getmembers(cls, predicate=inspect.isfunction) + ] + if 'get_refs' not in methods: + raise AttributeError( + "Requested class {} registered as '{}' doesn't provide required method get_refs" + .format(cls.__name__, cls.__action_name__)) + + +class StepProvider(BaseStepProvider): + __step_name__ = "get_refs" + + def __init__(self, config): + action_name = next(iter(config)) + cfg = config[action_name] + self.executor = ClassProvider.provide(action_name, config=cfg) + + def execute(self, passthrough_data): + data = passthrough_data.get('feed_dict') + passthrough_data['output'] = self.executor.get_refs(input_data=data) + return passthrough_data + 
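For illustration, a minimal custom collector built on the provider contract above might look like the sketch below. The collector name, config key and input values are hypothetical, it assumes the base provider registers subclasses by their `__action_name__` (as the collectors in this patch do), and a plain dict stands in for the pipeline's passthrough container:

import numpy as np

from e2e_tests.common.ref_collector.provider import ClassProvider, StepProvider


class IdentityRefCollector(ClassProvider):
    """Hypothetical collector: echoes the (optionally scaled) inputs back as references."""
    __action_name__ = "identity_refs"

    def __init__(self, config):
        self.scale = config.get("scale", 1)

    def get_refs(self, input_data=None, **kwargs):
        # 'input_data' is the feed_dict forwarded by StepProvider.execute()
        return {name: np.asarray(blob) * self.scale for name, blob in (input_data or {}).items()}


step = StepProvider({"identity_refs": {"scale": 2}})
result = step.execute({"feed_dict": {"input": np.ones((1, 3), dtype=np.float32)}})
# result["output"] == {"input": array([[2., 2., 2.]], dtype=float32)}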
diff --git a/tests/e2e_tests/common/ref_collector/score_onnx_runtime.py b/tests/e2e_tests/common/ref_collector/score_onnx_runtime.py new file mode 100644 index 00000000000000..fd6869c4dc5fa3 --- /dev/null +++ b/tests/e2e_tests/common/ref_collector/score_onnx_runtime.py @@ -0,0 +1,74 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import logging as log +import sys + +from e2e_tests.common.multiprocessing_utils import multiprocessing_run +from e2e_tests.common.ref_collector.provider import ClassProvider + + +class ONNXRuntimeRunner(ClassProvider): + """Base class for inferring ONNX models with ONNX Runtime""" + + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + __action_name__ = "score_onnx_runtime" + + def __init__(self, config): + """ + ONNXRuntime Runner initialization + :param config: dictionary with class configuration parameters: + required config keys: + model: path to the model for inference + """ + self.model = config["model"] + self.ep = config["onnx_rt_ep"] if isinstance(config["onnx_rt_ep"], list) else [config["onnx_rt_ep"]] + self.cast_input_data = config.get("cast_input_data", True) + self.cast_input_data_to_type = config.get("cast_input_data_to_type", "float32") + self.res = None + self.inputs = config["inputs"] + + def run_rt(self, input_data): + """Return ONNX model reference results.""" + import onnxruntime as rt + + log.info("Loading ONNX model from {} ...".format(self.model)) + opts = rt.SessionOptions() + sess = rt.InferenceSession(self.model, sess_options=opts) + if self.ep == [None]: + log.warning("Execution provider is not specified for ONNX Runtime tests. " + "Using CPUExecutionProvider by default.") + self.ep = ["CPUExecutionProvider"] + if not all([ep in sess.get_providers() for ep in self.ep]): + raise ValueError(f"{self.ep} execution provider is not known to ONNX Runtime. " + f"Available execution providers: {str(sess.get_providers())}") + sess.set_providers(self.ep) + providers_set = sess.get_providers() + log.info("Using {} as an execution provider.".format(str(providers_set))) + if self.cast_input_data: + for layer, data in input_data.items(): + input_data[layer] = data.astype(self.cast_input_data_to_type) + if len(input_data) > 1: + log.warning("ONNX Runtime runner is not properly tested to work with multi-input topologies. " + "Please, contact QA.") + for layer in sess.get_inputs(): + model_shape_to_compare = tuple([layer.shape[dim] for dim in range(len(layer.shape)) + if not ((layer.shape[dim] is None) or (isinstance(layer.shape[dim], str)))]) + data_shape_to_compare = tuple([input_data[layer.name].shape[dim] for dim in range(len(layer.shape)) + if not ((layer.shape[dim] is None) or (isinstance(layer.shape[dim], str)))]) + if model_shape_to_compare != data_shape_to_compare: + raise ValueError(f"Shapes of input data {list(input_data.values())[0].shape} and " + f"input blob {sess.get_inputs()[0].shape} are not equal for layer {layer.name}") + output_names = [output.name for output in sess.get_outputs()] + if len(output_names) > 1: + log.warning("ONNX Runtime runner is not properly tested to work with multi-output topologies. 
" + "Please, contact QA.") + log.info("Starting inference with ONNX Runtime ...".format(self.model)) + out = sess.run(output_names, input_data) + res = {output_names[i]: out[i] for i in range(len(output_names))} + return res + + def get_refs(self): + self.res = multiprocessing_run(self.run_rt, [self.inputs], "ONNX Runtime Inference", timeout=200) + return self.res diff --git a/tests/e2e_tests/common/ref_collector/score_paddlepaddle.py b/tests/e2e_tests/common/ref_collector/score_paddlepaddle.py new file mode 100644 index 00000000000000..4d59e3b99d2597 --- /dev/null +++ b/tests/e2e_tests/common/ref_collector/score_paddlepaddle.py @@ -0,0 +1,59 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import logging as log +import os +import sys +from pathlib import Path + +from e2e_tests.common.common.base_provider.ref_collector.provider import ClassProvider + + +os.environ["GLOG_minloglevel"] = "3" + + +class ScorePaddlePaddle(ClassProvider): + """Reference collector for PaddlePaddle models.""" + + __action_name__ = "score_paddlepaddle" + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + def __init__(self, config): + """ + ScorePaddlePaddle initialization + :param config: dictionary with class configuration parameters: + required config keys: + model: model path which will be used in get_model() function + optional config keys: + params_filename: the name of single binary file to load all model parameters. + cast_input_data_to_type: type of data model input data cast to. + """ + self.model = Path(config["model"]) + self.params_filename = config.get("params_filename", None) + self.cast_input_data_to_type = config.get("cast_input_data_to_type", "float32") + self.inputs = config["inputs"] + self.res = {} + + def get_refs(self): + """Return PaddlePaddle model reference results.""" + import paddle + + log.info("Running inference with PaddlePaddle ...") + + for layer, data in self.inputs.items(): + self.inputs[layer] = data.astype(self.cast_input_data_to_type) + + executor = paddle.fluid.Executor(paddle.fluid.CPUPlace()) + + paddle.enable_static() + inference_program, _, output_layers = paddle.fluid.io.load_inference_model( + executor=executor, + dirname=self.model.parent, + model_filename=self.model.name, + params_filename=self.params_filename + ) + out = executor.run(inference_program, feed=self.inputs, fetch_list=output_layers, return_numpy=False) + self.res = dict(zip(map(lambda layer: layer.name, output_layers), out)) + + log.info("PaddlePaddle reference collected successfully") + return self.res diff --git a/tests/e2e_tests/common/ref_collector/score_pytorch.py b/tests/e2e_tests/common/ref_collector/score_pytorch.py new file mode 100644 index 00000000000000..51d31b7378fc76 --- /dev/null +++ b/tests/e2e_tests/common/ref_collector/score_pytorch.py @@ -0,0 +1,367 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import logging as log +import os +import sys + +from utils.path_utils import resolve_dir_path +from e2e_tests.common.ref_collector.provider import ClassProvider + + +class PytorchBaseRunner: + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + """ + Base class for inferring models with PyTorch and converting PyTorch models to ONNX format + """ + + def __init__(self, config): + """ + PyTorchBaseRunner initialization + :param config: dictionary with class configuration parameters: + required config keys: + model_name: name of the 
model which will be used in _get_model() function + torch_model_zoo: path to the folder with pytorch pretrained\torchvision model's weights files + optional config keys: + onnx_dump_path: path to the file or folder where to dump onnx model's representation. + if onnx_dump_path specified as a directory, target dump file name will be constructed from + the path specified in config and model_name attribute + .onnx extension + """ + self.inputs = config["inputs"] + self.model_name = config.get("model_name") + self.torch_model_zoo_path = config.get("torch_model_zoo_path", '') + os.environ['TORCH_HOME'] = self.torch_model_zoo_path + self.get_model_args = config.get("get_model_args", {}) + self.net = self._get_model() + self.onnx_dump_path = config.get("onnx_dump_path") + if config.get('convert_to_onnx'): + self._pytorch_to_onnx() + + def _get_model(self): + """ + `_get_model` function have to be implemented in inherited classes + depending on PyTorch models source (pretrained or torchvision) + """ + raise NotImplementedError("{}\nDo not use {} class directly!".format(self._get_model.__doc__, + self.__class__.__name__)) + + def get_refs(self): + """ + Run inference with PyTorch + Note: input_data for the function have to be represented as a dictionary to keep uniform interface + across all framework's scoring classes. But PyTorch models doesn't have named inputs so the keys + of the dictionary can have arbitrary value. The first numpy ndarray from input_data.values() will be used + as input data + :param input_data: dict with input data for the model + :return: numpy ndarray with inference results + """ + log.info("Running inference with torch ...") + import torch + # PyTorch forward method accepts input data without mapping on input tensor + # All models from pytorch pretrained have only one input, so we will work with 1st numpy array from input dict + input_array = next(iter(self.inputs.values())) + input_variable = torch.autograd.Variable(torch.Tensor(input_array)) + self.net.eval() + self.res = {"output": self.net(input_variable).detach().numpy()} + return self.res + + def _pytorch_to_onnx(self): + """ + Convert and save PyTorch model in ONNX format + :return: + """ + log.info("Dumping torch model to ONNX ...") + import torch + dump_dir = os.path.dirname(self.onnx_dump_path) + + if not os.path.exists(dump_dir): + log.warning("Target dump directory {} doesn't exist! 
Let's try to create it ...".format(dump_dir)) + os.makedirs(dump_dir, mode=0o755, exist_ok=True) + log.warning("{} directory created!".format(dump_dir)) + dump_dir = resolve_dir_path(os.path.dirname(dump_dir), as_str=True) + + # If user defined onnx_dump_path attribute as a folder, target file name will be constructed + # using self.model_name and joined to the specified path + if os.path.isdir(self.onnx_dump_path): + log.warning("Specified ONNX dump path is a directory...") + self.onnx_dump_path = os.path.join(dump_dir, self.model_name + ".onnx") + log.warning("Target model will be saved with specified model name as {}".format(self.onnx_dump_path)) + + if os.path.exists(self.onnx_dump_path): + log.warning( + "Specified ONNX model {} already exist and will not be dumped again".format(self.onnx_dump_path)) + else: + dummy_input = torch.autograd.Variable(torch.randn([1, ] + list(self.net.input_size)), requires_grad=False) + torch.onnx.export(self.net, dummy_input, self.onnx_dump_path, export_params=True) + + +class PytorchPretrainedRunner(ClassProvider, PytorchBaseRunner): + """ + PyTorch Pretrained models inference class + """ + __action_name__ = "score_pytorch_pretrained" + + def _get_model(self): + """ + Get PyTorch model implemented in `pretrained` module + :return: PyTorch Network object + """ + log.info("Getting PyTorch pretrained model ...") + import pretrainedmodels + return getattr(pretrainedmodels.models, self.model_name)(**self.get_model_args) + + +class PytorchTorchvisionRunner(ClassProvider, PytorchBaseRunner): + """ + PyTorch Torchvision models inference class + """ + __action_name__ = "score_pytorch_torchvision" + + def __init__(self, config): + """ + PytorchTorchvisionRunner initialization + :param config: dictionary with class configuration parameters: + required and optional config keys are the same as in parent PytorchBaseRunner class plus optional key + `input_size` used to dump model to onnx format ([3,224,224] by default since most of the torchvision models have + such input size) + """ + self.input_size = config.get("input_size", [3, 224, 224]) + PytorchBaseRunner.__init__(self, config=config) + + def _get_model(self): + """ + Get PyTorch model implemented in `torchvision` module + :return: PyTorch Network object + """ + log.info("Getting PyTorch torchvision model ...") + import torchvision + net = getattr(torchvision.models, self.model_name)(pretrained=True, **self.get_model_args) + """ + Torchvision models doesn't have information about input size like in pretrained models. 
+ `input_size` attribute will be set manually to keep` _pytorch_to_onnx` function implementation + uniform for pretrained and torchvision pytorch models + """ + setattr(net, "input_size", self.input_size) + return net + + +class PytorchTorchvisionDetectionRunner(ClassProvider, PytorchBaseRunner): + """ + PyTorch Torchvision models inference class + """ + __action_name__ = "score_pytorch_torchvision_detection" + + def __init__(self, config): + """ + PytorchTorchvisionRunner initialization + :param config: dictionary with class configuration parameters: + required and optional config keys are the same as in parent PytorchBaseRunner class plus optional key + `input_size` used to dump model to onnx format ([3,800,800] by default since most of the torchvision detection + models have such input size) + """ + self.input_size = config.get("input_size", [3, 800, 800]) + PytorchBaseRunner.__init__(self, config=config) + + def _get_model(self): + """ + Get PyTorch model implemented in `torchvision.models.detection` module + :return: PyTorch Network object + """ + log.info("Getting PyTorch Detection model ...") + import torchvision + net = getattr(torchvision.models.detection, self.model_name)(pretrained=True, **self.get_model_args) + """ + Torchvision Detection models doesn't have information about input size like in pretrained models. + `input_size` attribute will be set manually to keep` _pytorch_to_onnx` function implementation + uniform for pytorch models + """ + setattr(net, "input_size", self.input_size) + return net + + def get_refs(self): + """ + Run inference with PyTorch + Note: input_data for the function have to be represented as a dictionary to keep uniform interface + across all framework's scoring classes. But PyTorch models doesn't have named inputs so the keys + of the dictionary can have arbitrary value. The first numpy ndarray from input_data.values() will be used + as input data + :param input_data: dict with input data for the model + :return: numpy ndarray with inference results + """ + log.info("Running inference with torch ...") + import torch + # PyTorch forward method accepts input data without mapping on input tensor + input_array = next(iter(self.inputs.values())) + input_variable = torch.autograd.Variable(torch.Tensor(input_array)) + self.net.eval() + self.res = {"output": self.net(input_variable)[0]} + return self.res + + +class PytorchTorchvisionOpticalFlowRunner(ClassProvider, PytorchBaseRunner): + """ + PyTorch Torchvision models inference class + """ + __action_name__ = "score_pytorch_torchvision_optical_flow" + + def __init__(self, config): + """ + PytorchTorchvisionRunner initialization + :param config: dictionary with class configuration parameters: + required and optional config keys are the same as in parent PytorchBaseRunner class plus optional key + `input_size` used to dump model to onnx format ([3,520,960] by default since most of the torchvision optical flow + models have + such input size) + """ + self.input_size = config.get("input_size", [3, 520, 960]) + PytorchBaseRunner.__init__(self, config=config) + + def _get_model(self): + """ + Get PyTorch model implemented in `torchvision` module + :return: PyTorch Network object + """ + log.info("Getting PyTorch torchvision model ...") + import torchvision + net = getattr(torchvision.models.optical_flow, self.model_name)(pretrained=True, **self.get_model_args) + """ + Torchvision models doesn't have information about input size like in pretrained models. 
+ `input_size` attribute will be set manually to keep` _pytorch_to_onnx` function implementation + uniform for pretrained and torchvision pytorch models + """ + setattr(net, "input_size", self.input_size) + return net + + def get_refs(self): + """ + Run inference with PyTorch + Note: input_data for the function have to be represented as a dictionary to keep uniform interface + across all framework's scoring classes. But PyTorch models doesn't have named inputs so the keys + of the dictionary can have arbitrary value. + :param input_data: dict with input data for the model + :return: numpy ndarray with inference results + """ + log.info("Running inference with torch ...") + import torch + # PyTorch forward method accepts input data without mapping on input tensor + input_variable = [torch.autograd.Variable(torch.Tensor(x)) for x in self.inputs.values()] + assert len(input_variable) == 2, "There should be 2 inputs for optical flow models" + self.net.eval() + # We are only interested in the final predicted flows (they are the most accurate ones), + # so we will just retrieve the last item in the list + self.res = {"output": self.net(input_variable[0], input_variable[1])} + self.res["output"] = self.res["output"][-1].detach().numpy() + return self.res + + def _pytorch_to_onnx(self): + """ + Convert and save PyTorch model in ONNX format + :return: + """ + log.info("Dumping torch model to ONNX ...") + import torch + dump_dir = os.path.dirname(self.onnx_dump_path) + + if not os.path.exists(dump_dir): + log.warning("Target dump directory {} doesn't exist! Let's try to create it ...".format(dump_dir)) + os.makedirs(dump_dir, mode=0o755, exist_ok=True) + log.warning("{} directory created!".format(dump_dir)) + dump_dir = resolve_dir_path(os.path.dirname(dump_dir), as_str=True) + + # If user defined onnx_dump_path attribute as a folder, target file name will be constructed + # using self.model_name and joined to the specified path + if os.path.isdir(self.onnx_dump_path): + log.warning("Specified ONNX dump path is a directory...") + self.onnx_dump_path = os.path.join(dump_dir, self.model_name + ".onnx") + log.warning("Target model will be saved with specified model name as {}".format(self.onnx_dump_path)) + + if os.path.exists(self.onnx_dump_path): + log.warning( + "Specified ONNX model {} already exist and will not be dumped again".format(self.onnx_dump_path)) + else: + dummy_input = torch.autograd.Variable(torch.randn([1, ] + list(self.net.input_size)), requires_grad=False) + torch.onnx.export(self.net, (dummy_input, dummy_input), self.onnx_dump_path, export_params=True, + opset_version=16) + + +class PytorchTimmRunner(ClassProvider, PytorchBaseRunner): + """ + PyTorch Torchvision models inference class + """ + __action_name__ = "score_pytorch_timm" + + def __init__(self, config): + """ + PytorchTorchvisionRunner initialization + :param config: dictionary with class configuration parameters: + required and optional config keys are the same as in parent PytorchBaseRunner class plus optional key + `input_size` used to dump model to onnx format ([3,224,224] by default since most of the torchvision models have + such input size) + """ + self.input_size = config.get("input_size", [3, 224, 224]) + PytorchBaseRunner.__init__(self, config=config) + + def _get_model(self): + """ + Get PyTorch model implemented in `timm` module + :return: PyTorch Network object + """ + log.info("Getting PyTorch Timm model ...") + import timm + net = getattr(timm.models, self.model_name)(pretrained=True, **self.get_model_args) 
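+ # e.g. model_name="resnet50" resolves here to timm.models.resnet50(pretrained=True) (illustrative model name)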
+        """
+        Timm models don't have information about input size like the pretrained models do.
+        `input_size` attribute will be set manually to keep the `_pytorch_to_onnx` function implementation
+        uniform for PyTorch models
+        """
+        setattr(net, "input_size", self.input_size)
+        return net
+
+
+class PytorchSavedModelRunner(ClassProvider, PytorchBaseRunner):
+    """
+    PyTorch saved models inference class
+    """
+    __action_name__ = "score_pytorch_saved_model"
+
+    def __init__(self, config):
+        """
+        PytorchSavedModelRunner initialization
+        :param config: dictionary with class configuration parameters:
+        required and optional config keys are the same as in the parent PytorchBaseRunner class
+        """
+        self.model_path = config["model-path"]
+        self.model_class_path = config.get('model_class_path')
+        if self.model_class_path:
+            sys.path.insert(0, os.path.abspath(self.model_class_path))
+        PytorchBaseRunner.__init__(self, config=config)
+
+    def _get_model(self):
+        """
+        Load PyTorch model from path
+        :return: PyTorch Network object
+        """
+        log.info("Getting PyTorch saved model ...")
+        import torch
+        net = torch.load(self.model_path)
+
+        return net
+
+    def get_refs(self):
+        """
+        Run inference with PyTorch
+        Note: input data is taken from self.inputs and can be a list or a dict of tensors
+        :return: numpy ndarray with inference results
+        """
+        log.info("Running inference with torch ...")
+        self.net.eval()
+        if isinstance(self.inputs, dict):
+            try:
+                self.res = self.net(**self.inputs)
+            except Exception as e:
+                log.info(f"Tried to infer model with unpacking arguments (self.res = self.net(**input_data)), but got "
+                         f"exception: \n{e}")
+                self.res = self.net(self.inputs)
+        if isinstance(self.inputs, list):
+            self.res = self.net(*self.inputs)
+        return self.res
diff --git a/tests/e2e_tests/common/ref_collector/score_pytorch_onnx_runtime.py b/tests/e2e_tests/common/ref_collector/score_pytorch_onnx_runtime.py
new file mode 100644
index 00000000000000..303fcf9bbc8a13
--- /dev/null
+++ b/tests/e2e_tests/common/ref_collector/score_pytorch_onnx_runtime.py
@@ -0,0 +1,149 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import logging as log
+import os
+import sys
+
+from e2e_tests.common.multiprocessing_utils import multiprocessing_run
+from utils.path_utils import resolve_dir_path
+from e2e_tests.common.ref_collector.score_onnx_runtime import ONNXRuntimeRunner
+from e2e_tests.common.ref_collector.provider import ClassProvider
+
+
+class PyTorchToOnnxRunner:
+    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
+    """
+    Base class for converting PyTorch models to ONNX format and inferring them with ONNX Runtime
+
+    PyTorch net objects don't fully support pickling (https://github.com/pytorch/pytorch/issues/49260) or running with
+    caffe2 after dumping to ONNX (https://github.com/pytorch/pytorch/issues/49752)
+
+    To get and infer a PyTorch pretrained or torchvision model with multiprocessing without risking a crash of the
+    main process, the net is converted to ONNX format and inferred with the ONNXRuntimeRunner class.
+    """
+    def __init__(self, config):
+        """
+        PyTorchToOnnxRunner initialization
+        :param config: dictionary with class configuration parameters:
+        required config keys:
+        model_name: name of the model which will be used in _get_model() function
+        torch_model_zoo_path: path to the folder with pytorch pretrained/torchvision model's weights files
+        optional config keys:
+        onnx_dump_path: path to the file or folder where to dump onnx model's representation.
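+        (illustrative values only: a directory such as /tmp/pytorch_to_onnx_dump or an explicit
+        file such as /tmp/pytorch_to_onnx_dump/resnet50.onnx)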
+ if onnx_dump_path specified as a directory, target dump file name will be constructed from + the path specified in config and model_name attribute + .onnx extension + """ + self.inputs = config["inputs"] + self.model_name = config["model_name"] + self.torch_model_zoo_path = config["torch_model_zoo_path"] + os.environ['TORCH_HOME'] = os.path.join(self.torch_model_zoo_path, self.model_name) + self.get_model_args = config.get("get_model_args", {}) + self.onnx_dump_path = config.get("onnx_dump_path") + self.onnx_model_path = multiprocessing_run(self._get_model, [], "Pytorch Get Model") + + def _get_model(self): + """ + `_get_model` function have to be implemented in inherited classes + depending on PyTorch models source (pretrained or torchvision) + """ + raise NotImplementedError("{}\nDo not use {} class directly!".format(self._get_model.__doc__, + self.__class__.__name__)) + + def get_refs(self): + """ + Run inference with Onnx runner + Note: input_data for the function have to be represented as a dictionary to keep uniform interface + across all framework's scoring classes. But PyTorch models doesn't have named inputs so the keys + of the dictionary can have arbitrary value. The first numpy ndarray from input_data.values() will be used + as input data + :param input_data: dict with input data for the model + :return: numpy ndarray with inference results + """ + runner = ONNXRuntimeRunner({"model": self.onnx_model_path, + "onnx_rt_ep": "CPUExecutionProvider"}) + res = runner.get_refs(self.inputs) + self.res = {"output": next(iter(res.values()))} + return self.res + + def _pytorch_to_onnx(self, net): + """ + Convert and save PyTorch model in ONNX format + :return: saved ONNX model path + """ + log.info("Dumping torch model to ONNX ...") + import torch + dump_dir = os.path.dirname(self.onnx_dump_path) + + if not os.path.exists(dump_dir): + log.warning("Target dump directory {} doesn't exist! 
Let's try to create it ...".format(dump_dir)) + os.makedirs(dump_dir, mode=0o755, exist_ok=True) + log.warning("{} directory created!".format(dump_dir)) + dump_dir = resolve_dir_path(os.path.dirname(dump_dir), as_str=True) + + # If user defined onnx_dump_path attribute as a folder, target file name will be constructed + # using self.model_name and joined to the specified path + if os.path.isdir(self.onnx_dump_path): + log.warning("Specified ONNX dump path is a directory...") + model_path = os.path.join(dump_dir, self.model_name + ".onnx") + log.warning("Target model will be saved with specified model name as {}".format(self.onnx_dump_path)) + else: + model_path = self.onnx_dump_path + if os.path.exists(model_path): + log.warning( + "Specified ONNX model {} already exist and will not be dumped again".format(model_path)) + else: + dummy_input = torch.autograd.Variable(torch.randn([1, ] + list(net.input_size)), requires_grad=False) + torch.onnx.export(net, dummy_input, model_path, export_params=True) + return model_path + + +class PytorchPretrainedToONNXRunner(ClassProvider, PyTorchToOnnxRunner): + """ + PyTorch Pretrained models inference class + """ + __action_name__ = "score_pytorch_pretrained_with_onnx" + + def _get_model(self): + """ + Get PyTorch model implemented in `pretrained` module + :return: path to dumped onnx object + """ + log.info("Getting PyTorch pretrained model ...") + import pretrainedmodels + net = getattr(pretrainedmodels.models, self.model_name)(**self.get_model_args) + return self._pytorch_to_onnx(net) + + +class PytorchTorchvisionToONNXRunner(ClassProvider, PyTorchToOnnxRunner): + """ + PyTorch Torchvision models inference class + """ + __action_name__ = "score_pytorch_torchvision_with_onnx" + + def __init__(self, config): + """ + PytorchTorchvisionRunner initialization + :param config: dictionary with class configuration parameters: + required and optional config keys are the same as in parent PytorchBaseRunner class plus optional key + `input_size` used to dump model to onnx format ([3,224,224] by default since most of the torchvision models have + such input size) + """ + self.input_size = config.get("input_size", [3, 224, 224]) + PyTorchToOnnxRunner.__init__(self, config=config) + + def _get_model(self): + """ + Get PyTorch model implemented in `torchvision` module + :return: path to dumped onnx object + """ + log.info("Getting PyTorch torchvision model ...") + import torchvision + net = getattr(torchvision.models, self.model_name)(pretrained=True, **self.get_model_args) + """ + Torchvision models doesn't have information about input size like in pretrained models. 
+ `input_size` attribute will be set manually to keep` _pytorch_to_onnx` function implementation + uniform for pretrained and torchvision pytorch models + """ + setattr(net, "input_size", self.input_size) + return self._pytorch_to_onnx(net) diff --git a/tests/e2e_tests/common/ref_collector/score_tf.py b/tests/e2e_tests/common/ref_collector/score_tf.py new file mode 100644 index 00000000000000..16df920bec48a9 --- /dev/null +++ b/tests/e2e_tests/common/ref_collector/score_tf.py @@ -0,0 +1,259 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from e2e_tests.common.ref_collector.provider import ClassProvider +from e2e_tests.utils.path_utils import resolve_file_path, resolve_dir_path +import os +import re +import sys +import itertools +from collections import defaultdict +import logging as log + +os.environ['GLOG_minloglevel'] = '3' + + +def build_control_flow_children_map(graph): + """ + Builds map: graph.node -> set of nodes that have incoming control flow + dependencies from graph.node. + """ + mapping = defaultdict(set) + ops = graph.get_operations() + for op in ops: + for src in op.control_inputs: + mapping[src].add(op) + return mapping + + +def trace_loop(enter, control_flow_map): + """ + Starting from enter traverse graph nodes until face Exit op, if Enter is + discovered, do trace_loop for it. Returns all discovered tensors inside the + loop(s). + """ + for_examination = set(enter.outputs[0].consumers()) # ops + visited = set() + collected = set(enter.outputs) # tensors + exits = set() # ops + while len(for_examination): + candidate = for_examination.pop() + if candidate in visited: + continue + visited.add(candidate) + if candidate.type == 'Exit': + exits.add(candidate) + continue + if candidate.type == 'Enter': + # nested loop is detected + nested_collected, nested_exits = trace_loop(candidate, + control_flow_map) + for_examination = for_examination | nested_exits + collected = collected | nested_collected + else: + collected = collected | set(candidate.outputs) + for_examination = for_examination | set( + itertools.chain.from_iterable( + [output.consumers() for output in candidate.outputs])) + for_examination = for_examination | control_flow_map[candidate] + return collected, exits + + +def find_all_tensors_in_loops(graph): + """Search for all Enter operations in the graph.""" + ops = graph.get_operations() + enters = [op for op in ops if op.type == 'Enter'] + collected = set() + control_flow_map = build_control_flow_children_map(graph) + for enter in enters: + nested_collected, _ = trace_loop(enter, control_flow_map) + collected = collected | nested_collected + return collected + + +def children(op_name: str, graph): + """Get operation node children.""" + op = graph.get_operation_by_name(op_name) + return set(op for out in op.outputs for op in out.consumers()) + + +def summarize_graph(graph_def): + import tensorflow as tf + unlikely_output_types = ['Const', 'Assign', 'NoOp', 'Placeholder', 'Assert', 'switch_t', 'switch_f'] + placeholders = dict() + outputs = list() + graph = tf.Graph() + with graph.as_default(): # pylint: disable=not-context-manager + tf.import_graph_def(graph_def, name='') + for node in graph.as_graph_def().node: # pylint: disable=no-member + if node.op == 'Placeholder': + node_dict = dict() + node_dict['type'] = tf.DType(node.attr['dtype'].type).name + node_dict['shape'] = str(tf.TensorShape(node.attr['shape'].shape)).replace(' ', '').replace('?', '-1') + placeholders[node.name] = node_dict + if len(children(node.name, 
graph)) == 0: + if node.op not in unlikely_output_types and node.name.split('/')[-1] not in unlikely_output_types: + outputs.append(node.name) + result = dict() + result['inputs'] = placeholders + result['outputs'] = outputs + return result + + +def get_output_node_names_list(graph_def, user_defined_output_node_names_list: list): + return summarize_graph(graph_def)['outputs'] if len(user_defined_output_node_names_list) == 0 \ + else user_defined_output_node_names_list + + +class ScoreTensorFlowBase(ClassProvider): + """Reference collector for TensorFlow models.""" + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + def __init__(self, config): + self.output_nodes_for_freeze = config.get("output_nodes_for_freeze", None) + self.additional_outputs = config.get("additional_outputs", []) + self.override_default_outputs = config.get("override_default_outputs", False) + self.additional_inputs = config.get("additional_inputs", []) + self.user_output_node_names_list = config.get("user_output_node_names_list", []) + self.override_default_inputs = config.get("override_default_inputs", False) + self.inputs = config["inputs"] + self.res = {} + + def load_graph(self): + """ + load_graph function have to be implemented in inherited classes + depending on type of input tf model (from saved_dir, from meta or simple pb) + """ + raise NotImplementedError("{}\nDo not use {} class directly!".format(self.load_graph().__doc__, + self.__class__.__name__)) + + def get_refs(self): + """Return TensorFlow model reference results.""" + log.info("Running inference with tensorflow ...") + import tensorflow as tf + graph = self.load_graph() + feed_dict = {} + summary_info = summarize_graph(graph.as_graph_def()) + + input_layers, output_layers = list(summary_info['inputs'].keys()), summary_info['outputs'] + if self.override_default_outputs and self.additional_outputs: + output_layers = self.additional_outputs + else: + output_layers.extend(self.additional_outputs) + if self.override_default_inputs and self.additional_inputs: + input_layers = self.additional_inputs + else: + input_layers.extend(self.additional_inputs) + data_keys = [key for key in self.inputs.keys()] + if sorted(input_layers) != sorted(data_keys): + raise ValueError('input data keys: {data_keys} do not match input ' + 'layers of network: {input_layers}'.format(data_keys=data_keys, input_layers=input_layers)) + + for input_layer_name in input_layers: + # Case when port is already in layer name + port = re.search(r':[0-9]*$', input_layer_name) + if port is not None: + tensor = graph.get_tensor_by_name(input_layer_name) + else: + tensor = graph.get_tensor_by_name(input_layer_name + ':0') + feed_dict[tensor] = self.inputs[input_layer_name] + output_tensors = [] + for name in output_layers: + tensor = graph.get_tensor_by_name(name + ':0') + output_tensors.append(tensor) + + log.info("Running tf.Session") + with graph.as_default(): + with tf.compat.v1.Session(graph=graph) as session: + outputs = session.run(output_tensors, feed_dict=feed_dict) + self.res = dict(zip(output_layers, outputs)) + log.info("TensorFlow reference collected successfully\n") + return self.res + + +class ScoreTensorFlow(ScoreTensorFlowBase): + __action_name__ = "score_tf" + + def __init__(self, config): + self.model = resolve_file_path(config["model"], as_str=True) + super().__init__(config=config) + + def load_graph(self): + import tensorflow as tf + tf.compat.v1.reset_default_graph() + graph = tf.Graph() + graph_def = tf.compat.v1.GraphDef() + 
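+        # The frozen .pb file is parsed into graph_def below; device placements recorded in the
+        # graph are then cleared so they do not constrain where the imported graph can run.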
+ with open(self.model, "rb") as model_file: + graph_def.ParseFromString(model_file.read()) + + nodes_to_clear_device = graph_def.node if isinstance( + graph_def, tf.compat.v1.GraphDef) else graph_def.graph_def.node + for node in nodes_to_clear_device: + node.device = "" + + with graph.as_default(): + tf.import_graph_def(graph_def, name='') + + log.info("tf graph was created") + return graph + + +class ScoreTensorFlowMeta(ScoreTensorFlowBase): + __action_name__ = "score_tf_meta" + + def __init__(self, config): + self.model = resolve_file_path(config["model"], as_str=True) + super().__init__(config=config) + + def load_graph(self): + import tensorflow as tf + tf.compat.v1.reset_default_graph() + graph = tf.Graph() + graph_def = tf.compat.v1.MetaGraphDef() + + with open(self.model, "rb") as model_file: + graph_def.ParseFromString(model_file.read()) + + nodes_to_clear_device = graph_def.node if isinstance( + graph_def, tf.compat.v1.GraphDef) else graph_def.graph_def.node + for node in nodes_to_clear_device: + node.device = "" + + assert bool(self.output_nodes_for_freeze), \ + "Input model has .meta extension. To freeze model need to specify 'output_nodes_for_freeze'" + log.info("Created tf.Session") + with tf.compat.v1.Session() as sess: + restorer = tf.compat.v1.train.import_meta_graph(graph_def) + restorer.restore(sess, re.sub('\.meta$', '', self.model)) + graph_def = tf.compat.v1.graph_util.convert_variables_to_constants( + sess, graph_def.graph_def, self.output_nodes_for_freeze) + + with graph.as_default(): + tf.import_graph_def(graph_def, name='') + + log.info("tf graph was created") + return graph + + +class ScoreTensorFlowFromDir(ScoreTensorFlowBase): + __action_name__ = "score_tf_dir" + + def __init__(self, config): + self.model = resolve_dir_path(config["model"], as_str=True) + super().__init__(config=config) + + def load_graph(self): + import tensorflow as tf + tf.compat.v1.reset_default_graph() + tags = [tf.saved_model.SERVING] + log.info("Created tf.Session") + with tf.compat.v1.Session() as sess: + meta_graph_def = tf.compat.v1.saved_model.loader.load(sess, tags, self.model) + outputs = get_output_node_names_list(meta_graph_def.graph_def, self.user_output_node_names_list) + graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(sess, meta_graph_def.graph_def, outputs) + graph = tf.Graph() + with graph.as_default(): + tf.import_graph_def(graph_def, name='') + log.info("tf graph was created") + return graph \ No newline at end of file diff --git a/tests/e2e_tests/common/ref_collector/score_tf_hub.py b/tests/e2e_tests/common/ref_collector/score_tf_hub.py new file mode 100644 index 00000000000000..e5dda2659418e3 --- /dev/null +++ b/tests/e2e_tests/common/ref_collector/score_tf_hub.py @@ -0,0 +1,34 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import os +import sys +import logging as log +import tensorflow as tf +from .tf_hub_ref_provider import ClassProvider + + +os.environ['GLOG_minloglevel'] = '3' + + +class ScoreTFHub(ClassProvider): + """Reference collector for TensorFlow Hub models.""" + __action_name__ = "score_tf_hub" + log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + def __init__(self, config): + self.res = {} + + def get_refs(self, passthrough_data): + inputs = passthrough_data['feed_dict'] + model = passthrough_data['model_obj'] + # repack input dictionary to tensorflow constants + tf_inputs = {} + for input_name, input_value in inputs.items(): + tf_inputs[input_name] = 
tf.constant(input_value) + + for out_name, out_value in model(**tf_inputs).items(): + self.res[out_name] = out_value.numpy() + + return self.res + diff --git a/tests/e2e_tests/common/ref_collector/score_tf_lite.py b/tests/e2e_tests/common/ref_collector/score_tf_lite.py new file mode 100644 index 00000000000000..fe519bc0b08050 --- /dev/null +++ b/tests/e2e_tests/common/ref_collector/score_tf_lite.py @@ -0,0 +1,33 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from e2e_tests.common.ref_collector.provider import ClassProvider + + +class ScoreTensorFLowLite(ClassProvider): + __action_name__ = "score_tf_lite" + + def __init__(self, config): + self.model = config["model"] + self.inputs = config["inputs"] + self.res = {} + + def get_refs(self): + import tensorflow as tf + interpreter = tf.compat.v1.lite.Interpreter(model_path=self.model) + interpreter.allocate_tensors() + input_details = interpreter.get_input_details() + output_details = interpreter.get_output_details() + input_name_to_id_mapping = {input['name']: input['index'] for input in input_details} + + for layer, data in self.inputs.items(): + tensor_index = input_name_to_id_mapping[layer] + tensor_id = next(i for i, tensor in enumerate(input_details) if tensor['index'] == tensor_index) + interpreter.set_tensor(input_details[tensor_id]['index'], data) + + interpreter.invoke() + + for output in output_details: + self.res[output['name']] = interpreter.get_tensor(output['index']) + + return self.res diff --git a/tests/e2e_tests/common/ref_collector/score_tf_v2.py b/tests/e2e_tests/common/ref_collector/score_tf_v2.py new file mode 100644 index 00000000000000..38c56c6d891cfe --- /dev/null +++ b/tests/e2e_tests/common/ref_collector/score_tf_v2.py @@ -0,0 +1,78 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import gc +import logging as log +import os +import tempfile +from distutils.version import LooseVersion +from pathlib import Path + +from utils.path_utils import resolve_dir_path +from e2e_tests.common.ref_collector.provider import ClassProvider +from .score_tf import ScoreTensorFlowBase + +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' + + +class ScoreTensorFlow(ClassProvider): + __action_name__ = "score_tf_v2" + + def __init__(self, config): + self.saved_model_dir = resolve_dir_path(config["saved_model_dir"], as_str=True) + self.inputs = config["inputs"] + self.res = {} + + def get_refs(self): + import tensorflow as tf + """Return TensorFlow model reference results.""" + input_data_constants = {name: tf.constant(val) for name, val in self.inputs.items()} + log.info("Running inference with tensorflow {} ...".format(tf.__version__)) + model = tf.saved_model.load(self.saved_model_dir) + infer_func = model.signatures["serving_default"] + self.res = infer_func(**input_data_constants) + tf.keras.backend.clear_session() + del model, input_data_constants, infer_func + gc.collect() + return self.res + + +class ScoreTensorFlowV2ByV1(ScoreTensorFlowBase): + __action_name__ = "score_convert_TF2_to_TF1" + + def __init__(self, config): + self.model = config["model"] + super().__init__(config=config) + + def load_graph(self): + import tensorflow as tf + import tensorflow.compat.v1 as tf_v1 + from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 + assert LooseVersion(tf.__version__) >= LooseVersion("2"), "This collector can't be used with TF 1.* version" + + # disable eager execution of TensorFlow 2 environment immediately + 
tf_v1.disable_eager_execution() + + with tempfile.TemporaryDirectory() as tmpdir: + tmp_model_path = Path(tmpdir, "saved_model.pb") + + # Convert TF2 to TF1 + tf_v1.enable_eager_execution() + imported = tf.saved_model.load(self.model) + frozen_func = convert_variables_to_constants_v2(imported.signatures['serving_default'], + lower_control_flow=False) + graph_def = frozen_func.graph.as_graph_def(add_shapes=True) + tf_v1.io.write_graph(graph_def, str(tmp_model_path.parent), tmp_model_path.name, as_text=False) + + graph = tf_v1.Graph() + + with tf_v1.gfile.GFile(str(tmp_model_path), 'rb') as f: + graph_def = tf_v1.GraphDef() + graph_def.ParseFromString(f.read()) + + with graph.as_default(): + tf.import_graph_def(graph_def, name='') + + tf_v1.disable_eager_execution() + + return graph diff --git a/tests/e2e_tests/common/ref_collector/tf_hub_ref_provider.py b/tests/e2e_tests/common/ref_collector/tf_hub_ref_provider.py new file mode 100644 index 00000000000000..d8fe54cc356ef4 --- /dev/null +++ b/tests/e2e_tests/common/ref_collector/tf_hub_ref_provider.py @@ -0,0 +1,33 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import inspect + +from e2e_tests.common.common.base_provider import BaseProvider, BaseStepProvider + + +class ClassProvider(BaseProvider): + registry = {} + + @classmethod + def validate(cls): + methods = [ + f[0] for f in inspect.getmembers(cls, predicate=inspect.isfunction) + ] + if 'get_refs' not in methods: + raise AttributeError( + "Requested class {} registred as '{}' doesn't provide required method get_refs" + .format(cls.__name__, cls.__action_name__)) + + +class TFHubStepProvider(BaseStepProvider): + __step_name__ = "get_refs_tf_hub" + + def __init__(self, config): + action_name = next(iter(config)) + cfg = config[action_name] + self.executor = ClassProvider.provide(action_name, config=cfg) + + def execute(self, passthrough_data): + passthrough_data['output'] = self.executor.get_refs(passthrough_data) + return passthrough_data diff --git a/tests/e2e_tests/common/sys_info_utils.py b/tests/e2e_tests/common/sys_info_utils.py new file mode 100644 index 00000000000000..b96e7693191a81 --- /dev/null +++ b/tests/e2e_tests/common/sys_info_utils.py @@ -0,0 +1,434 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# pylint: disable=logging-fstring-interpolation,fixme + +""" +Functions for getting system information. 
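+
+Most helpers are thin wrappers around the ``platform``, ``distro`` and ``cpuinfo`` packages;
+device descriptions for known platform aliases are read from the bundled platforms.yml.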
+""" + +import os +import sys +import contextlib +import logging +import multiprocessing +import pathlib +import platform +import re +import subprocess +from enum import Enum + +import cpuinfo +import distro +import yaml + +if sys.hexversion < 0x3060000: + raise Exception('Python version must be >= 3.6') + + +with open(os.path.join(os.path.dirname(__file__), 'platforms.yml'), 'r') as f: + platforms = yaml.safe_load(f) + + +# Host name + +def get_host_name(): + """ Get hostname """ + return platform.node() + + +# OS info + +class UnsupportedOsError(Exception): + """ Exception for unsupported OS type + Originally taken from https://gitlab-icv.toolbox.iotg.sclab.intel.com/inference-engine/infrastructure/blob/master/common/system_info.py # pylint: disable=line-too-long + All changes shall be done in the original location first (inference-engine/infrastructure repo) + """ + def __init__(self, *args, **kwargs): + error_message = f'OS type "{platform.system()}" is not currently supported' + if args or kwargs: + super().__init__(*args, **kwargs) + else: + super().__init__(error_message) + + +class OsType(Enum): + """ Container for supported os types + Originally taken from https://gitlab-icv.toolbox.iotg.sclab.intel.com/inference-engine/infrastructure/blob/master/common/system_info.py # pylint: disable=line-too-long + All changes shall be done in the original location first (inference-engine/infrastructure repo) + """ + WINDOWS = 'Windows' + LINUX = 'Linux' + DARWIN = 'Darwin' + + +def get_os_type(): + """ Return OS type """ + return platform.system() + + +def os_type_is_windows(): + """ Returns True if OS type is Windows. Otherwise returns False""" + return platform.system() == OsType.WINDOWS.value + + +def os_type_is_linux(): + """ Returns True if OS type is Linux. Otherwise returns False""" + return platform.system() == OsType.LINUX.value + + +def os_type_is_darwin(): + """ Returns True if OS type is Darwin. 
Otherwise returns False""" + return platform.system() == OsType.DARWIN.value + + +def get_os_name(): + """ Check OS type and return OS name + Originally taken from https://gitlab-icv.toolbox.iotg.sclab.intel.com/inference-engine/infrastructure/blob/master/common/system_info.py # pylint: disable=line-too-long + All changes shall be done in the original location first (inference-engine/infrastructure repo) + + :return: OS name + :rtype: String | Exception if it is not supported + """ + if os_type_is_linux(): + return distro.id().lower() + if os_type_is_windows() or os_type_is_darwin(): + return platform.system().lower() + raise UnsupportedOsError() + + +def get_os_version(): + """ Check OS version and return it + Originally taken from https://gitlab-icv.toolbox.iotg.sclab.intel.com/inference-engine/infrastructure/blob/master/common/system_info.py # pylint: disable=line-too-long + All changes shall be done in the original location first (inference-engine/infrastructure repo) + + :return: OS version + :rtype: tuple of strings | Exception if it is not supported + """ + if os_type_is_linux(): + return distro.major_version(), distro.minor_version() + if os_type_is_windows(): + return str(sys.getwindowsversion().major), str(sys.getwindowsversion().minor) + if os_type_is_darwin(): + return tuple(platform.mac_ver()[0].split(".")[:2]) + raise UnsupportedOsError() + + +def get_os(): + """ Get OS """ + if os_type_is_linux(): + # distro.linux_distribution() => ('Ubuntu', '16.04', 'xenial') + _os = ''.join(distro.linux_distribution()[:2]) + elif os_type_is_windows(): + # platform.win32_ver() => ('10', '10.0.17763', 'SP0', 'Multiprocessor Free') + _os = 'Windows{}'.format(str(platform.win32_ver()[0])) + elif os_type_is_darwin(): + # platform.mac_ver() => ('10.5.8', ('', '', ''), 'i386') + _os = 'MacOS{}'.format(str(platform.mac_ver()[0])) + else: + raise UnsupportedOsError() + return _os + + +# Platform info + +def get_platform(env): + """ Get platform """ + platform_info = {'alias': '', 'info': ''} + alias = env.get('platform') + if alias: + platform_info.update({'alias': alias}) + platform_info.update({'info': get_platform_info(alias)}) + return platform_info + + +def get_platform_info(platform_alias): + """ Get platform info """ + platform_info = { + # CPU/GPU + 'apl': 'ApolloLake', + 'cfl': 'CoffeeLake', + 'clx': 'CascadeLake', + 'clx-ap': 'CascadeLake', + 'cslx': 'CascadeLake', + 'cpx': 'CooperLake', + 'halo': 'Skylake', + 'iclu': 'IceLake', + 'skl': 'Skylake', + 'sklx': 'Skylake', + 'skx-avx512': 'Skylake', + 'skl-e': 'Skylake', + 'tglu': 'TigerLake', + 'whl': 'WhiskyLake', + 'epyc': 'AMD EPYC 7601', + + # Myriad/HDDL + 'myriad': 'Myriad 2 Stick', + 'myriadx': 'Myriad X Stick', + 'myriadx-evm': 'Myriad X Board', + 'myriadx-pc': 'Myriad X Board 2085', + 'hddl': 'HDDL-R', + + # FPGA + 'hddlf': 'PyramidLake', + 'hddlf_SG2': 'PyramidLake SG2', + + # VCAA + 'vcaa': 'Hiker Hights PCI-e board CPU/GPU/HDDL', + } + + return platform_info.get(platform_alias, '') + + +# CPU info + +def get_cpu_name(): + """ Get CPU name """ + # cpuinfo.get_cpu_info().get('brand', '') => Intel(R) Core(TM) i7-8700K CPU @ 3.70GHz + return cpuinfo.get_cpu_info().get('brand', '') + + +def get_device_description(platform_selector, device): + """Get device detailed info + :param platform_selector: platform identifier (Jenkins label) + :param device: device + :return: string with detailed info + """ + return platforms.get(platform_selector, {}).get(device, {}).get('description', '') + + +def get_cpu_count(): + """ + Originally taken 
from https://gitlab-icv.toolbox.iotg.sclab.intel.com/inference-engine/infrastructure/blob/master/common/system_info.py#L138 # pylint: disable=line-too-long + All changes shall be done in the original location first (inference-engine/infrastructure repo). + + Custom `cpu_count` calculates the number of CPUs as minimum of: + * System CPUs count by ``multiprocessing.cpu_count()`` + * CPU affinity settings of the current process + * CFS scheduler CPU bandwidth limit + + :return: The number of CPUs available to be used by the current process, it is >= 1 + :rtype: int + """ + + cpu_counts = [] + cpu_counts.append(multiprocessing.cpu_count()) + + # Number of available CPUs given affinity settings + # More info: http://man7.org/linux/man-pages/man2/sched_setaffinity.2.html + if hasattr(os, "sched_getaffinity"): + with contextlib.suppress(NotImplementedError): + cpu_counts.append(len(os.sched_getaffinity(0))) # pylint: disable=no-member + + if os_type_is_linux(): + # CFS scheduler CPU bandwidth limit + # More info: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + with contextlib.suppress(OSError, ValueError): + # CPU clock time allocated within a period (in microseconds) + cfs_quota = int(pathlib.Path("/sys/fs/cgroup/cpu/cpu.cfs_quota_us"). + read_text(errors="strict")) + # Real world time length of a period (in microseconds) + cfs_period = int(pathlib.Path("/sys/fs/cgroup/cpu/cpu.cfs_period_us"). + read_text(errors="strict")) + if cfs_quota > 0 and cfs_period > 0: + cpu_counts.append(int(cfs_quota / cfs_period)) + elif os_type_is_windows(): + # Workaround for Python bug with some pre-production CPU + try: + env_cpu_count = os.getenv('NUMBER_OF_PROCESSORS') + if env_cpu_count and env_cpu_count != cpu_counts[0]: + proc = subprocess.run( + 'powershell "$cs=Get-WmiObject -class Win32_ComputerSystem; '\ + '$cs.numberoflogicalprocessors"', + stdout=subprocess.PIPE, encoding='utf-8', shell=True, timeout=5, check=True) + cpu_counts[0] = int(proc.stdout) + except Exception: # pylint: disable=broad-except + pass + + return max(min(cpu_counts), 1) + + +class CoreInfo: + """Wrapper for getting cpu info""" + + def __init__(self): + self._log = logging.getLogger("sys_info.coreinfo") + + def _run_tool(self, cmd): + """ Run tool, return stdout or None if running is not successful. """ + + # pylint: disable=subprocess-run-check + + result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + if result.returncode: + self._log.warning(f"{cmd} running failed") + return None + return result.stdout.decode("utf-8") + + def _get_lscpu_info(self, cpu_property_name, regex): + """ Linux specific method. Run lscpu tool and parse its output. + Return extracted CPU property value in case of successful run, None otherwise. + + Refer https://man7.org/linux/man-pages/man1/lscpu.1.html for tool manual. + """ + cpu_prop_count = None + + stdout = self._run_tool([f"lscpu | grep '{cpu_property_name}'"]) + if stdout: + match = re.search(regex, stdout.rstrip()) + if match: + cpu_prop_count = int(match.group(1)) + + return cpu_prop_count + + def _get_coreinfo_info(self, cpu_property_opt, regex): + """ Windows specific method. Run coreinfo tool and parse its output. + Return extracted CPU property value in case of successful run, None otherwise. + + Refer https://docs.microsoft.com/en-us/sysinternals/downloads/coreinfo for tool manual. 
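+
+        Illustrative example: with cpu_property_opt="-s" every output line matching
+        "Socket <N>" is counted once, so the returned value is the number of CPU sockets
+        reported by Coreinfo.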
+ """ + cpu_prop_count = 0 + + stdout = self._run_tool(["Coreinfo.exe", cpu_property_opt]) + if stdout: + for line in stdout.split("\n"): + if re.search(regex, line.rstrip()): + cpu_prop_count += 1 + return cpu_prop_count or None + + def get_cpu_cores(self): + """ Return the number of CPU cores """ + if os_type_is_linux(): + return self._get_lscpu_info(cpu_property_name="Core(s) per socket", regex=r"(\d+)$") + + if os_type_is_windows(): + return self._get_coreinfo_info(cpu_property_opt="-c", regex=r"Physical Processor (\d+)") + + self._log.warning(f"OS type '{get_os_type()}' is not currently supported") + return None + + def get_cpu_sockets(self): + """ Return the number of CPU sockets """ + if os_type_is_linux(): + return self._get_lscpu_info(cpu_property_name="Socket(s)", regex=r"(\d+)$") + + if os_type_is_windows(): + return self._get_coreinfo_info(cpu_property_opt="-s", regex=r"Socket (\d+)") + + self._log.warning(f"OS type '{get_os_type()}' is not currently supported") + return None + + def get_cpu_numa_nodes(self): + """ Return the number of CPU numa nodes """ + if os_type_is_linux(): + return self._get_lscpu_info(cpu_property_name="NUMA node(s)", regex=r"(\d+)$") + + if os_type_is_windows(): + return self._get_coreinfo_info(cpu_property_opt="-n", regex=r"NUMA Node (\d+)") + + self._log.warning(f"OS type '{get_os_type()}' is not currently supported") + return None + + +def get_cpu_max_instructions_set(): + """ Get CPU max instructions set """ + look_for = ['avx512vnni', 'avx512_vnni', 'avx512', 'avx2', 'sse4_2'] + for item in look_for: + for instruction in cpuinfo.get_cpu_info().get('flags', []): + if item in instruction: + return item + + return '' + + +def get_default_bf16_settings(): + """ Get default BF16 settings + We suppose that BF16 is enabled by default if platform supports BF16 (in other words if + avx512_bf16 is in instructions set) + :return: boolean, True if BF16 is enabled by default (e.g. 
CPX), False - otherwise + """ + for instruction in cpuinfo.get_cpu_info().get('flags', []): + if 'avx512_bf16' in instruction: + return True + return False + + +def get_sys_info(): + """Return dictionary with system information""" + return { + "hostname": get_host_name(), + "os": get_os(), + "os_name": get_os_name(), + "os_version": get_os_version(), + "cpu_info": get_cpu_name(), + "cpu_count": get_cpu_count(), + "cpu_cores": CoreInfo().get_cpu_cores(), + "cpu_sockets": CoreInfo().get_cpu_sockets(), + "cpu_numa_nodes": CoreInfo().get_cpu_numa_nodes(), + "cpu_max_instructions_set": get_cpu_max_instructions_set(), + "bf16_support": get_default_bf16_settings(), + } + +# Jenkins-related utils + + +def is_running_under_jenkins(): + """ Checks if running under Jenkins """ + return 'JENKINS_URL' in os.environ + + +def get_jenkins_url(): + """ Get Jenkins URL of the current job""" + return os.environ.get('BUILD_URL', '').rstrip('/') + + +def get_parent_jenkins_url(): + """ Get Jenkins URL of the parent job""" + return os.environ.get('PARENT_BUILD_URL', '').rstrip('/') + + +def get_mc_entrypoint_url(): + """Get Jenkins URL of the MC entrypoint job""" + return os.environ.get('MC_ROOT_JOB_URL', '').rstrip('/') + + +def get_jenkins_info(): + """Return dictionary with Jenkins information""" + return { + "jenkins_run": is_running_under_jenkins(), + "jenkins_url": get_jenkins_url(), + "parent_jenkins_url": get_parent_jenkins_url(), + } + + +def get_mc_jenkins_info(): + """Return dictionary with MC specific Jenkins information""" + return { + "jenkins_run": is_running_under_jenkins(), + "mc_task_url": get_jenkins_url(), + "mc_entrypoint_url": get_mc_entrypoint_url(), + } + + +def path_to_url(artifact_path, test_folder): + # TODO: USED BY OLD ACCURACY TESTS - TO REMOVE LOOKING FORWARD + """ Converts Jenkins artifact path to URL """ + if is_running_under_jenkins(): + work_dir = os.path.join(os.environ["WORKSPACE"], os.path.join('tests', test_folder)) + return os.path.join( + os.environ["BUILD_URL"], 'artifact', 'tests', test_folder, + os.path.relpath(artifact_path, work_dir)).replace( + '\\', '/') + return None + + +def path_to_url_new(log_name, log_path=None): + """ Converts Jenkins artifact path to URL - new infrastructure""" + if is_running_under_jenkins(): + log_path = os.environ["LOG_PATH"] if not log_path else log_path + return os.path.join( + os.environ["BUILD_URL"], + 'artifact/b/logs', + os.path.relpath(log_name, log_path) + ).replace('\\', '/') + return None diff --git a/tests/e2e_tests/common/table_utils.py b/tests/e2e_tests/common/table_utils.py new file mode 100644 index 00000000000000..1a1372e788ab52 --- /dev/null +++ b/tests/e2e_tests/common/table_utils.py @@ -0,0 +1,13 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Common table formatting/creation utils used across E2E tests framework.""" +#pylint:disable=import-error +from tabulate import tabulate + + +def make_table(*args, **kwargs): + """Wrapper function for `tabulate` to unify table styles across tests.""" + tablefmt = kwargs.pop('tablefmt', 'orgtbl') + table = tabulate(*args, tablefmt=tablefmt, **kwargs) + return table diff --git a/tests/e2e_tests/config.py b/tests/e2e_tests/config.py new file mode 100644 index 00000000000000..2f08cb467eb35b --- /dev/null +++ b/tests/e2e_tests/config.py @@ -0,0 +1,14 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import os + +from common import config + +""" TT_PRODUCT_VERSION_SUFFIX - Environment version suffix 
provided by user""" +product_version_suffix = os.environ.get("TT_PRODUCT_VERSION_SUFFIX", "e2e_tests") +config.product_version_suffix = product_version_suffix + +""" TT_REPOSITORY_NAME - repository name provided by user """ +repository_name = os.environ.get("TT_REPOSITORY_NAME", "openvino.test:e2e_tests") +config.repository_name = repository_name diff --git a/tests/e2e_tests/conftest.py b/tests/e2e_tests/conftest.py new file mode 100644 index 00000000000000..1ecd76979a56fa --- /dev/null +++ b/tests/e2e_tests/conftest.py @@ -0,0 +1,69 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pathlib +import sys + +from cpuinfo import get_cpu_info + +from e2e_tests.common.logger import get_logger +from .common.sys_info_utils import get_sys_info +from e2e_tests.test_utils.tf_helper import TFVersionHelper + +try: + # In user_config.py, user might export custom environment variables + from . import user_config + + print("Successfully imported user_config") +except ImportError: + pass + +from e2e_tests.common.plugins.common.conftest import * + +NODEID_TOKENS_RE = r"(?P.+?)::(?P.+?)\[(?P.+?)\]" +VR_FRIENDLY_NODEID = "{class_definition_path}::{class_name}::{func_name}[{args}]" + +sys.path.append(str(pathlib.Path(__file__).resolve().parents[1])) +logger = get_logger(__name__) + + +def pytest_configure(config): + sys_info = get_sys_info() + cpu_info = get_cpu_info() + + logger.info(f"System information: {sys_info}") + logger.info(f"CPU info: {cpu_info}") + # Fill environment section of HTML report with additional data + # config._metadata['INTERNAL_GFX_DRIVER_VERSION'] = os.getenv('INTERNAL_GFX_DRIVER_VERSION') + # Set TensorFlow models version with command line option value + tf_models_version = config.getoption("tf_models_version") + TFVersionHelper(tf_models_version) + + +@pytest.mark.hookwrapper +def pytest_runtest_makereport(item, call): + pytest_html = item.config.pluginmanager.getplugin('html') + report = (yield).get_result() + extra = getattr(report, 'extra', []) + ir_links = [] + if report.when == 'call': + ir_link = next((p[1] for p in report.user_properties if p[0] == "ir_link"), None) + if ir_link: + extra.append(pytest_html.extras.url(ir_link, name="xml")) + extra.append(pytest_html.extras.url(ir_link.replace(".xml", ".bin"), name="bin")) + extra.append(pytest_html.extras.url(ir_link.replace(".xml", ".mo_log.txt"), name="mo_log")) + + ir_links.append(f"xml") + ir_links.append(f"bin") + ir_links.append(f"mo_log") + if getattr(item._request, 'test_info', None): + item._request.test_info.update( + {"links": " ".join(ir_links), + "log": "\n\n\n".join([report.caplog, report.longreprtext]), + "insertTime": report.duration, + "duration": report.duration, + "result": report.outcome} + ) + report.extra = extra + diff --git a/tests/e2e_tests/env_config_local.yml b/tests/e2e_tests/env_config_local.yml new file mode 100644 index 00000000000000..5684e011d1637e --- /dev/null +++ b/tests/e2e_tests/env_config_local.yml @@ -0,0 +1,42 @@ +mo_out: /tmp/out_dir/ +pregen_irs_path: /tmp/out_dir/ +input_model_dir: /tmp/input_dir/ + +models: W:\models/models/model_downloader/ +test_data: test_data/inputs/ +references_repo: test_data/references/ +references: test_data/references/ + +# internal models location +caffe_internal_models: W:\models\internal\caffe\ +mxnet_internal_models: W:\models/models/internal/mxnet/ +tf_internal_models: \\ov-share-02.iotg.sclab.intel.com\data\vdp_tests\models/internal/tf/ +onnx_internal_models: W:\models/models/internal/onnx/ 
+onnx_internal_models_paddlepaddle: W:\models/models/internal/onnx/PaddlePaddle +onnx_small_models: W:\models/models/internal/onnx/small/ +paddlepaddle_internal_models: W:\models/models/internal/paddlepaddle + +# Kaldi models location +kaldi_models: \\ov-share-02.iotg.sclab.intel.com\data\vdp_tests\models/internal/kaldi + +# PyTorch specific environment +pytorch_models_path: W:\models/models/internal/pytorch/ +pytorch_pretrained_models_path: W:\models/models/internal/pytorch/pretrained/0.7.4 +pytorch_torchvision_models_path: W:\models/models/internal/pytorch/torchvision/0.2.1 +pytorch_timm_models_path: W:\models/models/internal/pytorch/timm +pytorch_hf_models_path: \\ov-share-02.iotg.sclab.intel.com\data\vdp_tests/models/internal/pytorch/huggingface +pytorch_to_onnx_dump_path: /tmp/pytorch_to_onnx_dump + +# private models location +private_models: \\ov-share-02.iotg.sclab.intel.com\data\vdp_tests\models/private/ + +# large models location +tf_large_models: /nfs/ov-share-05/data/chunk-01/openvino_models/models/tf/ +pytorch_large_models: /nfs/ov-share-05/data/chunk-01/openvino_models/models/pytorch/ + +# ICV models zoo location +icv_model_zoo_models: W:\models/models/icv_modelzoo/ + +omz_root: /localdisk/repos/open_model_zoo/ +omz_models_out: /tmp/omz_model_out +omz_downloader_cache: /tmp/omz_model_out/downloader_cache diff --git a/tests/e2e_tests/pipelines/pipeline_templates/collect_reference_templates.py b/tests/e2e_tests/pipelines/pipeline_templates/collect_reference_templates.py new file mode 100644 index 00000000000000..619a2d91f93625 --- /dev/null +++ b/tests/e2e_tests/pipelines/pipeline_templates/collect_reference_templates.py @@ -0,0 +1,146 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Common reference collection templates processed by testing framework. +""" +from collections import OrderedDict + +from e2e_tests.common.decorators import wrap_ord_dict +from e2e_tests.pipelines.pipeline_templates.postproc_template import assemble_postproc_tf +from e2e_tests.test_utils.path_utils import ref_from_model + + +def get_refs_onnx_runtime(model, onnx_rt_ep, inputs, cast_input_data=True, cast_type="float32"): + """ + Construct ONNX Runtime reference collection action. + + :param model: .onnx file + :param onnx_rt_ep: execution provider to infer model + :param inputs: input data for model + :param cast_input_data: whether cast or not input data to specific dtype + :param cast_type: type of data model input data cast to + :return: ONNX models "get_refs" action processed by testing framework + """ + return 'get_refs', {'score_onnx_runtime': {'model': model, 'onnx_rt_ep': onnx_rt_ep, + 'inputs': inputs, + 'cast_input_data': cast_input_data, + 'cast_input_data_to_type': cast_type}} + + +def get_refs_paddlepaddle(model, inputs, params_filename=None): + """ + Construct PaddlePaddle reference collection action. + + :param model: model file path which will be used in get_model() function + :param inputs: input data for model + :param params_filename: the name of single binary file to load all model parameters. 
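+        (illustrative example: "model.pdiparams" when all weights were exported into one combined
+        file; the exact file name depends on how the model was exported)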
+ :return: PaddlePaddle models "get_refs" action processed by testing framework + """ + return 'get_refs', {'score_paddlepaddle': {'model': model, 'inputs': inputs, 'params_filename': params_filename}} + + +def get_refs_tf(inputs, model=None, output_nodes_for_freeze=None, additional_outputs=[], additional_inputs=[], + override_default_outputs=False, override_default_inputs=False, saved_model_dir=None, + user_output_node_names_list=[], score_class_name="score_tf"): + """ + Construct TensorFlow reference collection action. + + :param inputs: input data for model + :param model: .pb or .meta file with model + :param output_nodes_for_freeze: output nodes used for freeze input model before inference + :param score_class_name: "score_tf", "score_tf_dir", "score_tf_meta" - type of loading model + :return: TF models "get_refs" action processed by testing framework + """ + return "get_refs", {score_class_name: {"inputs": inputs, + "model": model, + "saved_model_dir": saved_model_dir, + "output_nodes_for_freeze": output_nodes_for_freeze, + "additional_outputs": additional_outputs, + "additional_inputs": additional_inputs, + "override_default_outputs": override_default_outputs, + "override_default_inputs": override_default_inputs, + "user_output_node_names_list": user_output_node_names_list}} + + +@wrap_ord_dict +def read_refs_pipeline(ref_file, batch): + """ + Construct Read pre-collected references pipeline + + :param ref_file: path to .npz file with pre-collected references + :param batch: target batch size + :return: OrderedDict with pipeline containing get_refs and postprocessor steps + """ + return [("get_refs", {"precollected": {"path": ref_file}}), + ("postprocessor", {"align_with_batch": {"batch": batch}})] + + +@wrap_ord_dict +def read_pytorch_refs_pipeline(ref_file, batch): + """ + Construct Read pre-collected references pipeline + + :param ref_file: path to .npz file with pre-collected references + :param batch: target batch size + :return: OrderedDict with pipeline containing get_refs and postprocessor steps + """ + return [("get_refs", {"torch_precollected": {"path": ref_file}}), + ("postprocessor", {"align_with_batch": {"batch": batch}})] + + +@wrap_ord_dict +def read_tf_refs_pipeline(ref_file, batch=None, align_with_batch_od=False, postprocessors=None): + """ + Construct Read pre-collected references pipeline + + :param ref_file: path to pre-collected references + :param batch: target batch size + :param align_with_batch_od: batch alignment preprocessor + :return: OrderedDict with pipeline containing get_refs and postprocessor steps + """ + if postprocessors is None: + postprocessors = {} + return [("get_refs", {"precollected": {"path": ref_from_model(ref_file, framework="tf")}}), + assemble_postproc_tf(batch=batch, align_with_batch_od=align_with_batch_od, **postprocessors)] + + +def collect_paddlepaddle_refs(model, inputs, params_filename=None, ref_name=None): + """Construct PaddlePaddle reference collection pipeline.""" + return {'pipeline': OrderedDict([ + get_refs_paddlepaddle(model=model, inputs=inputs, + params_filename=params_filename) + ]), + 'store_path': ref_from_model(ref_name, framework="paddlepaddle"), + 'store_path_for_ref_save': ref_from_model(ref_name, framework="paddlepaddle", check_empty_ref_path=False)} + + +def collect_tf_refs_pipeline(model, inputs, saved_model_dir=None, ref_name=None): + """Construct reference collection pipeline.""" + if not ref_name: + ref_name = model + return {'pipeline': OrderedDict([ + get_refs_tf(inputs=inputs, model=model) if 
saved_model_dir is None + else get_refs_tf(inputs=inputs, saved_model_dir=saved_model_dir) + ]), + 'store_path': ref_from_model(ref_name, framework="tf"), + 'store_path_for_ref_save': ref_from_model(ref_name, framework="tf", check_empty_ref_path=False)} + + +def collect_onnx_refs_pipeline(model, inputs, onnx_rt_ep, framework, cast_type="float32", h=None, w=None, + ref_name=None, preprocessors=None, batch=1, cast_input_data=True): + """Construct reference collection pipeline.""" + if not ref_name: + ref_name = model + return {'pipeline': OrderedDict([ + get_refs_onnx_runtime(model=model, inputs=inputs, onnx_rt_ep=onnx_rt_ep, cast_type=cast_type, + cast_input_data=cast_input_data) + ]), + 'store_path': ref_from_model(ref_name, framework=framework), + 'store_path_for_ref_save': ref_from_model(ref_name, framework=framework, check_empty_ref_path=False)} + + +def get_refs_tf_hub(model, inputs): + """ + Construct TensorFlow Hub reference collection action. + """ + return "get_refs_tf_hub", {'score_tf_hub': {}} diff --git a/tests/e2e_tests/pipelines/pipeline_templates/comparators_template.py b/tests/e2e_tests/pipelines/pipeline_templates/comparators_template.py new file mode 100644 index 00000000000000..4bf6d5aa6af19f --- /dev/null +++ b/tests/e2e_tests/pipelines/pipeline_templates/comparators_template.py @@ -0,0 +1,119 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from e2e_tests.common.decorators import wrap_ord_dict + + +@wrap_ord_dict +def classification_comparators(device, postproc=None, target_layers=None, precision=None, a_eps=None, r_eps=None, + ntop=10): + if postproc is None: + postproc = {} + return [("classification", {"device": device, + "ntop": ntop, + "precision": precision, + "a_eps": a_eps, + "r_eps": r_eps, + "postprocessors": postproc, + "target_layers": target_layers + } + ), + ("eltwise", {"device": device, + "a_eps": a_eps, + "r_eps": r_eps, + "precision": precision, + "target_layers": target_layers, + "ignore_results": True} + )] + + +@wrap_ord_dict +def object_detection_comparators(device, postproc=None, precision=None, a_eps=None, r_eps=None, p_thr=0.5, iou_thr=None, + mean_only_iou=False, target_layers=None): + if postproc is None: + postproc = {} + return "object_detection", {"device": device, + "p_thr": p_thr, + "a_eps": a_eps, + "r_eps": r_eps, + "precision": precision, + "iou_thr": iou_thr, + "postprocessors": postproc, + "mean_only_iou": mean_only_iou, + "target_layers": target_layers + } + + +@wrap_ord_dict +def eltwise_comparators(device, postproc=None, precision=None, a_eps=None, r_eps=None, + target_layers=None, ignore_results=False, mean_r_eps=None): + if postproc is None: + postproc = {} + return "eltwise", {"device": device, + "a_eps": a_eps, + "r_eps": r_eps, + "mean_r_eps": mean_r_eps, + "precision": precision, + "postprocessors": postproc, + "ignore_results": ignore_results, + "target_layers": target_layers + } + + + + +@wrap_ord_dict +def segmentation_comparators(device, postproc=None, precision=None, thr=None, target_layers=None): + if postproc is None: + postproc = {} + return "semantic_segmentation", {"device": device, + "thr": thr, + "postprocessors": postproc, + "target_layers": target_layers, + "precision": precision + } + + +@wrap_ord_dict +def dummy_comparators(): + return "dummy", {} + + +@wrap_ord_dict +def ssim_comparators(device, postproc=None, precision=None, thr=None, target_layers=None): + if postproc is None: + postproc = {} + return "ssim", {"device": device, + "thr": thr, + "precision": 
precision, + "postprocessors": postproc, + "ignore_results": False, + "target_layers": target_layers + } + + +@wrap_ord_dict +def ssim_4d_comparators(device, postproc=None, precision=None, thr=None, target_layers=None, win_size=None): + if postproc is None: + postproc = {} + return "ssim_4d", {"device": device, + "ssim_4d_thr": thr, + "precision": precision, + "postprocessors": postproc, + "ignore_results": False, + "target_layers": target_layers, + "win_size": win_size + } + + +@wrap_ord_dict +def ocr_comparators(device, postproc=None, precision=None, target_layers=None, top_paths=10, beam_width=10): + if postproc is None: + postproc = {} + return "ocr", {"device": device, + "precision": precision, + "postprocessors": postproc, + "target_layers": target_layers, + "top_paths": top_paths, + "beam_width": beam_width + } diff --git a/tests/e2e_tests/pipelines/pipeline_templates/infer_templates.py b/tests/e2e_tests/pipelines/pipeline_templates/infer_templates.py new file mode 100644 index 00000000000000..91fe469d1aff5d --- /dev/null +++ b/tests/e2e_tests/pipelines/pipeline_templates/infer_templates.py @@ -0,0 +1,36 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +def batched_common_infer_step(device, **additional_args): + # return 'reshape' method for setting batch because old 'batch_size' method still not implemented in new API + return "infer", {"ie_sync": {"device": device}, + **additional_args} + + +def common_infer_step(device, **additional_args): + return "infer", {"ie_sync": {"device": device, + "network_modifiers": {}, + **additional_args}} + + +def batch_reshape_infer_step(device, **additional_args): + return "infer", {"ie_sync": {"device": device}, + **additional_args} + + +def reshape_input_shape_infer_step(device, input_file_path, **additional_args): + return "infer", {"ie_sync": {"device": device, + "network_modifiers": + {"reshape_input_shape": {"input_path": input_file_path}}, + **additional_args}} + + +def cpu_extension_infer_step(device, **additional_args): + return "infer", {"ie_sync": {"device": device, "cpu_extension": "cpu_extension"}, + **additional_args} + + +def sequence_infer_step(device, **additional_args): + # return 'reshape' method for setting batch because old 'batch_size' method still not implemented in new API + return "infer", {"ie_sequence": {"device": device}, + **additional_args} diff --git a/tests/e2e_tests/pipelines/pipeline_templates/input_templates.py b/tests/e2e_tests/pipelines/pipeline_templates/input_templates.py new file mode 100644 index 00000000000000..ea5dc7c5fdf8c0 --- /dev/null +++ b/tests/e2e_tests/pipelines/pipeline_templates/input_templates.py @@ -0,0 +1,33 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +def read_npz_input(path): + return "read_input", {"npz": {"path": path}} + + +def read_npy_input(path): + return "read_input", {"npy": {"inputs_map": path}} + + +def read_ark_input(path): + return "read_input", {"ark": {"inputs_map": path}} + + +def read_img_input(path): + return "read_input", {"img": {"inputs_map": path}} + + +def read_external_input(path): + return "read_input", {"external_data": {"data": path}} + + +def read_pb_input(path): + return "read_input", {"pb": {"inputs_map": path}} + + +def read_pt_input(path): + return "read_input", {"pt": {"path": path}} + + +def generate_tf_hub_inputs(model): + return {"read_input": {"generate_tf_hub_inputs": {"model": model}}} diff --git a/tests/e2e_tests/pipelines/pipeline_templates/ir_gen_templates.py 
b/tests/e2e_tests/pipelines/pipeline_templates/ir_gen_templates.py new file mode 100644 index 00000000000000..c99a82be9b0a22 --- /dev/null +++ b/tests/e2e_tests/pipelines/pipeline_templates/ir_gen_templates.py @@ -0,0 +1,16 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from pathlib import Path + + +def common_ir_generation(mo_out, precision, **kwargs): + return ("get_ir", {"get_ovc_model": {"mo_out": mo_out, + "precision": precision, + "additional_args": kwargs}}) + + +def ir_pregenerated(xml, bin=None): + if not bin: + bin = str(Path(xml).with_suffix(".bin")) + return "get_ir", {"pregenerated": {"xml": xml, "bin": bin}} diff --git a/tests/e2e_tests/pipelines/pipeline_templates/model_loader_templates.py b/tests/e2e_tests/pipelines/pipeline_templates/model_loader_templates.py new file mode 100644 index 00000000000000..fb3d1dd94d39a5 --- /dev/null +++ b/tests/e2e_tests/pipelines/pipeline_templates/model_loader_templates.py @@ -0,0 +1,40 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +def pytorch_loader(model_name=None, + weights=None, + output_file=None, + model_path=None, + import_module=None, + model_param=None, + inputs_order=None, + torch_export_method='', + torch_model_zoo_path='', + model_class_path='', + loader_timeout=300): + return "load_model", {"load_pytorch_model": { + "model-name": model_name, + "weights": weights, + "output-file": output_file, + "model-path": model_path, + "import-module": import_module, + "model-param": model_param, + "torch_export_method": torch_export_method, + "inputs_order": inputs_order, + "torch_model_zoo_path": torch_model_zoo_path, + "model_class_path": model_class_path, + "loader_timeout": loader_timeout, + }} + + +def custom_pytorch_loader(func: callable, *args, **kwargs): + return 'load_model', { + 'custom_pytorch_model_loader': {"execution_function": func(*args, **kwargs)}} + + +def tf_hub_loader(model_name=None, + model_link=None): + return "tf_hub_load_model", {"load_tf_hub_model": { + "model_name": model_name, + 'model_link': model_link, + }} diff --git a/tests/e2e_tests/pipelines/pipeline_templates/postproc_template.py b/tests/e2e_tests/pipelines/pipeline_templates/postproc_template.py new file mode 100644 index 00000000000000..3204dede87d994 --- /dev/null +++ b/tests/e2e_tests/pipelines/pipeline_templates/postproc_template.py @@ -0,0 +1,103 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""ing templates processed by testing framework. +""" +from collections import OrderedDict + +from e2e_tests.common.decorators import wrap_ord_dict + + +@wrap_ord_dict +def squeeze(axis=(2, 3)): + """Construct squeezing action. 
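+    For example, with the default axis=(2, 3) an output of shape (N, C, 1, 1) is reduced
+    to (N, C) before it is passed to the comparators.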
diff --git a/tests/e2e_tests/pipelines/pipeline_templates/postproc_template.py b/tests/e2e_tests/pipelines/pipeline_templates/postproc_template.py new file mode 100644 index 00000000000000..3204dede87d994 --- /dev/null +++ b/tests/e2e_tests/pipelines/pipeline_templates/postproc_template.py @@ -0,0 +1,103 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Postprocessing templates processed by testing framework. +""" +from collections import OrderedDict + +from e2e_tests.common.decorators import wrap_ord_dict + + +@wrap_ord_dict +def squeeze(axis=(2, 3)): + """Construct squeezing action. + + :param axis: axis along which squeezing is performed, defaults to (2, 3) + :return: "squeeze" action processed by testing framework + """ + return "squeeze", {"axis": axis} + + +@wrap_ord_dict +def parse_object_detection(): + """Construct object detection parsing action.""" + return "parse_object_detection", {} + + +def align_with_batch_postprocess(): + """Construct align_with_batch postprocess.""" + return "parse_object_detection", {} + + +@wrap_ord_dict +def parse_semantic_segmentation(): + """Construct semantic segmentation parsing action.""" + return "parse_semantic_segmentation", {} + + +@wrap_ord_dict +def parse_image_modification(): + """Construct image modification parsing action.""" + return "parse_image_modification", {} + + +@wrap_ord_dict +def parse_classification(labels_offset=0, target_layers=None): + """Construct classification parsing action. + + :param labels_offset: offset to be used during results parsing. i.e. + imagenet classification model can return 1001 class + where class 0 represents "background", one can + specify labels_offset=1 in order to cut the + "background" class (1001 -> 1000), defaults to 0 + :return: "parse_classification" action processed by testing framework. + """ + return "parse_classification", {"labels_offset": labels_offset, 'target_layers': target_layers} + + +@wrap_ord_dict +def squeeze_and_parse_classification(axis=(2, 3), labels_offset=0): + """Construct squeeze and parse classification actions in a single pipeline. + + :param axis: axis along which squeezing is performed, defaults to (2, 3) + :param labels_offset: offset to be used during results parsing. i.e. + imagenet classification model can return 1001 class + where class 0 represents "background", one can + specify labels_offset=1 in order to cut the + "background" class (1001 -> 1000), defaults to 0 + :return: "squeeze" and "parse_classification" action processed by testing + framework + """ + return [squeeze.unwrap(axis=axis), parse_classification.unwrap(labels_offset=labels_offset)] + + +def assemble_postproc_tf(batch=None, align_with_batch_od=False, **kwargs): + """Add TensorFlow-specific postprocessing. Pass rest of arguments as is. + + :return: "postprocessor" step with TensorFlow-specific actions + """ + steps = [] + if batch is not None and align_with_batch_od: + steps.append(("align_with_batch_od", {"batch": batch})) + elif batch is not None: + steps.append(("align_with_batch", {"batch": batch})) + for preproc, config in kwargs.items(): + steps.append((preproc, config)) + + return "postprocessor", dict(steps) + + +def paddlepaddle_od_postproc(target_layers=None): + """Construct PaddlePaddle object detection parsing actions.""" + if target_layers is None: + target_layers = {} + return OrderedDict([ + ("mxnet_to_common_od_format", {"target_layers": target_layers}), # PDPD has the same OD format as MXNET + ("parse_object_detection", {"target_layers": target_layers}) + ]) + + +@wrap_ord_dict +def parse_image_modification(): + """Construct image modification parsing action.""" + return "parse_image_modification", {}
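The postprocessing templates compose in the same way. The sketch below is not part of the patch and uses an arbitrary batch value; it also assumes, based on the squeeze_and_parse_classification helper above, that .unwrap() on a @wrap_ord_dict-decorated function returns the undecorated (name, config) pair.

from e2e_tests.pipelines.pipeline_templates.postproc_template import (
    assemble_postproc_tf, parse_classification)

# Arbitrary batch value, for illustration only.
# Undecorated helpers return the ("step_name", config) pair directly.
postproc_step = assemble_postproc_tf(batch=2)
# -> ("postprocessor", {"align_with_batch": {"batch": 2}})

# .unwrap() on a @wrap_ord_dict-decorated helper yields the raw pair.
cls_step = parse_classification.unwrap(labels_offset=1)
# -> ("parse_classification", {"labels_offset": 1, "target_layers": None})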
+""" + +from collections import OrderedDict + + +def assemble_preproc(batch=None, + h=None, + w=None, + resize_mode="nearest", + mean=None, + mean_file=None, + scale_factor=None, + normalization_factor=None, + reverse_channels=None, + permute_order=None, + expand_dims=True, + target_layers=None, + layers_to_expand=None, + layers_not_to_expand=None, + add_layer_to_input_data=None, + remove_layers_from_input_data=None, + slice_length=None, + cast_data_type=None, + rename_inputs=None, + grayscale=None, + convert_to_torch=None, + names_to_indices=None, + assign_indices=None, + **kwargs): + """ + Construct data preprocessing pipeline given basic values. + + :param batch: Data batch + :param h: Data height + :param w: Data width + :param resize_mode: Interpolation + :param mean: Mean tuple (for 3 channels: x, y, z) to subtract from data. + Mutually exclusive with `mean_file` + :param mean_file: Mean file to subtract from data. Mutually exclusive + with `mean` + :param grayscale: Grayscale data + :param cast_data_type: Converts data type preprocessor + :param scale_factor: Data scaling factor + :param normalization_factor: Data normalization factor + :param reverse_channels: Apply reversing of channels (for images: + RGB<->BGR) + :param permute_order: Permute data shape. (i.e. from HWC:(0, 1, 2) to + CHW:(2, 0, 1) where H is height, W is width, + C is depth(channels)) + :param expand_dims: Indicates whether to expand 0th dimension + :param target_layers: Target layers to apply preprocessing to + :param slice_length: Updates data through slice + :param layers_to_expand: Layers to apply preprocessor which allows expand dims + :param layers_not_to_expand: Layers to disable preprocessor which allows expand dims + :param add_layer_to_input_data: Add new input to input dictionary loaded from npz + :param remove_layers_from_input_data: Delete input from input dictionary loaded from npz + :param rename_inputs: Rename data layers of format: list of + (old name, new name) + :param convert_to_torch: convert inputs to torch.Tensor format + :param names_to_indices: whether convert input names to indices + :param assign_indices: whether assign indices as input names for tensors + :return: "preprocess" step parsed by testing framework + """ + + def step_include(value): + if not value: + return False + else: + return True + + steps = [] + + if step_include(assign_indices): + steps.append(("assign_indices", { + "target_layers": target_layers + })) + + if step_include(remove_layers_from_input_data): + steps.append(("remove_layers_from_input_data", { + "target_layers": remove_layers_from_input_data + })) + + if h and w: + steps.append(("resize", { + "height": h, + "width": w, + "mode=": resize_mode, + "target_layers": target_layers + })) + + if step_include(mean) and step_include(mean_file): + raise AttributeError('both mean and mean file options specified') + elif step_include(mean): + steps.append(("subtract_mean_values", { + "mean_values": mean, + "target_layers": target_layers + })) + elif step_include(mean_file): + steps.append(("subtract_mean_values_file", { + "mean_file": mean_file, + "target_layers": target_layers + })) + + if grayscale == dict(): + steps.append(("grayscale", grayscale)) + + if step_include(scale_factor) and step_include(normalization_factor): + raise AttributeError( + 'both scale and normalization factors are specified') + elif step_include(normalization_factor): + steps.append(("normalize", { + "factor": normalization_factor, + "target_layers": target_layers + })) + elif 
step_include(scale_factor): + steps.append(("scale", { + "factor": scale_factor, + "target_layers": target_layers + })) + + if step_include(cast_data_type): + steps.append(("cast_data_type", {"target_data_type": cast_data_type})) + + if step_include(slice_length): + steps.append(("slice_data", {"slice": slice(None, slice_length, None)})) + + if step_include(reverse_channels): + steps.append(("reverse_channels", {"target_layers": target_layers})) + + if step_include(permute_order): + steps.append(("permute_shape", { + "order": permute_order, + "target_layers": target_layers + })) + + if step_include(batch): + steps.append(("align_with_batch", { + "batch": batch, + "expand_dims": expand_dims, + "target_layers": target_layers + })) + + if step_include(layers_to_expand) and step_include(layers_not_to_expand): + steps.append(("align_with_batch_dif", { + "batch": batch, + "layers_to_expand": layers_to_expand, + "layers_not_to_expand": layers_not_to_expand + })) + + if step_include(add_layer_to_input_data): + steps.append(("add_layer_to_input_data", { + "layer_data": add_layer_to_input_data + })) + + for preproc, config in kwargs.items(): + steps.append((preproc, config)) + + # must be the last step + # otherwise, preprocessors that depend on target_layers might fail + if step_include(rename_inputs): + steps.append(("rename_inputs", {"rename_input_pairs": rename_inputs})) + + if step_include(convert_to_torch): + steps.append(("convert_to_torch", { + "target_layers": target_layers + })) + + if step_include(names_to_indices): + steps.append(("names_to_indices", { + "target_layers": target_layers + })) + + return 'preprocess', OrderedDict(steps) + + +def assemble_preproc_tf(*args, **kwargs): + """Add tensorflow-specific preprocessings. Pass rest of arguments as is. 
+ + :return: "preprocess" step with TensorFlow specific actions + """ + + order = kwargs.pop('permute_order', None) + reverse = kwargs.pop('reverse_channels', True) + + return assemble_preproc(*args, permute_order=order, reverse_channels=reverse, **kwargs) + diff --git a/tests/e2e_tests/pipelines/pipeline_templates/pytorch_to_onnx_converter_template.py b/tests/e2e_tests/pipelines/pipeline_templates/pytorch_to_onnx_converter_template.py new file mode 100644 index 00000000000000..259c326a248529 --- /dev/null +++ b/tests/e2e_tests/pipelines/pipeline_templates/pytorch_to_onnx_converter_template.py @@ -0,0 +1,33 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +def convert_pytorch_to_onnx(model_name=None, + weights=None, + input_shapes=None, + output_file=None, + model_path=None, + import_module=None, + input_names=None, + output_names=None, + model_param=None, + inputs_dtype=None, + conversion_param=None, + opset_version=None, + torch_model_zoo_path='', + converter_timeout=300): + return "pytorch_to_onnx", {"convert_pytorch_to_onnx": { + "model-name": model_name, + "weights": weights, + "input-shapes": input_shapes, + "output-file": output_file, + "model-path": model_path, + "import-module": import_module, + "input-names": input_names, + "output-names": output_names, + "model-param": model_param, + "inputs-dtype": inputs_dtype, + "conversion-param": conversion_param, + "opset_version": opset_version, + "torch_model_zoo_path": torch_model_zoo_path, + "converter_timeout": converter_timeout, + }} diff --git a/tests/e2e_tests/pipelines/tf_hub/nightly.yml b/tests/e2e_tests/pipelines/tf_hub/nightly.yml new file mode 100644 index 00000000000000..074ee20fcc0a9c --- /dev/null +++ b/tests/e2e_tests/pipelines/tf_hub/nightly.yml @@ -0,0 +1,689 @@ +universal-sentence-encoder,https://tfhub.dev/google/universal-sentence-encoder/4?tf-hub-format=compressed +imagenet/mobilenet_v1_100_224/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/classification/5?tf-hub-format=compressed +imagenet/mobilenet_v2_100_224/classification,https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/classification/5?tf-hub-format=compressed +universal-sentence-encoder-multilingual,https://tfhub.dev/google/universal-sentence-encoder-multilingual/3?tf-hub-format=compressed +universal-sentence-encoder-large,https://tfhub.dev/google/universal-sentence-encoder-large/5?tf-hub-format=compressed +imagenet/mobilenet_v2_075_224/classification,https://tfhub.dev/google/imagenet/mobilenet_v2_075_224/classification/5?tf-hub-format=compressed +movenet/singlepose/lightning,https://tfhub.dev/google/movenet/singlepose/lightning/4?tf-hub-format=compressed +imagenet/mobilenet_v1_025_224/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_025_224/classification/5?tf-hub-format=compressed +bert_en_uncased_preprocess,https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3?tf-hub-format=compressed +tf2-preview/mobilenet_v2/feature_vector,https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4?tf-hub-format=compressed +magenta/arbitrary-image-stylization-v1-256,https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2?tf-hub-format=compressed +nnlm-en-dim128,https://tfhub.dev/google/nnlm-en-dim128/2?tf-hub-format=compressed +bert_en_uncased_L-12_H-768_A-12,https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4?tf-hub-format=compressed +bert_uncased_L-12_H-768_A-12,https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1?tf-hub-format=compressed 
+elmo,https://tfhub.dev/google/elmo/3?tf-hub-format=compressed +universal-sentence-encoder-multilingual-large,https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3?tf-hub-format=compressed +small_bert/bert_en_uncased_L-4_H-256_A-4,https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-256_A-4/2?tf-hub-format=compressed +imagenet/resnet_v2_50/feature_vector,https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/5?tf-hub-format=compressed +spice,https://tfhub.dev/google/spice/2?tf-hub-format=compressed +movenet/singlepose/thunder,https://tfhub.dev/google/movenet/singlepose/thunder/4?tf-hub-format=compressed +yamnet,https://tfhub.dev/google/yamnet/1?tf-hub-format=compressed +tf2-preview/nnlm-en-dim128,https://tfhub.dev/google/tf2-preview/nnlm-en-dim128/1?tf-hub-format=compressed +universal-sentence-encoder-lite,https://tfhub.dev/google/universal-sentence-encoder-lite/2?tf-hub-format=compressed +tf2-preview/mobilenet_v2/classification,https://tfhub.dev/google/tf2-preview/mobilenet_v2/classification/4?tf-hub-format=compressed +imagenet/inception_v3/feature_vector,https://tfhub.dev/google/imagenet/inception_v3/feature_vector/5?tf-hub-format=compressed +imagenet/mobilenet_v2_140_224/classification,https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/classification/5?tf-hub-format=compressed +efficientnet/lite0/feature-vector,https://tfhub.dev/tensorflow/efficientnet/lite0/feature-vector/2?tf-hub-format=compressed +i3d-kinetics-400,https://tfhub.dev/deepmind/i3d-kinetics-400/1?tf-hub-format=compressed +imagenet/mobilenet_v2_035_224/classification,https://tfhub.dev/google/imagenet/mobilenet_v2_035_224/classification/5?tf-hub-format=compressed +tf2-preview/gnews-swivel-20dim,https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1?tf-hub-format=compressed +faster_rcnn/openimages_v4/inception_resnet_v2,https://tfhub.dev/google/faster_rcnn/openimages_v4/inception_resnet_v2/1?tf-hub-format=compressed +imagenet/mobilenet_v2_140_224/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/feature_vector/5?tf-hub-format=compressed +imagenet/mobilenet_v2_100_224/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/5?tf-hub-format=compressed +bert_en_cased_L-12_H-768_A-12,https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/4?tf-hub-format=compressed +universal-sentence-encoder-cmlm/en-base,https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-base/1?tf-hub-format=compressed +openimages_v4/ssd/mobilenet_v2,https://tfhub.dev/google/openimages_v4/ssd/mobilenet_v2/1?tf-hub-format=compressed +efficientnet/lite0/classification,https://tfhub.dev/tensorflow/efficientnet/lite0/classification/2?tf-hub-format=compressed +universal-sentence-encoder-xling-many,https://tfhub.dev/google/universal-sentence-encoder-xling-many/1?tf-hub-format=compressed +imagenet/mobilenet_v3_large_100_224/classification,https://tfhub.dev/google/imagenet/mobilenet_v3_large_100_224/classification/5?tf-hub-format=compressed +nnlm-en-dim128-with-normalization,https://tfhub.dev/google/nnlm-en-dim128-with-normalization/2?tf-hub-format=compressed +nnlm-en-dim50,https://tfhub.dev/google/nnlm-en-dim50/2?tf-hub-format=compressed +movenet/multipose/lightning,https://tfhub.dev/google/movenet/multipose/lightning/1?tf-hub-format=compressed +efficientdet/lite0/feature-vector,https://tfhub.dev/tensorflow/efficientdet/lite0/feature-vector/1?tf-hub-format=compressed 
+imagenet/efficientnet_v2_imagenet1k_b0/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b0/feature_vector/2?tf-hub-format=compressed +small_bert/bert_en_uncased_L-4_H-512_A-8,https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-512_A-8/2?tf-hub-format=compressed +universal-sentence-encoder-cmlm/multilingual-preprocess,https://tfhub.dev/google/universal-sentence-encoder-cmlm/multilingual-preprocess/2?tf-hub-format=compressed +vggish,https://tfhub.dev/google/vggish/1?tf-hub-format=compressed +bert_multi_cased_L-12_H-768_A-12,https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/4?tf-hub-format=compressed +imagenet/mobilenet_v2_130_224/classification,https://tfhub.dev/google/imagenet/mobilenet_v2_130_224/classification/5?tf-hub-format=compressed +tf2-preview/inception_v3/feature_vector,https://tfhub.dev/google/tf2-preview/inception_v3/feature_vector/4?tf-hub-format=compressed +mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT,https://tfhub.dev/google/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT/1?tf-hub-format=compressed +bert_en_uncased_L-24_H-1024_A-16,https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/4?tf-hub-format=compressed +imagenet/mobilenet_v3_small_100_224/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v3_small_100_224/feature_vector/5?tf-hub-format=compressed +efficientdet/lite0/detection,https://tfhub.dev/tensorflow/efficientdet/lite0/detection/1?tf-hub-format=compressed +small_bert/bert_en_uncased_L-2_H-128_A-2,https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/2?tf-hub-format=compressed +albert_base,https://tfhub.dev/google/albert_base/3?tf-hub-format=compressed +nnlm-ja-dim128,https://tfhub.dev/google/nnlm-ja-dim128/2?tf-hub-format=compressed +universal-sentence-encoder-multilingual-qa,https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3?tf-hub-format=compressed +nnlm-ja-dim128-with-normalization,https://tfhub.dev/google/nnlm-ja-dim128-with-normalization/2?tf-hub-format=compressed +LaBSE,https://tfhub.dev/google/LaBSE/2?tf-hub-format=compressed +nnlm-en-dim50-with-normalization,https://tfhub.dev/google/nnlm-en-dim50-with-normalization/2?tf-hub-format=compressed +resnet_50/feature_vector,https://tfhub.dev/tensorflow/resnet_50/feature_vector/1?tf-hub-format=compressed +universal-sentence-encoder-qa,https://tfhub.dev/google/universal-sentence-encoder-qa/3?tf-hub-format=compressed +biggan-deep-256,https://tfhub.dev/deepmind/biggan-deep-256/1?tf-hub-format=compressed +efficientdet/lite2/detection,https://tfhub.dev/tensorflow/efficientdet/lite2/detection/1?tf-hub-format=compressed +imagenet/mobilenet_v2_050_224/classification,https://tfhub.dev/google/imagenet/mobilenet_v2_050_224/classification/5?tf-hub-format=compressed +mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT/squadv1,https://tfhub.dev/google/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT/squadv1/1?tf-hub-format=compressed +delf,https://tfhub.dev/google/delf/1?tf-hub-format=compressed +ssd_mobilenet_v2,https://tfhub.dev/tensorflow/ssd_mobilenet_v2/2?tf-hub-format=compressed +imagenet/mobilenet_v3_large_075_224/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v3_large_075_224/feature_vector/5?tf-hub-format=compressed +centernet/hourglass_512x512_kpts,https://tfhub.dev/tensorflow/centernet/hourglass_512x512_kpts/1?tf-hub-format=compressed +bert_cased_L-12_H-768_A-12,https://tfhub.dev/google/bert_cased_L-12_H-768_A-12/1?tf-hub-format=compressed 
+biggan-512,https://tfhub.dev/deepmind/biggan-512/2?tf-hub-format=compressed +tf2-preview/gnews-swivel-20dim-with-oov,https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim-with-oov/1?tf-hub-format=compressed +tf2-preview/nnlm-es-dim50-with-normalization,https://tfhub.dev/google/tf2-preview/nnlm-es-dim50-with-normalization/1?tf-hub-format=compressed +efficientnet/b4/feature-vector,https://tfhub.dev/tensorflow/efficientnet/b4/feature-vector/1?tf-hub-format=compressed +enformer,https://tfhub.dev/deepmind/enformer/1?tf-hub-format=compressed +efficientnet/b0/feature-vector,https://tfhub.dev/tensorflow/efficientnet/b0/feature-vector/1?tf-hub-format=compressed +efficientnet/b0/feature-vector,https://tfhub.dev/google/efficientnet/b0/feature-vector/1?tf-hub-format=compressed +aiy/vision/classifier/food_V1,https://tfhub.dev/google/aiy/vision/classifier/food_V1/1?tf-hub-format=compressed +albert_en_base,https://tfhub.dev/tensorflow/albert_en_base/3?tf-hub-format=compressed +imagenet/inception_v3/classification,https://tfhub.dev/google/imagenet/inception_v3/classification/5?tf-hub-format=compressed +nonsemantic-speech-benchmark/trill-distilled,https://tfhub.dev/google/nonsemantic-speech-benchmark/trill-distilled/3?tf-hub-format=compressed +efficientnet/lite4/feature-vector,https://tfhub.dev/tensorflow/efficientnet/lite4/feature-vector/2?tf-hub-format=compressed +resnet_50/classification,https://tfhub.dev/tensorflow/resnet_50/classification/1?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_ft1k_m/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_m/feature_vector/2?tf-hub-format=compressed +imagenet/inception_v1/classification,https://tfhub.dev/google/imagenet/inception_v1/classification/5?tf-hub-format=compressed +bit/m-r50x1,https://tfhub.dev/google/bit/m-r50x1/1?tf-hub-format=compressed +progan-128,https://tfhub.dev/google/progan-128/1?tf-hub-format=compressed +biggan-256,https://tfhub.dev/deepmind/biggan-256/2?tf-hub-format=compressed +nnlm-de-dim50-with-normalization,https://tfhub.dev/google/nnlm-de-dim50-with-normalization/2?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_b0/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b0/feature_vector/2?tf-hub-format=compressed +bert_multi_cased_preprocess,https://tfhub.dev/tensorflow/bert_multi_cased_preprocess/3?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_xl/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_xl/feature_vector/2?tf-hub-format=compressed +efficientnet/lite2/feature-vector,https://tfhub.dev/tensorflow/efficientnet/lite2/feature-vector/2?tf-hub-format=compressed +tfgan/eval/inception,https://tfhub.dev/tensorflow/tfgan/eval/inception/1?tf-hub-format=compressed +efficientdet/lite2/feature-vector,https://tfhub.dev/tensorflow/efficientdet/lite2/feature-vector/1?tf-hub-format=compressed +albert_lite_base,https://tfhub.dev/tensorflow/albert_lite_base/1?tf-hub-format=compressed +efficientnet/lite1/feature-vector,https://tfhub.dev/tensorflow/efficientnet/lite1/feature-vector/2?tf-hub-format=compressed +imagenet/mobilenet_v3_large_100_224/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v3_large_100_224/feature_vector/5?tf-hub-format=compressed +efficientnet/lite4/classification,https://tfhub.dev/tensorflow/efficientnet/lite4/classification/2?tf-hub-format=compressed +Wiki-words-250,https://tfhub.dev/google/Wiki-words-250/2?tf-hub-format=compressed 
+efficientnet/lite3/feature-vector,https://tfhub.dev/tensorflow/efficientnet/lite3/feature-vector/2?tf-hub-format=compressed +imagenet/inception_resnet_v2/feature_vector,https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/5?tf-hub-format=compressed +efficientdet/d0,https://tfhub.dev/tensorflow/efficientdet/d0/1?tf-hub-format=compressed +tfgan/eval/mnist/logits,https://tfhub.dev/tensorflow/tfgan/eval/mnist/logits/1?tf-hub-format=compressed +albert_en_preprocess,https://tfhub.dev/tensorflow/albert_en_preprocess/3?tf-hub-format=compressed +bert_zh_L-12_H-768_A-12,https://tfhub.dev/tensorflow/bert_zh_L-12_H-768_A-12/4?tf-hub-format=compressed +efficientdet/lite4/feature-vector,https://tfhub.dev/tensorflow/efficientdet/lite4/feature-vector/2?tf-hub-format=compressed +imagenet/mobilenet_v1_100_224/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/5?tf-hub-format=compressed +bert_chinese_L-12_H-768_A-12,https://tfhub.dev/google/bert_chinese_L-12_H-768_A-12/1?tf-hub-format=compressed +mask_rcnn/inception_resnet_v2_1024x1024,https://tfhub.dev/tensorflow/mask_rcnn/inception_resnet_v2_1024x1024/1?tf-hub-format=compressed +object_detection/mobile_object_localizer_v1,https://tfhub.dev/google/object_detection/mobile_object_localizer_v1/1?tf-hub-format=compressed +efficientdet/lite3/detection,https://tfhub.dev/tensorflow/efficientdet/lite3/detection/1?tf-hub-format=compressed +tf2-preview/inception_v3/classification,https://tfhub.dev/google/tf2-preview/inception_v3/classification/4?tf-hub-format=compressed +imagenet/mobilenet_v2_035_96/classification,https://tfhub.dev/google/imagenet/mobilenet_v2_035_96/classification/5?tf-hub-format=compressed +faster_rcnn/inception_resnet_v2_640x640,https://tfhub.dev/tensorflow/faster_rcnn/inception_resnet_v2_640x640/1?tf-hub-format=compressed +imagenet/resnet_v2_50/classification,https://tfhub.dev/google/imagenet/resnet_v2_50/classification/5?tf-hub-format=compressed +aiy/vision/classifier/birds_V1,https://tfhub.dev/google/aiy/vision/classifier/birds_V1/1?tf-hub-format=compressed +MuRIL,https://tfhub.dev/google/MuRIL/1?tf-hub-format=compressed +efficientdet/lite1/feature-vector,https://tfhub.dev/tensorflow/efficientdet/lite1/feature-vector/1?tf-hub-format=compressed +random-nnlm-en-dim128,https://tfhub.dev/google/random-nnlm-en-dim128/1?tf-hub-format=compressed +imagenet/inception_resnet_v2/classification,https://tfhub.dev/google/imagenet/inception_resnet_v2/classification/5?tf-hub-format=compressed +ssd_mobilenet_v2/fpnlite_320x320,https://tfhub.dev/tensorflow/ssd_mobilenet_v2/fpnlite_320x320/1?tf-hub-format=compressed +centernet/resnet50v1_fpn_512x512,https://tfhub.dev/tensorflow/centernet/resnet50v1_fpn_512x512/1?tf-hub-format=compressed +small_bert/bert_en_uncased_L-2_H-256_A-4,https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-256_A-4/2?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet1k_s/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_s/feature_vector/2?tf-hub-format=compressed +biggan-deep-512,https://tfhub.dev/deepmind/biggan-deep-512/1?tf-hub-format=compressed +universal-sentence-encoder-cmlm/multilingual-base-br,https://tfhub.dev/google/universal-sentence-encoder-cmlm/multilingual-base-br/1?tf-hub-format=compressed +faster_rcnn/resnet50_v1_640x640,https://tfhub.dev/tensorflow/faster_rcnn/resnet50_v1_640x640/1?tf-hub-format=compressed 
+imagenet/mobilenet_v3_large_075_224/classification,https://tfhub.dev/google/imagenet/mobilenet_v3_large_075_224/classification/5?tf-hub-format=compressed +imagenet/mobilenet_v3_small_075_224/classification,https://tfhub.dev/google/imagenet/mobilenet_v3_small_075_224/classification/5?tf-hub-format=compressed +tweening_conv3d_bair,https://tfhub.dev/google/tweening_conv3d_bair/1?tf-hub-format=compressed +efficientnet/b7/feature-vector,https://tfhub.dev/tensorflow/efficientnet/b7/feature-vector/1?tf-hub-format=compressed +bert_en_cased_preprocess,https://tfhub.dev/tensorflow/bert_en_cased_preprocess/3?tf-hub-format=compressed +bert_uncased_L-24_H-1024_A-16,https://tfhub.dev/google/bert_uncased_L-24_H-1024_A-16/1?tf-hub-format=compressed +centernet/hourglass_512x512,https://tfhub.dev/tensorflow/centernet/hourglass_512x512/1?tf-hub-format=compressed +imagenet/mobilenet_v2_035_128/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_035_128/feature_vector/5?tf-hub-format=compressed +efficientdet/d7,https://tfhub.dev/tensorflow/efficientdet/d7/1?tf-hub-format=compressed +albert_en_xxlarge,https://tfhub.dev/tensorflow/albert_en_xxlarge/3?tf-hub-format=compressed +efficientnet/lite3/classification,https://tfhub.dev/tensorflow/efficientnet/lite3/classification/2?tf-hub-format=compressed +efficientdet/lite3/feature-vector,https://tfhub.dev/tensorflow/efficientdet/lite3/feature-vector/1?tf-hub-format=compressed +MuRIL_preprocess,https://tfhub.dev/google/MuRIL_preprocess/1?tf-hub-format=compressed +imagenet/inception_v1/feature_vector,https://tfhub.dev/google/imagenet/inception_v1/feature_vector/5?tf-hub-format=compressed +efficientdet/lite3x/detection,https://tfhub.dev/tensorflow/efficientdet/lite3x/detection/1?tf-hub-format=compressed +midas/v2,https://tfhub.dev/intel/midas/v2/2?tf-hub-format=compressed +universal-sentence-encoder-cmlm/multilingual-base,https://tfhub.dev/google/universal-sentence-encoder-cmlm/multilingual-base/1?tf-hub-format=compressed +imagenet/mobilenet_v1_075_224/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v1_075_224/feature_vector/5?tf-hub-format=compressed +bit/m-r50x1/ilsvrc2012_classification,https://tfhub.dev/google/bit/m-r50x1/ilsvrc2012_classification/1?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet1k_s/classification,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_s/classification/2?tf-hub-format=compressed +nnlm-ja-dim50,https://tfhub.dev/google/nnlm-ja-dim50/2?tf-hub-format=compressed +electra_small,https://tfhub.dev/google/electra_small/2?tf-hub-format=compressed +nonsemantic-speech-benchmark/trill,https://tfhub.dev/google/nonsemantic-speech-benchmark/trill/3?tf-hub-format=compressed +bigbigan-resnet50,https://tfhub.dev/deepmind/bigbigan-resnet50/1?tf-hub-format=compressed +imagenet/resnet_v2_152/feature_vector,https://tfhub.dev/google/imagenet/resnet_v2_152/feature_vector/5?tf-hub-format=compressed +efficientnet/lite2/classification,https://tfhub.dev/tensorflow/efficientnet/lite2/classification/2?tf-hub-format=compressed +experts/bert/pubmed,https://tfhub.dev/google/experts/bert/pubmed/2?tf-hub-format=compressed +faster_rcnn/inception_resnet_v2_1024x1024,https://tfhub.dev/tensorflow/faster_rcnn/inception_resnet_v2_1024x1024/1?tf-hub-format=compressed +ssd_mobilenet_v2/fpnlite_640x640,https://tfhub.dev/tensorflow/ssd_mobilenet_v2/fpnlite_640x640/1?tf-hub-format=compressed +imagenet/resnet_v1_50/classification,https://tfhub.dev/google/imagenet/resnet_v1_50/classification/5?tf-hub-format=compressed 
+movinet/a5/base/kinetics-600/classification,https://tfhub.dev/tensorflow/movinet/a5/base/kinetics-600/classification/3?tf-hub-format=compressed +biggan-128,https://tfhub.dev/deepmind/biggan-128/2?tf-hub-format=compressed +Wiki-words-250-with-normalization,https://tfhub.dev/google/Wiki-words-250-with-normalization/2?tf-hub-format=compressed +sentence-t5/st5-base,https://tfhub.dev/google/sentence-t5/st5-base/1?tf-hub-format=compressed +movinet/a2/base/kinetics-600/classification,https://tfhub.dev/tensorflow/movinet/a2/base/kinetics-600/classification/3?tf-hub-format=compressed +bert_zh_preprocess,https://tfhub.dev/tensorflow/bert_zh_preprocess/3?tf-hub-format=compressed +bert_en_cased_L-24_H-1024_A-16,https://tfhub.dev/tensorflow/bert_en_cased_L-24_H-1024_A-16/4?tf-hub-format=compressed +inaturalist/inception_v3/feature_vector,https://tfhub.dev/google/inaturalist/inception_v3/feature_vector/5?tf-hub-format=compressed +efficientnet/lite1/classification,https://tfhub.dev/tensorflow/efficientnet/lite1/classification/2?tf-hub-format=compressed +wav2vec2,https://tfhub.dev/vasudevgupta7/wav2vec2/1?tf-hub-format=compressed +experts/bert/wiki_books,https://tfhub.dev/google/experts/bert/wiki_books/2?tf-hub-format=compressed +retinanet/resnet50_v1_fpn_640x640,https://tfhub.dev/tensorflow/retinanet/resnet50_v1_fpn_640x640/1?tf-hub-format=compressed +bert_en_wwm_uncased_L-24_H-1024_A-16,https://tfhub.dev/tensorflow/bert_en_wwm_uncased_L-24_H-1024_A-16/4?tf-hub-format=compressed +cropnet/classifier/cassava_disease_V1,https://tfhub.dev/google/cropnet/classifier/cassava_disease_V1/2?tf-hub-format=compressed +midas/v2_1_small,https://tfhub.dev/intel/midas/v2_1_small/1?tf-hub-format=compressed +object_detection/mobile_object_labeler_v1,https://tfhub.dev/google/object_detection/mobile_object_labeler_v1/1?tf-hub-format=compressed +imagenet/resnet_v2_101/classification,https://tfhub.dev/google/imagenet/resnet_v2_101/classification/5?tf-hub-format=compressed +electra_base,https://tfhub.dev/google/electra_base/2?tf-hub-format=compressed +efficientdet/d4,https://tfhub.dev/tensorflow/efficientdet/d4/1?tf-hub-format=compressed +small_bert/bert_en_uncased_L-6_H-512_A-8,https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-512_A-8/2?tf-hub-format=compressed +mobilebert_multi_cased_L-24_H-128_B-512_A-4_F-4_OPT,https://tfhub.dev/tensorflow/mobilebert_multi_cased_L-24_H-128_B-512_A-4_F-4_OPT/1?tf-hub-format=compressed +i3d-kinetics-600,https://tfhub.dev/deepmind/i3d-kinetics-600/1?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_s/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_s/feature_vector/2?tf-hub-format=compressed +efficientdet/d1,https://tfhub.dev/tensorflow/efficientdet/d1/1?tf-hub-format=compressed +nnlm-de-dim50,https://tfhub.dev/google/nnlm-de-dim50/2?tf-hub-format=compressed +ganeval-cifar10-convnet,https://tfhub.dev/deepmind/ganeval-cifar10-convnet/1?tf-hub-format=compressed +musiq/ava,https://tfhub.dev/google/musiq/ava/1?tf-hub-format=compressed +talkheads_ggelu_bert_en_base,https://tfhub.dev/tensorflow/talkheads_ggelu_bert_en_base/2?tf-hub-format=compressed +nnlm-es-dim128,https://tfhub.dev/google/nnlm-es-dim128/2?tf-hub-format=compressed +esrgan-tf2,https://tfhub.dev/captain-pool/esrgan-tf2/1?tf-hub-format=compressed +imagenet/mobilenet_v3_small_075_224/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v3_small_075_224/feature_vector/5?tf-hub-format=compressed 
+efficientnet/b2/feature-vector,https://tfhub.dev/tensorflow/efficientnet/b2/feature-vector/1?tf-hub-format=compressed +efficientnet/b7/classification,https://tfhub.dev/tensorflow/efficientnet/b7/classification/1?tf-hub-format=compressed +small_bert/bert_en_uncased_L-12_H-256_A-4,https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-256_A-4/2?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_ft1k_xl/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_xl/feature_vector/2?tf-hub-format=compressed +ssd_mobilenet_v1/fpn_640x640,https://tfhub.dev/tensorflow/ssd_mobilenet_v1/fpn_640x640/1?tf-hub-format=compressed +mil-nce/s3d,https://tfhub.dev/deepmind/mil-nce/s3d/1?tf-hub-format=compressed +imagenet/nasnet_mobile/feature_vector,https://tfhub.dev/google/imagenet/nasnet_mobile/feature_vector/5?tf-hub-format=compressed +efficientnet/b4/feature-vector,https://tfhub.dev/google/efficientnet/b4/feature-vector/1?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_m/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_m/feature_vector/2?tf-hub-format=compressed +efficientdet/d2,https://tfhub.dev/tensorflow/efficientdet/d2/1?tf-hub-format=compressed +Wiki-words-500,https://tfhub.dev/google/Wiki-words-500/2?tf-hub-format=compressed +imagenet/inception_v2/feature_vector,https://tfhub.dev/google/imagenet/inception_v2/feature_vector/5?tf-hub-format=compressed +disease-classification,https://tfhub.dev/agripredict/disease-classification/1?tf-hub-format=compressed +biggan-deep-128,https://tfhub.dev/deepmind/biggan-deep-128/1?tf-hub-format=compressed +bit/m-r101x3,https://tfhub.dev/google/bit/m-r101x3/1?tf-hub-format=compressed +centernet/hourglass_1024x1024,https://tfhub.dev/tensorflow/centernet/hourglass_1024x1024/1?tf-hub-format=compressed +small_bert/bert_en_uncased_L-12_H-128_A-2,https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-128_A-2/2?tf-hub-format=compressed +centernet/resnet50v2_512x512,https://tfhub.dev/tensorflow/centernet/resnet50v2_512x512/1?tf-hub-format=compressed +imagenet/resnet_v2_152/classification,https://tfhub.dev/google/imagenet/resnet_v2_152/classification/5?tf-hub-format=compressed +imagenet/mobilenet_v1_075_160/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v1_075_160/feature_vector/5?tf-hub-format=compressed +aiy/vision/classifier/plants_V1,https://tfhub.dev/google/aiy/vision/classifier/plants_V1/1?tf-hub-format=compressed +imagenet/pnasnet_large/feature_vector,https://tfhub.dev/google/imagenet/pnasnet_large/feature_vector/5?tf-hub-format=compressed +faster_rcnn/resnet101_v1_640x640,https://tfhub.dev/tensorflow/faster_rcnn/resnet101_v1_640x640/1?tf-hub-format=compressed +imagenet/mobilenet_v2_130_224/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_130_224/feature_vector/5?tf-hub-format=compressed +nnlm-es-dim128-with-normalization,https://tfhub.dev/google/nnlm-es-dim128-with-normalization/2?tf-hub-format=compressed +bit/m-r50x1/imagenet21k_classification,https://tfhub.dev/google/bit/m-r50x1/imagenet21k_classification/1?tf-hub-format=compressed +imagenet/resnet_v1_101/feature_vector,https://tfhub.dev/google/imagenet/resnet_v1_101/feature_vector/5?tf-hub-format=compressed +experts/bert/wiki_books/sst2,https://tfhub.dev/google/experts/bert/wiki_books/sst2/2?tf-hub-format=compressed +retinanet/resnet152_v1_fpn_1024x1024,https://tfhub.dev/tensorflow/retinanet/resnet152_v1_fpn_1024x1024/1?tf-hub-format=compressed 
+small_bert/bert_en_uncased_L-6_H-256_A-4,https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-256_A-4/2?tf-hub-format=compressed +movinet/a0/base/kinetics-600/classification,https://tfhub.dev/tensorflow/movinet/a0/base/kinetics-600/classification/3?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_ft1k_b0/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b0/feature_vector/2?tf-hub-format=compressed +imagenet/resnet_v2_101/feature_vector,https://tfhub.dev/google/imagenet/resnet_v2_101/feature_vector/5?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_l/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_l/feature_vector/2?tf-hub-format=compressed +imagenet/nasnet_large/classification,https://tfhub.dev/google/imagenet/nasnet_large/classification/5?tf-hub-format=compressed +faster_rcnn/resnet152_v1_1024x1024,https://tfhub.dev/tensorflow/faster_rcnn/resnet152_v1_1024x1024/1?tf-hub-format=compressed +vit_s16_fe,https://tfhub.dev/sayakpaul/vit_s16_fe/1?tf-hub-format=compressed +zh_segmentation,https://tfhub.dev/google/zh_segmentation/1?tf-hub-format=compressed +wiki40b-lm-es,https://tfhub.dev/google/wiki40b-lm-es/1?tf-hub-format=compressed +centernet/resnet50v1_fpn_512x512_kpts,https://tfhub.dev/tensorflow/centernet/resnet50v1_fpn_512x512_kpts/1?tf-hub-format=compressed +nnlm-es-dim50-with-normalization,https://tfhub.dev/google/nnlm-es-dim50-with-normalization/2?tf-hub-format=compressed +efficientdet/d6,https://tfhub.dev/tensorflow/efficientdet/d6/1?tf-hub-format=compressed +cord-19/swivel-128d,https://tfhub.dev/tensorflow/cord-19/swivel-128d/3?tf-hub-format=compressed +albert_large,https://tfhub.dev/google/albert_large/3?tf-hub-format=compressed +centernet/resnet50v2_512x512_kpts,https://tfhub.dev/tensorflow/centernet/resnet50v2_512x512_kpts/1?tf-hub-format=compressed +efficientnet/b7/feature-vector,https://tfhub.dev/google/efficientnet/b7/feature-vector/1?tf-hub-format=compressed +efficientnet/b5/feature-vector,https://tfhub.dev/tensorflow/efficientnet/b5/feature-vector/1?tf-hub-format=compressed +imagenet/nasnet_large/feature_vector,https://tfhub.dev/google/imagenet/nasnet_large/feature_vector/5?tf-hub-format=compressed +centernet/resnet101v1_fpn_512x512,https://tfhub.dev/tensorflow/centernet/resnet101v1_fpn_512x512/1?tf-hub-format=compressed +retinanet/resnet152_v1_fpn_640x640,https://tfhub.dev/tensorflow/retinanet/resnet152_v1_fpn_640x640/1?tf-hub-format=compressed +movinet/a0/stream/kinetics-600/classification,https://tfhub.dev/tensorflow/movinet/a0/stream/kinetics-600/classification/3?tf-hub-format=compressed +imagenet/mobilenet_v1_025_224/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v1_025_224/feature_vector/5?tf-hub-format=compressed +unet/industrial/class_1,https://tfhub.dev/nvidia/unet/industrial/class_1/1?tf-hub-format=compressed +mobilebert_en_uncased_L-24_H-128_B-512_A-4_F-4_OPT,https://tfhub.dev/tensorflow/mobilebert_en_uncased_L-24_H-128_B-512_A-4_F-4_OPT/1?tf-hub-format=compressed +boundless/quarter,https://tfhub.dev/google/boundless/quarter/1?tf-hub-format=compressed +gtr/gtr-base,https://tfhub.dev/google/gtr/gtr-base/1?tf-hub-format=compressed +small_bert/bert_en_uncased_L-12_H-512_A-8,https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-512_A-8/2?tf-hub-format=compressed +LAReQA/mBERT_En_En,https://tfhub.dev/google/LAReQA/mBERT_En_En/1?tf-hub-format=compressed +efficientdet/d5,https://tfhub.dev/tensorflow/efficientdet/d5/1?tf-hub-format=compressed 
+faster_rcnn/resnet101_v1_1024x1024,https://tfhub.dev/tensorflow/faster_rcnn/resnet101_v1_1024x1024/1?tf-hub-format=compressed +film,https://tfhub.dev/google/film/1?tf-hub-format=compressed +electra_large,https://tfhub.dev/google/electra_large/2?tf-hub-format=compressed +small_bert/bert_uncased_L-2_H-128_A-2,https://tfhub.dev/google/small_bert/bert_uncased_L-2_H-128_A-2/2?tf-hub-format=compressed +imagenet/mobilenet_v1_075_224/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_075_224/classification/5?tf-hub-format=compressed +efficientdet/d3,https://tfhub.dev/tensorflow/efficientdet/d3/1?tf-hub-format=compressed +distilbert_en_uncased_preprocess,https://tfhub.dev/jeongukjae/distilbert_en_uncased_preprocess/2?tf-hub-format=compressed +movinet/a2/stream/kinetics-600/classification,https://tfhub.dev/tensorflow/movinet/a2/stream/kinetics-600/classification/3?tf-hub-format=compressed +movinet/a4/base/kinetics-600/classification,https://tfhub.dev/tensorflow/movinet/a4/base/kinetics-600/classification/3?tf-hub-format=compressed +efficientnet/b6/feature-vector,https://tfhub.dev/google/efficientnet/b6/feature-vector/1?tf-hub-format=compressed +nnlm-zh-dim50,https://tfhub.dev/google/nnlm-zh-dim50/2?tf-hub-format=compressed +nnlm-id-dim128-with-normalization,https://tfhub.dev/google/nnlm-id-dim128-with-normalization/2?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet1k_b3/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b3/feature_vector/2?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_ft1k_b3/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b3/feature_vector/2?tf-hub-format=compressed +humpback_whale,https://tfhub.dev/google/humpback_whale/1?tf-hub-format=compressed +nnlm-id-dim50,https://tfhub.dev/google/nnlm-id-dim50/2?tf-hub-format=compressed +nonsemantic-speech-benchmark/frill,https://tfhub.dev/google/nonsemantic-speech-benchmark/frill/1?tf-hub-format=compressed +vit_s16_classification,https://tfhub.dev/sayakpaul/vit_s16_classification/1?tf-hub-format=compressed +faster_rcnn/resnet152_v1_800x1333,https://tfhub.dev/tensorflow/faster_rcnn/resnet152_v1_800x1333/1?tf-hub-format=compressed +bit/s-r152x4,https://tfhub.dev/google/bit/s-r152x4/1?tf-hub-format=compressed +imagenet/resnet_v1_101/classification,https://tfhub.dev/google/imagenet/resnet_v1_101/classification/5?tf-hub-format=compressed +faster_rcnn/resnet50_v1_800x1333,https://tfhub.dev/tensorflow/faster_rcnn/resnet50_v1_800x1333/1?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_ft1k_l/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_l/feature_vector/2?tf-hub-format=compressed +distilbert_multi_cased_L-6_H-768_A-12,https://tfhub.dev/jeongukjae/distilbert_multi_cased_L-6_H-768_A-12/1?tf-hub-format=compressed +imagenet/mobilenet_v2_050_224/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_050_224/feature_vector/5?tf-hub-format=compressed +roberta_en_cased_preprocess,https://tfhub.dev/jeongukjae/roberta_en_cased_preprocess/1?tf-hub-format=compressed +efficientnet/b3/feature-vector,https://tfhub.dev/google/efficientnet/b3/feature-vector/1?tf-hub-format=compressed +small_bert/bert_en_uncased_L-8_H-256_A-4,https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-256_A-4/2?tf-hub-format=compressed +efficientnet/b7/classification,https://tfhub.dev/google/efficientnet/b7/classification/1?tf-hub-format=compressed 
+bit/s-r50x1/ilsvrc2012_classification,https://tfhub.dev/google/bit/s-r50x1/ilsvrc2012_classification/1?tf-hub-format=compressed +tf2-preview/nnlm-ko-dim128-with-normalization,https://tfhub.dev/google/tf2-preview/nnlm-ko-dim128-with-normalization/1?tf-hub-format=compressed +imagenet/mobilenet_v2_100_160/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_100_160/feature_vector/5?tf-hub-format=compressed +imagenet/mobilenet_v1_025_128/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_025_128/classification/5?tf-hub-format=compressed +experts/bit/r50x1/in21k/food,https://tfhub.dev/google/experts/bit/r50x1/in21k/food/1?tf-hub-format=compressed +imagenet/resnet_v1_152/feature_vector,https://tfhub.dev/google/imagenet/resnet_v1_152/feature_vector/5?tf-hub-format=compressed +faster_rcnn/resnet101_v1_800x1333,https://tfhub.dev/tensorflow/faster_rcnn/resnet101_v1_800x1333/1?tf-hub-format=compressed +bit/s-r50x3,https://tfhub.dev/google/bit/s-r50x3/1?tf-hub-format=compressed +smaller_LaBSE_15lang,https://tfhub.dev/jeongukjae/smaller_LaBSE_15lang/1?tf-hub-format=compressed +imagenet/mobilenet_v2_100_96/classification,https://tfhub.dev/google/imagenet/mobilenet_v2_100_96/classification/5?tf-hub-format=compressed +imagenet/mobilenet_v2_100_192/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_100_192/feature_vector/5?tf-hub-format=compressed +tiny_video_net/tvn1,https://tfhub.dev/google/tiny_video_net/tvn1/1?tf-hub-format=compressed +nnlm-id-dim128,https://tfhub.dev/google/nnlm-id-dim128/2?tf-hub-format=compressed +lambert_en_uncased_L-24_H-1024_A-16,https://tfhub.dev/tensorflow/lambert_en_uncased_L-24_H-1024_A-16/2?tf-hub-format=compressed +imagenet/amoebanet_a_n18_f448/classification,https://tfhub.dev/google/imagenet/amoebanet_a_n18_f448/classification/1?tf-hub-format=compressed +retinanet/resnet101_v1_fpn_1024x1024,https://tfhub.dev/tensorflow/retinanet/resnet101_v1_fpn_1024x1024/1?tf-hub-format=compressed +imagenet/mobilenet_v1_025_192/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v1_025_192/feature_vector/5?tf-hub-format=compressed +imagenet/mobilenet_v2_100_160/classification,https://tfhub.dev/google/imagenet/mobilenet_v2_100_160/classification/5?tf-hub-format=compressed +imagenet/inception_v2/classification,https://tfhub.dev/google/imagenet/inception_v2/classification/5?tf-hub-format=compressed +efficientnet/b5/feature-vector,https://tfhub.dev/google/efficientnet/b5/feature-vector/1?tf-hub-format=compressed +distilbert_multi_cased_preprocess,https://tfhub.dev/jeongukjae/distilbert_multi_cased_preprocess/2?tf-hub-format=compressed +nnlm-de-dim128,https://tfhub.dev/google/nnlm-de-dim128/2?tf-hub-format=compressed +bertseq2seq/roberta24_gigaword,https://tfhub.dev/google/bertseq2seq/roberta24_gigaword/1?tf-hub-format=compressed +vit_b8_fe,https://tfhub.dev/sayakpaul/vit_b8_fe/1?tf-hub-format=compressed +aiy/vision/classifier/insects_V1,https://tfhub.dev/google/aiy/vision/classifier/insects_V1/1?tf-hub-format=compressed +bertseq2seq/roberta24_cnndm,https://tfhub.dev/google/bertseq2seq/roberta24_cnndm/1?tf-hub-format=compressed +movinet/a1/base/kinetics-600/classification,https://tfhub.dev/tensorflow/movinet/a1/base/kinetics-600/classification/3?tf-hub-format=compressed +remote_sensing/eurosat-resnet50,https://tfhub.dev/google/remote_sensing/eurosat-resnet50/1?tf-hub-format=compressed +universal-sentence-encoder-cmlm/en-large,https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-large/1?tf-hub-format=compressed 
+imagenet/mobilenet_v2_050_96/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_050_96/feature_vector/5?tf-hub-format=compressed +efficientnet/b3/classification,https://tfhub.dev/tensorflow/efficientnet/b3/classification/1?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_b1/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b1/feature_vector/2?tf-hub-format=compressed +universal-sentence-encoder-xling/en-de,https://tfhub.dev/google/universal-sentence-encoder-xling/en-de/1?tf-hub-format=compressed +bit/s-r101x1,https://tfhub.dev/google/bit/s-r101x1/1?tf-hub-format=compressed +smaller_LaBSE_15lang_preprocess,https://tfhub.dev/jeongukjae/smaller_LaBSE_15lang_preprocess/1?tf-hub-format=compressed +vgg19-block5-conv2-unpooling-decoder,https://tfhub.dev/emilutz/vgg19-block5-conv2-unpooling-decoder/1?tf-hub-format=compressed +imagenet/mobilenet_v1_050_224/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_050_224/classification/5?tf-hub-format=compressed +efficientnet/b5/classification,https://tfhub.dev/tensorflow/efficientnet/b5/classification/1?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_b3/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b3/feature_vector/2?tf-hub-format=compressed +experts/bert/wiki_books/mnli,https://tfhub.dev/google/experts/bert/wiki_books/mnli/2?tf-hub-format=compressed +tf2-preview/nnlm-en-dim50-with-normalization,https://tfhub.dev/google/tf2-preview/nnlm-en-dim50-with-normalization/1?tf-hub-format=compressed +efficientnet/b1/feature-vector,https://tfhub.dev/google/efficientnet/b1/feature-vector/1?tf-hub-format=compressed +cropnet/feature_vector/concat,https://tfhub.dev/google/cropnet/feature_vector/concat/1?tf-hub-format=compressed +efficientnet/b5/classification,https://tfhub.dev/google/efficientnet/b5/classification/1?tf-hub-format=compressed +bit/m-r101x1/ilsvrc2012_classification,https://tfhub.dev/google/bit/m-r101x1/ilsvrc2012_classification/1?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet1k_l/classification,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_l/classification/2?tf-hub-format=compressed +nnlm-zh-dim128,https://tfhub.dev/google/nnlm-zh-dim128/2?tf-hub-format=compressed +tf2-preview/nnlm-ja-dim128-with-normalization,https://tfhub.dev/google/tf2-preview/nnlm-ja-dim128-with-normalization/1?tf-hub-format=compressed +imagenet/resnet_v1_152/classification,https://tfhub.dev/google/imagenet/resnet_v1_152/classification/5?tf-hub-format=compressed +covid-twitter-bert,https://tfhub.dev/digitalepidemiologylab/covid-twitter-bert/2?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_b0/classification,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b0/classification/2?tf-hub-format=compressed +silero-stt/en,https://tfhub.dev/silero/silero-stt/en/1?tf-hub-format=compressed +experts/bit/r50x1/in21k/clothing,https://tfhub.dev/google/experts/bit/r50x1/in21k/clothing/1?tf-hub-format=compressed +roberta_en_cased_L-12_H-768_A-12,https://tfhub.dev/jeongukjae/roberta_en_cased_L-12_H-768_A-12/1?tf-hub-format=compressed +on_device_vision/classifier/landmarks_classifier_north_america_V1,https://tfhub.dev/google/on_device_vision/classifier/landmarks_classifier_north_america_V1/1?tf-hub-format=compressed +bit/s-r50x3/ilsvrc2012_classification,https://tfhub.dev/google/bit/s-r50x3/ilsvrc2012_classification/1?tf-hub-format=compressed +cond-biggan,https://tfhub.dev/vtab/cond-biggan/1?tf-hub-format=compressed 
+LAReQA/mBERT_X_Y,https://tfhub.dev/google/LAReQA/mBERT_X_Y/1?tf-hub-format=compressed +imagenet/mobilenet_v2_100_192/classification,https://tfhub.dev/google/imagenet/mobilenet_v2_100_192/classification/5?tf-hub-format=compressed +nnlm-es-dim50,https://tfhub.dev/google/nnlm-es-dim50/2?tf-hub-format=compressed +supcon/resnet_v1_50/imagenet/classification,https://tfhub.dev/google/supcon/resnet_v1_50/imagenet/classification/1?tf-hub-format=compressed +nnlm-zh-dim128-with-normalization,https://tfhub.dev/google/nnlm-zh-dim128-with-normalization/2?tf-hub-format=compressed +experts/bit/r50x1/in21k/animal,https://tfhub.dev/google/experts/bit/r50x1/in21k/animal/1?tf-hub-format=compressed +imagenet/mobilenet_v2_035_96/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_035_96/feature_vector/5?tf-hub-format=compressed +edgetpu/vision/deeplab-edgetpu/fused_argmax/xs,https://tfhub.dev/google/edgetpu/vision/deeplab-edgetpu/fused_argmax/xs/1?tf-hub-format=compressed +efficientnet/b2/classification,https://tfhub.dev/tensorflow/efficientnet/b2/classification/1?tf-hub-format=compressed +sentence-t5/st5-large,https://tfhub.dev/google/sentence-t5/st5-large/1?tf-hub-format=compressed +on_device_vision/classifier/landmarks_classifier_europe_V1,https://tfhub.dev/google/on_device_vision/classifier/landmarks_classifier_europe_V1/1?tf-hub-format=compressed +bit/m-r50x3/imagenet21k_classification,https://tfhub.dev/google/bit/m-r50x3/imagenet21k_classification/1?tf-hub-format=compressed +mmv/s3d,https://tfhub.dev/deepmind/mmv/s3d/1?tf-hub-format=compressed +soundstream/mel/decoder/music,https://tfhub.dev/google/soundstream/mel/decoder/music/1?tf-hub-format=compressed +imagenet/mobilenet_v2_075_160/classification,https://tfhub.dev/google/imagenet/mobilenet_v2_075_160/classification/5?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_ft1k_s/classification,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_s/classification/2?tf-hub-format=compressed +imagenet/mobilenet_v2_075_128/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_075_128/feature_vector/5?tf-hub-format=compressed +ulmfit/en/sp35k_uncased,https://tfhub.dev/edrone/ulmfit/en/sp35k_uncased/1?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_ft1k_b0/classification,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b0/classification/2?tf-hub-format=compressed +imagenet/mobilenet_v2_075_96/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_075_96/feature_vector/5?tf-hub-format=compressed +bertseq2seq/roberta24_wikisplit,https://tfhub.dev/google/bertseq2seq/roberta24_wikisplit/1?tf-hub-format=compressed +imagenet/mobilenet_v1_100_128/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v1_100_128/feature_vector/5?tf-hub-format=compressed +regnety800mf_classification,https://tfhub.dev/adityakane2001/regnety800mf_classification/1?tf-hub-format=compressed +mmv/tsm-resnet50x2,https://tfhub.dev/deepmind/mmv/tsm-resnet50x2/1?tf-hub-format=compressed +vgg19-block1-conv2-unpooling-encoder,https://tfhub.dev/emilutz/vgg19-block1-conv2-unpooling-encoder/1?tf-hub-format=compressed +efficientnet/b2/classification,https://tfhub.dev/google/efficientnet/b2/classification/1?tf-hub-format=compressed +imagenet/mobilenet_v2_050_160/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_050_160/feature_vector/5?tf-hub-format=compressed +imagenet/mobilenet_v2_075_224/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_075_224/feature_vector/5?tf-hub-format=compressed 
+cropnet/feature_vector/imagenet,https://tfhub.dev/google/cropnet/feature_vector/imagenet/1?tf-hub-format=compressed +nnlm-ko-dim50-with-normalization,https://tfhub.dev/google/nnlm-ko-dim50-with-normalization/2?tf-hub-format=compressed +vgg19-block2-conv2-unpooling-encoder,https://tfhub.dev/emilutz/vgg19-block2-conv2-unpooling-encoder/1?tf-hub-format=compressed +experts/bit/r50x1/in21k/solid,https://tfhub.dev/google/experts/bit/r50x1/in21k/solid/1?tf-hub-format=compressed +efficientnet/b4/classification,https://tfhub.dev/google/efficientnet/b4/classification/1?tf-hub-format=compressed +regnety400mf_classification,https://tfhub.dev/adityakane2001/regnety400mf_classification/1?tf-hub-format=compressed +bit/m-r152x4/ilsvrc2012_classification,https://tfhub.dev/google/bit/m-r152x4/ilsvrc2012_classification/1?tf-hub-format=compressed +nnlm-zh-dim50-with-normalization,https://tfhub.dev/google/nnlm-zh-dim50-with-normalization/2?tf-hub-format=compressed +on_device_vision/classifier/landmarks_classifier_south_america_V1,https://tfhub.dev/google/on_device_vision/classifier/landmarks_classifier_south_america_V1/1?tf-hub-format=compressed +unsupervised-adversarial-training/cifar10/wrn_106,https://tfhub.dev/deepmind/unsupervised-adversarial-training/cifar10/wrn_106/1?tf-hub-format=compressed +tf2-preview/nnlm-de-dim128-with-normalization,https://tfhub.dev/google/tf2-preview/nnlm-de-dim128-with-normalization/1?tf-hub-format=compressed +wiki40b-lm-zh-cn,https://tfhub.dev/google/wiki40b-lm-zh-cn/1?tf-hub-format=compressed +on_device_vision/classifier/landmarks_classifier_africa_V1,https://tfhub.dev/google/on_device_vision/classifier/landmarks_classifier_africa_V1/1?tf-hub-format=compressed +regnety200mf_classification,https://tfhub.dev/adityakane2001/regnety200mf_classification/1?tf-hub-format=compressed +talkheads_ggelu_bert_en_large,https://tfhub.dev/tensorflow/talkheads_ggelu_bert_en_large/2?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet1k_m/classification,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_m/classification/2?tf-hub-format=compressed +HRNet/coco-hrnetv2-w48,https://tfhub.dev/google/HRNet/coco-hrnetv2-w48/1?tf-hub-format=compressed +imagenet/mobilenet_v2_050_192/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_050_192/feature_vector/5?tf-hub-format=compressed +imagenet/mobilenet_v1_050_128/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v1_050_128/feature_vector/5?tf-hub-format=compressed +experts/bert/pubmed/squad2,https://tfhub.dev/google/experts/bert/pubmed/squad2/2?tf-hub-format=compressed +german-mbmelgan,https://tfhub.dev/monatis/german-mbmelgan/1?tf-hub-format=compressed +supcon/resnet_v1_101/imagenet/classification,https://tfhub.dev/google/supcon/resnet_v1_101/imagenet/classification/1?tf-hub-format=compressed +wiki40b-lm-fr,https://tfhub.dev/google/wiki40b-lm-fr/1?tf-hub-format=compressed +bit/m-r101x3/ilsvrc2012_classification,https://tfhub.dev/google/bit/m-r101x3/ilsvrc2012_classification/1?tf-hub-format=compressed +imagenet/mobilenet_v1_100_160/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v1_100_160/feature_vector/5?tf-hub-format=compressed +imagenet/mobilenet_v2_035_128/classification,https://tfhub.dev/google/imagenet/mobilenet_v2_035_128/classification/5?tf-hub-format=compressed +edgetpu/vision/autoseg-edgetpu/fused_argmax/xs,https://tfhub.dev/google/edgetpu/vision/autoseg-edgetpu/fused_argmax/xs/1?tf-hub-format=compressed 
+bit/s-r101x1/ilsvrc2012_classification,https://tfhub.dev/google/bit/s-r101x1/ilsvrc2012_classification/1?tf-hub-format=compressed +german-tacotron2,https://tfhub.dev/monatis/german-tacotron2/1?tf-hub-format=compressed +convnext_xlarge_21k_224,https://tfhub.dev/sayakpaul/convnext_xlarge_21k_224/1?tf-hub-format=compressed +gtr/gtr-large,https://tfhub.dev/google/gtr/gtr-large/1?tf-hub-format=compressed +AraBERT,https://tfhub.dev/callmemehdi/AraBERT/1?tf-hub-format=compressed +trillsson1,https://tfhub.dev/google/trillsson1/1?tf-hub-format=compressed +imagenet/mobilenet_v1_050_128/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_050_128/classification/5?tf-hub-format=compressed +tf2-preview/nnlm-de-dim50-with-normalization,https://tfhub.dev/google/tf2-preview/nnlm-de-dim50-with-normalization/1?tf-hub-format=compressed +imagenet/mobilenet_v1_100_192/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v1_100_192/feature_vector/5?tf-hub-format=compressed +efficientnet/b6/classification,https://tfhub.dev/tensorflow/efficientnet/b6/classification/1?tf-hub-format=compressed +vit_b8_classification,https://tfhub.dev/sayakpaul/vit_b8_classification/1?tf-hub-format=compressed +universal-sentence-encoder-xling/en-es,https://tfhub.dev/google/universal-sentence-encoder-xling/en-es/1?tf-hub-format=compressed +mil-nce/i3d,https://tfhub.dev/deepmind/mil-nce/i3d/1?tf-hub-format=compressed +vit_l16_fe,https://tfhub.dev/sayakpaul/vit_l16_fe/1?tf-hub-format=compressed +nonsemantic-speech-benchmark/frill-nofrontend,https://tfhub.dev/google/nonsemantic-speech-benchmark/frill-nofrontend/1?tf-hub-format=compressed +vit_r50_l32_fe,https://tfhub.dev/sayakpaul/vit_r50_l32_fe/1?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_ft1k_b2/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b2/feature_vector/2?tf-hub-format=compressed +remote_sensing/so2sat-resnet50,https://tfhub.dev/google/remote_sensing/so2sat-resnet50/1?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_ft1k_b3/classification,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b3/classification/2?tf-hub-format=compressed +experts/bit/r50x1/in21k/spermatophyte,https://tfhub.dev/google/experts/bit/r50x1/in21k/spermatophyte/1?tf-hub-format=compressed +uncond-biggan,https://tfhub.dev/vtab/uncond-biggan/1?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_ft1k_l/classification,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_l/classification/2?tf-hub-format=compressed +tiny_video_net/tvn3,https://tfhub.dev/google/tiny_video_net/tvn3/1?tf-hub-format=compressed +imagenet/mobilenet_v2_035_192/classification,https://tfhub.dev/google/imagenet/mobilenet_v2_035_192/classification/5?tf-hub-format=compressed +tf2-preview/nnlm-ko-dim50,https://tfhub.dev/google/tf2-preview/nnlm-ko-dim50/1?tf-hub-format=compressed +imagenet/mobilenet_v2_035_160/classification,https://tfhub.dev/google/imagenet/mobilenet_v2_035_160/classification/5?tf-hub-format=compressed +small_bert/bert_uncased_L-4_H-128_A-2,https://tfhub.dev/google/small_bert/bert_uncased_L-4_H-128_A-2/2?tf-hub-format=compressed +bit/m-r101x1/imagenet21k_classification,https://tfhub.dev/google/bit/m-r101x1/imagenet21k_classification/1?tf-hub-format=compressed +efficientnet/b3/classification,https://tfhub.dev/google/efficientnet/b3/classification/1?tf-hub-format=compressed 
+imagenet/efficientnet_v2_imagenet1k_b2/classification,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b2/classification/2?tf-hub-format=compressed +wav2vec2-xlsr-53,https://tfhub.dev/vasudevgupta7/wav2vec2-xlsr-53/1?tf-hub-format=compressed +unet/industrial/class_10,https://tfhub.dev/nvidia/unet/industrial/class_10/1?tf-hub-format=compressed +movinet/a5/stream/kinetics-600/classification,https://tfhub.dev/tensorflow/movinet/a5/stream/kinetics-600/classification/3?tf-hub-format=compressed +vit_r26_s32_lightaug_fe,https://tfhub.dev/sayakpaul/vit_r26_s32_lightaug_fe/1?tf-hub-format=compressed +mmt/architecture_image-q-24,https://tfhub.dev/deepmind/mmt/architecture_image-q-24/1?tf-hub-format=compressed +wiki40b-lm-nl,https://tfhub.dev/google/wiki40b-lm-nl/1?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_m/classification,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_m/classification/2?tf-hub-format=compressed +tf2-preview/nnlm-zh-dim50,https://tfhub.dev/google/tf2-preview/nnlm-zh-dim50/1?tf-hub-format=compressed +edgetpu/vision/deeplab-edgetpu/fused_argmax/s,https://tfhub.dev/google/edgetpu/vision/deeplab-edgetpu/fused_argmax/s/1?tf-hub-format=compressed +compare_gan/model_9_celebahq128_resnet19,https://tfhub.dev/google/compare_gan/model_9_celebahq128_resnet19/1?tf-hub-format=compressed +tf2-preview/nnlm-zh-dim128,https://tfhub.dev/google/tf2-preview/nnlm-zh-dim128/1?tf-hub-format=compressed +imagenet/mobilenet_v1_025_160/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v1_025_160/feature_vector/5?tf-hub-format=compressed +tweening_conv3d_kth,https://tfhub.dev/google/tweening_conv3d_kth/1?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet1k_b1/classification,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b1/classification/2?tf-hub-format=compressed +edgetpu/vision/autoseg-edgetpu/default_argmax/s,https://tfhub.dev/google/edgetpu/vision/autoseg-edgetpu/default_argmax/s/1?tf-hub-format=compressed +LAReQA/mBERT_X_X_mono,https://tfhub.dev/google/LAReQA/mBERT_X_X_mono/1?tf-hub-format=compressed +experts/bit/r50x1/in21k/object,https://tfhub.dev/google/experts/bit/r50x1/in21k/object/1?tf-hub-format=compressed +small_bert/bert_en_uncased_L-10_H-256_A-4,https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-256_A-4/2?tf-hub-format=compressed +supcon/resnet_v1_200/imagenet/classification,https://tfhub.dev/google/supcon/resnet_v1_200/imagenet/classification/1?tf-hub-format=compressed +small_bert/bert_uncased_L-6_H-128_A-2,https://tfhub.dev/google/small_bert/bert_uncased_L-6_H-128_A-2/2?tf-hub-format=compressed +tf2-preview/nnlm-ja-dim50,https://tfhub.dev/google/tf2-preview/nnlm-ja-dim50/1?tf-hub-format=compressed +nnlm-ko-dim50,https://tfhub.dev/google/nnlm-ko-dim50/2?tf-hub-format=compressed +compare_gan/model_12_cifar10_resnet_cifar,https://tfhub.dev/google/compare_gan/model_12_cifar10_resnet_cifar/1?tf-hub-format=compressed +spiral/default-fluid-gansn-celebahq64-gen-19steps,https://tfhub.dev/deepmind/spiral/default-fluid-gansn-celebahq64-gen-19steps/1?tf-hub-format=compressed +compare_gan/s3gan_5_128x128,https://tfhub.dev/google/compare_gan/s3gan_5_128x128/1?tf-hub-format=compressed +convnext_base_1k_224_fe,https://tfhub.dev/sayakpaul/convnext_base_1k_224_fe/1?tf-hub-format=compressed +convnext_base_1k_384,https://tfhub.dev/sayakpaul/convnext_base_1k_384/1?tf-hub-format=compressed +edgetpu/vision/mobilenet-edgetpu-v2/l,https://tfhub.dev/google/edgetpu/vision/mobilenet-edgetpu-v2/l/1?tf-hub-format=compressed 
+small_bert/bert_uncased_L-4_H-512_A-8,https://tfhub.dev/google/small_bert/bert_uncased_L-4_H-512_A-8/2?tf-hub-format=compressed +experts/bit/r50x1/in21k/flower,https://tfhub.dev/google/experts/bit/r50x1/in21k/flower/1?tf-hub-format=compressed +distill_bit_r50x1_160_feature_extraction,https://tfhub.dev/sayakpaul/distill_bit_r50x1_160_feature_extraction/1?tf-hub-format=compressed +edgetpu/vision/mobilenet-edgetpu-v2-feature-vector/l,https://tfhub.dev/google/edgetpu/vision/mobilenet-edgetpu-v2-feature-vector/l/2?tf-hub-format=compressed +spiral/default-wgangp-celebahq64-gen-19steps/agent5,https://tfhub.dev/deepmind/spiral/default-wgangp-celebahq64-gen-19steps/agent5/1?tf-hub-format=compressed +image_augmentation/crop_rotate_color,https://tfhub.dev/google/image_augmentation/crop_rotate_color/1?tf-hub-format=compressed +tf2-preview/nnlm-id-dim50-with-normalization,https://tfhub.dev/google/tf2-preview/nnlm-id-dim50-with-normalization/1?tf-hub-format=compressed +imagenet/mobilenet_v2_035_192/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_035_192/feature_vector/5?tf-hub-format=compressed +small_bert/bert_uncased_L-12_H-768_A-12,https://tfhub.dev/google/small_bert/bert_uncased_L-12_H-768_A-12/2?tf-hub-format=compressed +xlm_roberta_multi_cased_L-24_H-1024_A-16,https://tfhub.dev/jeongukjae/xlm_roberta_multi_cased_L-24_H-1024_A-16/1?tf-hub-format=compressed +regnety600mf_classification,https://tfhub.dev/adityakane2001/regnety600mf_classification/1?tf-hub-format=compressed +convnext_tiny_1k_224,https://tfhub.dev/sayakpaul/convnext_tiny_1k_224/1?tf-hub-format=compressed +tf2-preview/nnlm-es-dim128,https://tfhub.dev/google/tf2-preview/nnlm-es-dim128/1?tf-hub-format=compressed +vision/embedder/fungi_V2,https://tfhub.dev/svampeatlas/vision/embedder/fungi_V2/1?tf-hub-format=compressed +imagenet/mobilenet_v1_050_128/quantops/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v1_050_128/quantops/feature_vector/3?tf-hub-format=compressed +wiki40b-lm-sr,https://tfhub.dev/google/wiki40b-lm-sr/1?tf-hub-format=compressed +imagenet/mobilenet_v1_075_160/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_075_160/classification/5?tf-hub-format=compressed +small_bert/bert_uncased_L-6_H-512_A-8,https://tfhub.dev/google/small_bert/bert_uncased_L-6_H-512_A-8/2?tf-hub-format=compressed +tf2-preview/nnlm-es-dim50,https://tfhub.dev/google/tf2-preview/nnlm-es-dim50/1?tf-hub-format=compressed +sup-rotation-100,https://tfhub.dev/vtab/sup-rotation-100/1?tf-hub-format=compressed +wiki40b-lm-ko,https://tfhub.dev/google/wiki40b-lm-ko/1?tf-hub-format=compressed +imagenet/mobilenet_v2_075_128/classification,https://tfhub.dev/google/imagenet/mobilenet_v2_075_128/classification/5?tf-hub-format=compressed +efficientdet/lite3x/feature-vector,https://tfhub.dev/tensorflow/efficientdet/lite3x/feature-vector/1?tf-hub-format=compressed +mmt/baseline_baseline,https://tfhub.dev/deepmind/mmt/baseline_baseline/1?tf-hub-format=compressed +boundless/three_quarter,https://tfhub.dev/google/boundless/three_quarter/1?tf-hub-format=compressed +imagenet/mobilenet_v1_100_160/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_100_160/classification/5?tf-hub-format=compressed +unet/industrial/class_3,https://tfhub.dev/nvidia/unet/industrial/class_3/1?tf-hub-format=compressed +imagenet/mobilenet_v1_050_160/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v1_050_160/feature_vector/5?tf-hub-format=compressed +musiq/koniq-10k,https://tfhub.dev/google/musiq/koniq-10k/1?tf-hub-format=compressed 
+vision/embedder/inaturalist_V2,https://tfhub.dev/inaturalist/vision/embedder/inaturalist_V2/1?tf-hub-format=compressed +relative-patch-location,https://tfhub.dev/vtab/relative-patch-location/1?tf-hub-format=compressed +experts/bit/r50x1/in21k/mammal,https://tfhub.dev/google/experts/bit/r50x1/in21k/mammal/1?tf-hub-format=compressed +imagenet/mobilenet_v1_100_128/quantops/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_100_128/quantops/classification/3?tf-hub-format=compressed +ulmfit/en/sp35k_cased,https://tfhub.dev/edrone/ulmfit/en/sp35k_cased/1?tf-hub-format=compressed +edgetpu/vision/mobilenet-edgetpu-v2-feature-vector/tiny,https://tfhub.dev/google/edgetpu/vision/mobilenet-edgetpu-v2-feature-vector/tiny/2?tf-hub-format=compressed +sentence-t5/st5-3b,https://tfhub.dev/google/sentence-t5/st5-3b/1?tf-hub-format=compressed +tf2nq,https://tfhub.dev/prvi/tf2nq/1?tf-hub-format=compressed +bertseq2seq/bert24_en_de,https://tfhub.dev/google/bertseq2seq/bert24_en_de/1?tf-hub-format=compressed +edgetpu/nlp/mobilebert-edgetpu/m,https://tfhub.dev/google/edgetpu/nlp/mobilebert-edgetpu/m/1?tf-hub-format=compressed +vit_b32_fe,https://tfhub.dev/sayakpaul/vit_b32_fe/1?tf-hub-format=compressed +experts/bit/r50x1/in21k/structure,https://tfhub.dev/google/experts/bit/r50x1/in21k/structure/1?tf-hub-format=compressed +vit_b32_classification,https://tfhub.dev/sayakpaul/vit_b32_classification/1?tf-hub-format=compressed +wiki40b-lm-ca,https://tfhub.dev/google/wiki40b-lm-ca/1?tf-hub-format=compressed +spiral/default-wgangp-celebahq64-gen-19steps/agent3,https://tfhub.dev/deepmind/spiral/default-wgangp-celebahq64-gen-19steps/agent3/1?tf-hub-format=compressed +imagenet/mobilenet_v1_100_224/quantops/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/quantops/classification/3?tf-hub-format=compressed +imagenet/mobilenet_v1_025_128/quantops/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_025_128/quantops/classification/3?tf-hub-format=compressed +bit/s-r152x4/ilsvrc2012_classification,https://tfhub.dev/google/bit/s-r152x4/ilsvrc2012_classification/1?tf-hub-format=compressed +ulmfit/pl/sp50k_uncased,https://tfhub.dev/edrone/ulmfit/pl/sp50k_uncased/1?tf-hub-format=compressed +vgg19-block3-conv2-unpooling-encoder,https://tfhub.dev/emilutz/vgg19-block3-conv2-unpooling-encoder/1?tf-hub-format=compressed +experts/bert/wiki_books/qqp,https://tfhub.dev/google/experts/bert/wiki_books/qqp/2?tf-hub-format=compressed +remote_sensing/uc_merced-resnet50,https://tfhub.dev/google/remote_sensing/uc_merced-resnet50/1?tf-hub-format=compressed +mixer_b16_i1k_fe,https://tfhub.dev/sayakpaul/mixer_b16_i1k_fe/1?tf-hub-format=compressed +mmt/loss_single-modality-contrastive1024,https://tfhub.dev/deepmind/mmt/loss_single-modality-contrastive1024/1?tf-hub-format=compressed +regnety600mf_feature_extractor,https://tfhub.dev/adityakane2001/regnety600mf_feature_extractor/1?tf-hub-format=compressed +exemplar,https://tfhub.dev/vtab/exemplar/1?tf-hub-format=compressed +imagenet/mobilenet_v1_025_192/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_025_192/classification/5?tf-hub-format=compressed +wiki40b-lm-tr,https://tfhub.dev/google/wiki40b-lm-tr/1?tf-hub-format=compressed +mixer_b16_sam_fe,https://tfhub.dev/sayakpaul/mixer_b16_sam_fe/1?tf-hub-format=compressed +logit_reconstruction/robust,https://tfhub.dev/google/logit_reconstruction/robust/1?tf-hub-format=compressed 
+mmt/baseline_baseline-no-bert-transfer,https://tfhub.dev/deepmind/mmt/baseline_baseline-no-bert-transfer/1?tf-hub-format=compressed +nnlm-ko-dim128,https://tfhub.dev/google/nnlm-ko-dim128/2?tf-hub-format=compressed +experts/bit/r50x1/in21k/angiosperm,https://tfhub.dev/google/experts/bit/r50x1/in21k/angiosperm/1?tf-hub-format=compressed +vit_l16_classification,https://tfhub.dev/sayakpaul/vit_l16_classification/1?tf-hub-format=compressed +wiki40b-lm-lt,https://tfhub.dev/google/wiki40b-lm-lt/1?tf-hub-format=compressed +mixer_l16_i21k_classification,https://tfhub.dev/sayakpaul/mixer_l16_i21k_classification/1?tf-hub-format=compressed +wiki40b-lm-pl,https://tfhub.dev/google/wiki40b-lm-pl/1?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_b3/classification,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b3/classification/2?tf-hub-format=compressed +spiral/default-wgangp-celebahq64-gen-19steps/agent8,https://tfhub.dev/deepmind/spiral/default-wgangp-celebahq64-gen-19steps/agent8/1?tf-hub-format=compressed +experts/bit/r50x1/in21k/carnivore,https://tfhub.dev/google/experts/bit/r50x1/in21k/carnivore/1?tf-hub-format=compressed +HRNet/ade20k-hrnetv2-w48,https://tfhub.dev/google/HRNet/ade20k-hrnetv2-w48/1?tf-hub-format=compressed +wae-gan,https://tfhub.dev/vtab/wae-gan/1?tf-hub-format=compressed +mmt/architecture_image-q-12,https://tfhub.dev/deepmind/mmt/architecture_image-q-12/1?tf-hub-format=compressed +MuRIL-Large,https://tfhub.dev/google/MuRIL-Large/1?tf-hub-format=compressed +spiral/default-wgangp-celebahq64-gen-19steps/agent9,https://tfhub.dev/deepmind/spiral/default-wgangp-celebahq64-gen-19steps/agent9/1?tf-hub-format=compressed +swin_large_patch4_window7_224_in22k_fe,https://tfhub.dev/sayakpaul/swin_large_patch4_window7_224_in22k_fe/1?tf-hub-format=compressed +answer_equivalence/bem,https://tfhub.dev/google/answer_equivalence/bem/1?tf-hub-format=compressed +wiki40b-lm-id,https://tfhub.dev/google/wiki40b-lm-id/1?tf-hub-format=compressed +edgetpu/nlp/mobilebert-edgetpu/xs,https://tfhub.dev/google/edgetpu/nlp/mobilebert-edgetpu/xs/1?tf-hub-format=compressed +experts/bit/r50x1/in21k/artifact,https://tfhub.dev/google/experts/bit/r50x1/in21k/artifact/1?tf-hub-format=compressed +distilbert_en_cased_preprocess,https://tfhub.dev/jeongukjae/distilbert_en_cased_preprocess/2?tf-hub-format=compressed +experts/bit/r50x1/in21k/conveyance,https://tfhub.dev/google/experts/bit/r50x1/in21k/conveyance/1?tf-hub-format=compressed +wiki40b-lm-ar,https://tfhub.dev/google/wiki40b-lm-ar/1?tf-hub-format=compressed +compare_gan/model_2_celebahq128_resnet19,https://tfhub.dev/google/compare_gan/model_2_celebahq128_resnet19/1?tf-hub-format=compressed +experts/bit/r50x1/in21k/living_thing,https://tfhub.dev/google/experts/bit/r50x1/in21k/living_thing/1?tf-hub-format=compressed +wae-mmd,https://tfhub.dev/vtab/wae-mmd/1?tf-hub-format=compressed +american-sign-language,https://tfhub.dev/sayannath/american-sign-language/1?tf-hub-format=compressed +vgg19-block4-conv2-unpooling-decoder,https://tfhub.dev/emilutz/vgg19-block4-conv2-unpooling-decoder/1?tf-hub-format=compressed +experts/bit/r50x1/in21k/plant,https://tfhub.dev/google/experts/bit/r50x1/in21k/plant/1?tf-hub-format=compressed +compare_gan/ssgan_128x128,https://tfhub.dev/google/compare_gan/ssgan_128x128/1?tf-hub-format=compressed +spiral/default-wgangp-celebahq64-gen-19steps/agent4,https://tfhub.dev/deepmind/spiral/default-wgangp-celebahq64-gen-19steps/agent4/1?tf-hub-format=compressed 
+image_augmentation/crop_color,https://tfhub.dev/google/image_augmentation/crop_color/1?tf-hub-format=compressed +vae,https://tfhub.dev/vtab/vae/1?tf-hub-format=compressed +experts/bit/r50x1/in21k/arthropod,https://tfhub.dev/google/experts/bit/r50x1/in21k/arthropod/1?tf-hub-format=compressed +roberta_en_cased_L-24_H-1024_A-16,https://tfhub.dev/jeongukjae/roberta_en_cased_L-24_H-1024_A-16/1?tf-hub-format=compressed +wiki40b-lm-fa,https://tfhub.dev/google/wiki40b-lm-fa/1?tf-hub-format=compressed +vit_r26_s32_medaug_classification,https://tfhub.dev/sayakpaul/vit_r26_s32_medaug_classification/1?tf-hub-format=compressed +remote_sensing/eurosat-ms-resnet50,https://tfhub.dev/google/remote_sensing/eurosat-ms-resnet50/1?tf-hub-format=compressed +compare_gan/model_10_lsun_bedroom_resnet19,https://tfhub.dev/google/compare_gan/model_10_lsun_bedroom_resnet19/1?tf-hub-format=compressed +convnext_xlarge_21k_1k_384_fe,https://tfhub.dev/sayakpaul/convnext_xlarge_21k_1k_384_fe/1?tf-hub-format=compressed +mobilevit_xxs_1k_256_fe,https://tfhub.dev/sayannath/mobilevit_xxs_1k_256_fe/1?tf-hub-format=compressed +mmt/data_cc,https://tfhub.dev/deepmind/mmt/data_cc/1?tf-hub-format=compressed +spiral/default-wgangp-celebahq64-gen-19steps/agent1,https://tfhub.dev/deepmind/spiral/default-wgangp-celebahq64-gen-19steps/agent1/1?tf-hub-format=compressed +small_bert/bert_uncased_L-8_H-768_A-12,https://tfhub.dev/google/small_bert/bert_uncased_L-8_H-768_A-12/2?tf-hub-format=compressed +edgetpu/vision/deeplab-edgetpu/fused_argmax/m,https://tfhub.dev/google/edgetpu/vision/deeplab-edgetpu/fused_argmax/m/1?tf-hub-format=compressed +image_augmentation/nas_imagenet,https://tfhub.dev/google/image_augmentation/nas_imagenet/1?tf-hub-format=compressed +mixer_b16_i21k_fe,https://tfhub.dev/sayakpaul/mixer_b16_i21k_fe/1?tf-hub-format=compressed +nonsemantic-speech-benchmark/trillsson1,https://tfhub.dev/google/nonsemantic-speech-benchmark/trillsson1/1?tf-hub-format=compressed +trillsson5,https://tfhub.dev/google/trillsson5/1?tf-hub-format=compressed +logit_reconstruction/inceptionv3,https://tfhub.dev/google/logit_reconstruction/inceptionv3/1?tf-hub-format=compressed +spiral/default-wgangp-celebahq64-gen-19steps/agent6,https://tfhub.dev/deepmind/spiral/default-wgangp-celebahq64-gen-19steps/agent6/1?tf-hub-format=compressed +tf2-preview/nnlm-id-dim128,https://tfhub.dev/google/tf2-preview/nnlm-id-dim128/1?tf-hub-format=compressed +LEALLA/LEALLA-large,https://tfhub.dev/google/LEALLA/LEALLA-large/1?tf-hub-format=compressed +compare_gan/model_13_cifar10_resnet_cifar,https://tfhub.dev/google/compare_gan/model_13_cifar10_resnet_cifar/1?tf-hub-format=compressed +distilkobert_cased_L-3_H-768_A-12,https://tfhub.dev/jeongukjae/distilkobert_cased_L-3_H-768_A-12/1?tf-hub-format=compressed +imagenet/mobilenet_v1_100_192/quantops/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v1_100_192/quantops/feature_vector/3?tf-hub-format=compressed +vit_b16_classification,https://tfhub.dev/sayakpaul/vit_b16_classification/1?tf-hub-format=compressed +convnext_base_21k_1k_224,https://tfhub.dev/sayakpaul/convnext_base_21k_1k_224/1?tf-hub-format=compressed +compare_gan/model_11_cifar10_resnet_cifar,https://tfhub.dev/google/compare_gan/model_11_cifar10_resnet_cifar/1?tf-hub-format=compressed +wiki40b-lm-fi,https://tfhub.dev/google/wiki40b-lm-fi/1?tf-hub-format=compressed +vit_r26_s32_medaug_fe,https://tfhub.dev/sayakpaul/vit_r26_s32_medaug_fe/1?tf-hub-format=compressed 
+distill_bit_r50x1_224_feature_extraction,https://tfhub.dev/sayakpaul/distill_bit_r50x1_224_feature_extraction/1?tf-hub-format=compressed +small_bert/bert_uncased_L-2_H-256_A-4,https://tfhub.dev/google/small_bert/bert_uncased_L-2_H-256_A-4/2?tf-hub-format=compressed +edgetpu/vision/autoseg-edgetpu/fused_argmax/s,https://tfhub.dev/google/edgetpu/vision/autoseg-edgetpu/fused_argmax/s/1?tf-hub-format=compressed +image_augmentation/flipx_crop_rotate_color,https://tfhub.dev/google/image_augmentation/flipx_crop_rotate_color/1?tf-hub-format=compressed +circularnet_3,https://tfhub.dev/google/circularnet_3/1?tf-hub-format=compressed +random-nnlm-en-dim50,https://tfhub.dev/google/random-nnlm-en-dim50/1?tf-hub-format=compressed +mixer_b16_i21k_classification,https://tfhub.dev/sayakpaul/mixer_b16_i21k_classification/1?tf-hub-format=compressed +imagenet/mobilenet_v1_025_160/quantops/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_025_160/quantops/classification/3?tf-hub-format=compressed +imagenet/mobilenet_v1_075_224/quantops/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v1_075_224/quantops/feature_vector/3?tf-hub-format=compressed +imagenet/mobilenet_v1_050_192/quantops/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v1_050_192/quantops/feature_vector/3?tf-hub-format=compressed +experts/bit/r50x1/in21k/herb,https://tfhub.dev/google/experts/bit/r50x1/in21k/herb/1?tf-hub-format=compressed +sup-exemplar-100,https://tfhub.dev/vtab/sup-exemplar-100/1?tf-hub-format=compressed +mixer_b16_sam_classification,https://tfhub.dev/sayakpaul/mixer_b16_sam_classification/1?tf-hub-format=compressed +imagenet/mobilenet_v1_075_224/quantops/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_075_224/quantops/classification/3?tf-hub-format=compressed +wav2vec2-robust,https://tfhub.dev/vasudevgupta7/wav2vec2-robust/1?tf-hub-format=compressed +mixer_l16_i1k_classification,https://tfhub.dev/sayakpaul/mixer_l16_i1k_classification/1?tf-hub-format=compressed +planet/vision/classifier/planet_v2,https://tfhub.dev/google/planet/vision/classifier/planet_v2/1?tf-hub-format=compressed +wiki40b-lm-lv,https://tfhub.dev/google/wiki40b-lm-lv/1?tf-hub-format=compressed +sentence-t5/st5-11b,https://tfhub.dev/google/sentence-t5/st5-11b/1?tf-hub-format=compressed +gtr/gtr-xxl,https://tfhub.dev/google/gtr/gtr-xxl/1?tf-hub-format=compressed +swin_base_patch4_window7_224_fe,https://tfhub.dev/sayakpaul/swin_base_patch4_window7_224_fe/1?tf-hub-format=compressed +small_bert/bert_uncased_L-8_H-128_A-2,https://tfhub.dev/google/small_bert/bert_uncased_L-8_H-128_A-2/2?tf-hub-format=compressed +mmt/data_mscoco,https://tfhub.dev/deepmind/mmt/data_mscoco/1?tf-hub-format=compressed +vit_r26_s32_lightaug_classification,https://tfhub.dev/sayakpaul/vit_r26_s32_lightaug_classification/1?tf-hub-format=compressed +compare_gan/s3gan_10_256x256,https://tfhub.dev/google/compare_gan/s3gan_10_256x256/1?tf-hub-format=compressed +experts/bit/r50x1/in21k/tree,https://tfhub.dev/google/experts/bit/r50x1/in21k/tree/1?tf-hub-format=compressed +small_bert/bert_uncased_L-6_H-768_A-12,https://tfhub.dev/google/small_bert/bert_uncased_L-6_H-768_A-12/2?tf-hub-format=compressed +imagenet/mobilenet_v2_075_96/classification,https://tfhub.dev/google/imagenet/mobilenet_v2_075_96/classification/5?tf-hub-format=compressed +mixer_b16_i1k_classification,https://tfhub.dev/sayakpaul/mixer_b16_i1k_classification/1?tf-hub-format=compressed 
+edgetpu/nlp/mobilebert-edgetpu/s,https://tfhub.dev/google/edgetpu/nlp/mobilebert-edgetpu/s/1?tf-hub-format=compressed +convnext_large_21k_1k_384,https://tfhub.dev/sayakpaul/convnext_large_21k_1k_384/1?tf-hub-format=compressed +llr-pretrain-adv/latents,https://tfhub.dev/deepmind/llr-pretrain-adv/latents/1?tf-hub-format=compressed +imagenet/mobilenet_v1_025_224/quantops/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v1_025_224/quantops/feature_vector/3?tf-hub-format=compressed +experts/bit/r50x1/in21k/consumer_goods,https://tfhub.dev/google/experts/bit/r50x1/in21k/consumer_goods/1?tf-hub-format=compressed +swin_base_patch4_window7_224,https://tfhub.dev/sayakpaul/swin_base_patch4_window7_224/1?tf-hub-format=compressed +swin_small_patch244_window877_kinetics400_1k,https://tfhub.dev/shoaib6174/swin_small_patch244_window877_kinetics400_1k/1?tf-hub-format=compressed +experts/bit/r50x1/in21k/covering,https://tfhub.dev/google/experts/bit/r50x1/in21k/covering/1?tf-hub-format=compressed +HRNet/msegpcontext-hrnetv2-w48,https://tfhub.dev/google/HRNet/msegpcontext-hrnetv2-w48/1?tf-hub-format=compressed +convnext_large_21k_1k_224_fe,https://tfhub.dev/sayakpaul/convnext_large_21k_1k_224_fe/1?tf-hub-format=compressed +mmt/baseline-ft_baseline-no-bert-transfer,https://tfhub.dev/deepmind/mmt/baseline-ft_baseline-no-bert-transfer/1?tf-hub-format=compressed +wiki40b-lm-el,https://tfhub.dev/google/wiki40b-lm-el/1?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_ft1k_b2/classification,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b2/classification/2?tf-hub-format=compressed +wiki40b-lm-ro,https://tfhub.dev/google/wiki40b-lm-ro/1?tf-hub-format=compressed +convnext_base_21k_1k_224_fe,https://tfhub.dev/sayakpaul/convnext_base_21k_1k_224_fe/1?tf-hub-format=compressed +imagenet/mobilenet_v1_025_192/quantops/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v1_025_192/quantops/feature_vector/3?tf-hub-format=compressed +experts/bit/r50x1/in21k/foodstuff,https://tfhub.dev/google/experts/bit/r50x1/in21k/foodstuff/1?tf-hub-format=compressed +imagenet/mobilenet_v1_075_128/quantops/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_075_128/quantops/classification/3?tf-hub-format=compressed +wiki40b-lm-ms,https://tfhub.dev/google/wiki40b-lm-ms/1?tf-hub-format=compressed +convnext_large_1k_224,https://tfhub.dev/sayakpaul/convnext_large_1k_224/1?tf-hub-format=compressed +convnext_large_21k_1k_384_fe,https://tfhub.dev/sayakpaul/convnext_large_21k_1k_384_fe/1?tf-hub-format=compressed +mobilevit_s_1k_256_fe,https://tfhub.dev/sayannath/mobilevit_s_1k_256_fe/1?tf-hub-format=compressed +experts/bit/r50x1/in21k/abstraction,https://tfhub.dev/google/experts/bit/r50x1/in21k/abstraction/1?tf-hub-format=compressed +wiki40b-lm-th,https://tfhub.dev/google/wiki40b-lm-th/1?tf-hub-format=compressed +imagenet/mobilenet_v1_100_160/quantops/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_100_160/quantops/classification/3?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_b1/classification,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b1/classification/2?tf-hub-format=compressed +mmt/data_sbu,https://tfhub.dev/deepmind/mmt/data_sbu/1?tf-hub-format=compressed +mmt/data_combined-instance,https://tfhub.dev/deepmind/mmt/data_combined-instance/1?tf-hub-format=compressed +semi-exemplar-10,https://tfhub.dev/vtab/semi-exemplar-10/1?tf-hub-format=compressed 
+swin_tiny_patch4_window7_224,https://tfhub.dev/sayakpaul/swin_tiny_patch4_window7_224/1?tf-hub-format=compressed +vila/image,https://tfhub.dev/google/vila/image/1?tf-hub-format=compressed +imagenet/mobilenet_v1_075_160/quantops/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_075_160/quantops/classification/3?tf-hub-format=compressed +convnext_large_21k_1k_224,https://tfhub.dev/sayakpaul/convnext_large_21k_1k_224/1?tf-hub-format=compressed +mmt/architecture_vilbert-4block,https://tfhub.dev/deepmind/mmt/architecture_vilbert-4block/1?tf-hub-format=compressed +imagenet/mobilenet_v1_050_160/quantops/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_050_160/quantops/classification/3?tf-hub-format=compressed +mmt/architecture-ft_language-q-12,https://tfhub.dev/deepmind/mmt/architecture-ft_language-q-12/1?tf-hub-format=compressed +convnext_small_1k_224_fe,https://tfhub.dev/sayakpaul/convnext_small_1k_224_fe/1?tf-hub-format=compressed +bit_resnet152x2_224_classification,https://tfhub.dev/sayakpaul/bit_resnet152x2_224_classification/1?tf-hub-format=compressed +experts/bit/r50x1/in21k/relation,https://tfhub.dev/google/experts/bit/r50x1/in21k/relation/1?tf-hub-format=compressed +mmt/architecture_single-modality,https://tfhub.dev/deepmind/mmt/architecture_single-modality/1?tf-hub-format=compressed +convnext_xlarge_21k_1k_224_fe,https://tfhub.dev/sayakpaul/convnext_xlarge_21k_1k_224_fe/1?tf-hub-format=compressed +convnext_base_1k_384_fe,https://tfhub.dev/sayakpaul/convnext_base_1k_384_fe/1?tf-hub-format=compressed +compare_gan/model_4_lsun_bedroom_resnet19,https://tfhub.dev/google/compare_gan/model_4_lsun_bedroom_resnet19/1?tf-hub-format=compressed +experts/bit/r50x1/in21k/whole,https://tfhub.dev/google/experts/bit/r50x1/in21k/whole/1?tf-hub-format=compressed +movinet/a3/stream/kinetics-600/classification,https://tfhub.dev/tensorflow/movinet/a3/stream/kinetics-600/classification/3?tf-hub-format=compressed +experts/bit/r50x1/in21k/part,https://tfhub.dev/google/experts/bit/r50x1/in21k/part/1?tf-hub-format=compressed +cait_s24_224_fe,https://tfhub.dev/sayakpaul/cait_s24_224_fe/1?tf-hub-format=compressed +experts/bit/r50x1/in21k/vertebrate,https://tfhub.dev/google/experts/bit/r50x1/in21k/vertebrate/1?tf-hub-format=compressed +convnext_base_1k_224,https://tfhub.dev/sayakpaul/convnext_base_1k_224/1?tf-hub-format=compressed +deit_base_patch16_384,https://tfhub.dev/sayakpaul/deit_base_patch16_384/1?tf-hub-format=compressed +deit_small_distilled_patch16_224_fe,https://tfhub.dev/sayakpaul/deit_small_distilled_patch16_224_fe/1?tf-hub-format=compressed +llr-pretrain-adv/linear,https://tfhub.dev/deepmind/llr-pretrain-adv/linear/1?tf-hub-format=compressed +edgetpu/vision/mobilenet-edgetpu-v2-feature-vector/m,https://tfhub.dev/google/edgetpu/vision/mobilenet-edgetpu-v2-feature-vector/m/2?tf-hub-format=compressed +mmt/architecture_vilbert-2block,https://tfhub.dev/deepmind/mmt/architecture_vilbert-2block/1?tf-hub-format=compressed +mobilevit_xs_1k_256_fe,https://tfhub.dev/sayannath/mobilevit_xs_1k_256_fe/1?tf-hub-format=compressed +convnext_base_21k_1k_384,https://tfhub.dev/sayakpaul/convnext_base_21k_1k_384/1?tf-hub-format=compressed +imagenet/mobilenet_v2_100_128/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_100_128/feature_vector/5?tf-hub-format=compressed +jigsaw,https://tfhub.dev/vtab/jigsaw/1?tf-hub-format=compressed 
+imagenet/efficientnet_v2_imagenet21k_s/classification,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_s/classification/2?tf-hub-format=compressed +imagenet/mobilenet_v2_075_160/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_075_160/feature_vector/5?tf-hub-format=compressed +experts/bit/r50x1/in21k/matter,https://tfhub.dev/google/experts/bit/r50x1/in21k/matter/1?tf-hub-format=compressed +imagenet/mobilenet_v2_035_224/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_035_224/feature_vector/5?tf-hub-format=compressed +klue_roberta_cased_L-24_H-1024_A-16,https://tfhub.dev/jeongukjae/klue_roberta_cased_L-24_H-1024_A-16/1?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_ft1k_b1/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b1/feature_vector/2?tf-hub-format=compressed +experts/bert/wiki_books/squad2,https://tfhub.dev/google/experts/bert/wiki_books/squad2/2?tf-hub-format=compressed +convnext_tiny_1k_224_fe,https://tfhub.dev/sayakpaul/convnext_tiny_1k_224_fe/1?tf-hub-format=compressed +tf2-preview/nnlm-zh-dim50-with-normalization,https://tfhub.dev/google/tf2-preview/nnlm-zh-dim50-with-normalization/1?tf-hub-format=compressed +wiki40b-lm-de,https://tfhub.dev/google/wiki40b-lm-de/1?tf-hub-format=compressed +mixer_b32_sam_fe,https://tfhub.dev/sayakpaul/mixer_b32_sam_fe/1?tf-hub-format=compressed +small_bert/bert_en_uncased_L-8_H-512_A-8,https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-512_A-8/2?tf-hub-format=compressed +tiny_video_net/mobile_1,https://tfhub.dev/google/tiny_video_net/mobile_1/1?tf-hub-format=compressed +vgg19-block5-conv2-unpooling-encoder,https://tfhub.dev/emilutz/vgg19-block5-conv2-unpooling-encoder/1?tf-hub-format=compressed +mmt/data-ft_sbu,https://tfhub.dev/deepmind/mmt/data-ft_sbu/1?tf-hub-format=compressed +swin_s3_base_224,https://tfhub.dev/sayakpaul/swin_s3_base_224/1?tf-hub-format=compressed +mmt/data_cc-with-bert,https://tfhub.dev/deepmind/mmt/data_cc-with-bert/1?tf-hub-format=compressed +experts/bit/r50x1/in21k/instrument,https://tfhub.dev/google/experts/bit/r50x1/in21k/instrument/1?tf-hub-format=compressed +imagenet/mobilenet_v3_small_100_224/classification,https://tfhub.dev/google/imagenet/mobilenet_v3_small_100_224/classification/5?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet1k_b1/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b1/feature_vector/2?tf-hub-format=compressed +imagenet/efficientnet_v2_imagenet21k_xl/classification,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_xl/classification/2?tf-hub-format=compressed +bit/m-r152x4/imagenet21k_classification,https://tfhub.dev/google/bit/m-r152x4/imagenet21k_classification/1?tf-hub-format=compressed +tf2-preview/nnlm-id-dim50,https://tfhub.dev/google/tf2-preview/nnlm-id-dim50/1?tf-hub-format=compressed +small_bert/bert_en_uncased_L-10_H-768_A-12,https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-768_A-12/2?tf-hub-format=compressed +unet/industrial/class_2,https://tfhub.dev/nvidia/unet/industrial/class_2/1?tf-hub-format=compressed +imagenet/mobilenet_v1_025_224/quantops/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_025_224/quantops/classification/3?tf-hub-format=compressed +efficientnet/b0/classification,https://tfhub.dev/tensorflow/efficientnet/b0/classification/1?tf-hub-format=compressed +convnext_large_1k_384_fe,https://tfhub.dev/sayakpaul/convnext_large_1k_384_fe/1?tf-hub-format=compressed 
+imagenet/efficientnet_v2_imagenet21k_b2/classification,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b2/classification/2?tf-hub-format=compressed +vgg19-block2-conv2-unpooling-decoder,https://tfhub.dev/emilutz/vgg19-block2-conv2-unpooling-decoder/1?tf-hub-format=compressed +faster_rcnn/resnet50_v1_1024x1024,https://tfhub.dev/tensorflow/faster_rcnn/resnet50_v1_1024x1024/1?tf-hub-format=compressed +Wiki-words-500-with-normalization,https://tfhub.dev/google/Wiki-words-500-with-normalization/2?tf-hub-format=compressed +wiki40b-lm-zh-tw,https://tfhub.dev/google/wiki40b-lm-zh-tw/1?tf-hub-format=compressed diff --git a/tests/e2e_tests/pipelines/tf_hub/precommit.yml b/tests/e2e_tests/pipelines/tf_hub/precommit.yml new file mode 100644 index 00000000000000..2ee145dd0dc29f --- /dev/null +++ b/tests/e2e_tests/pipelines/tf_hub/precommit.yml @@ -0,0 +1,26 @@ +vision/embedder/fungi_V2,https://tfhub.dev/svampeatlas/vision/embedder/fungi_V2/1?tf-hub-format=compressed,skip,Model is not available +movenet/singlepose/lightning,https://www.kaggle.com/models/google/movenet/frameworks/tensorFlow2/variations/singlepose-lightning/versions/4 +imagenet/resnet_v2_50/feature_vector,https://www.kaggle.com/models/google/resnet-v2/frameworks/tensorFlow2/variations/50-feature-vector/versions/2 +movenet/singlepose/thunder,https://www.kaggle.com/models/google/movenet/frameworks/tensorFlow2/variations/singlepose-thunder/versions/4 +imagenet/mobilenet_v2_100_224/feature_vector,https://www.kaggle.com/models/google/mobilenet-v2/frameworks/tensorFlow2/variations/100-224-feature-vector/versions/2 +efficientnet/lite0/classification,https://www.kaggle.com/models/tensorflow/efficientnet/frameworks/tensorFlow1/variations/lite0-classification/versions/2 +movenet/multipose/lightning,https://www.kaggle.com/models/google/movenet/frameworks/tensorFlow2/variations/multipose-lightning/versions/1 +imagenet/efficientnet_v2_imagenet1k_b0/feature_vector,https://www.kaggle.com/models/google/efficientnet-v2/frameworks/tensorFlow2/variations/imagenet1k-b0-feature-vector/versions/2 +imagenet/mobilenet_v1_100_224/classification,https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow2/variations/100-224-classification/versions/2 +magenta/arbitrary-image-stylization-v1-256,https://www.kaggle.com/models/google/arbitrary-image-stylization-v1/frameworks/tensorFlow1/variations/256/versions/2 +small_bert/bert_en_uncased_L-4_H-256_A-4,https://www.kaggle.com/models/tensorflow/bert/frameworks/tensorFlow2/variations/bert-en-uncased-l-4-h-256-a-4/versions/2 +movinet/a5/base/kinetics-600/classification,https://www.kaggle.com/models/google/movinet/frameworks/tensorFlow2/variations/a5-base-kinetics-600-classification/versions/3 +# models with TensorListConcatV2 op +efficientdet/lite0/detection,https://www.kaggle.com/models/tensorflow/efficientdet/frameworks/tensorFlow2/variations/lite0-detection/versions/1,skip,105671 - extend support for TensorList* ops when element_shape has undefined dimension +# model with complex tensors and FFT ops +yamnet,https://www.kaggle.com/models/google/yamnet/frameworks/tensorFlow2/variations/yamnet/versions/1 +# secure notebook models +unet/industrial/class_1,https://tfhub.dev/nvidia/unet/industrial/class_1/1?tf-hub-format=compressed,skip,Model is not available +movenet/singlepose/thunder,https://www.kaggle.com/models/google/movenet/frameworks/tensorFlow2/variations/singlepose-thunder/versions/4 
+esrgan-tf2,https://www.kaggle.com/models/kaggle/esrgan-tf2/frameworks/tensorFlow2/variations/esrgan-tf2/versions/1
+film,https://www.kaggle.com/models/google/film/frameworks/tensorFlow2/variations/film/versions/1
+planet/vision/classifier/planet_v2,https://www.kaggle.com/models/google/planet-v2/frameworks/tensorFlow1/variations/planet-vision-classifier-planet-v2/versions/1
+# TF1 models in .pb format
+i3d-rgb,https://storage.openvinotoolkit.org/repositories/open_model_zoo/public/2022.1/i3d-rgb-tf/rgb.frozen.pb
+# Model with SentencePiece tokenizer, use openvino-tokenizers package
+universal-sentence-encoder-multilingual,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/multilingual/versions/2,skip, 129480 - Add openvino-tokenizers wheel build to OpenVINO GHA Workflow
\ No newline at end of file
diff --git a/tests/e2e_tests/pipelines/tf_hub/tf_hub.py b/tests/e2e_tests/pipelines/tf_hub/tf_hub.py
new file mode 100644
index 00000000000000..efff0819c6c82e
--- /dev/null
+++ b/tests/e2e_tests/pipelines/tf_hub/tf_hub.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+import sys
+
+from e2e_tests.test_utils.test_utils import class_factory
+from e2e_tests.pipelines.tf_hub.tf_hub_case_class import TFHUB_eltwise_Base
+
+
+def get_models_list(file_name: str):
+    models = []
+    with open(file_name) as f:
+        for model_info in f:
+            # skip comment in model scope file
+            if model_info.startswith('#'):
+                continue
+            mark = None
+            reason = None
+            assert len(model_info.split(',')) == 2 or len(model_info.split(',')) == 4, \
+                "Incorrect model info `{}`. It must contain either 2 or 4 fields.".format(model_info)
+            if len(model_info.split(',')) == 2:
+                model_name, model_link = model_info.split(',')
+            elif len(model_info.split(',')) == 4:
+                model_name, model_link, mark, reason = model_info.split(',')
+                assert mark == "skip", "Incorrect failure mark for model info {}".format(model_info)
+            models.append((model_name, model_link.strip(), mark, reason))
+
+    return models
+
+
+model_files = ['precommit']
+models = []
+for file in model_files:
+    models += get_models_list(os.path.join(os.path.dirname(__file__), f"{file}.yml"))
+
+base_class = TFHUB_eltwise_Base
+
+for model in models:
+    class_name = model[0]
+    model_link = model[1]
+    if sys.platform == 'win32':
+        model_link = model_link.split('?')[0]
+    locals()[class_name] = class_factory(cls_name=class_name,
+                                         cls_kwargs={'__is_test_config__': True,
+                                                     'model_name': class_name,
+                                                     'model_link': model_link},
+                                         BaseClass=base_class)
+
+
diff --git a/tests/e2e_tests/pipelines/tf_hub/tf_hub_case_class.py b/tests/e2e_tests/pipelines/tf_hub/tf_hub_case_class.py
new file mode 100644
index 00000000000000..7b2f7300a7610f
--- /dev/null
+++ b/tests/e2e_tests/pipelines/tf_hub/tf_hub_case_class.py
@@ -0,0 +1,30 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+from collections import OrderedDict
+
+from e2e_tests.common.common.common_base_class import CommonConfig
+from e2e_tests.pipelines.pipeline_templates.comparators_template import eltwise_comparators
+from e2e_tests.pipelines.pipeline_templates.infer_templates import common_infer_step
+from e2e_tests.pipelines.pipeline_templates.ir_gen_templates import common_ir_generation
+
+
+class TFHUB_eltwise_Base(CommonConfig):
+    def __init__(self, device, precision, **kwargs):
+        self.model = {"load_model":
+                          {"load_tf_hub_model":
+                               {"model_name": self.model_name,
+                                'model_link':
self.model_link, + }}} + self.input = {"read_input": + {"generate_tf_hub_inputs": {}}} + + self.ref_pipeline = {"get_refs_tf_hub": + {'score_tf_hub': {}}} + + self.ie_pipeline = OrderedDict([ + common_ir_generation(mo_out=self.environment["mo_out"], + precision=precision), + common_infer_step(device=device, **kwargs) + ]) + self.comparators = eltwise_comparators(precision=precision, device=device) diff --git a/tests/e2e_tests/pytest.ini b/tests/e2e_tests/pytest.ini new file mode 100644 index 00000000000000..2bf1a4fa5d177c --- /dev/null +++ b/tests/e2e_tests/pytest.ini @@ -0,0 +1,68 @@ +[pytest] +#rp_uuid = a69aebe9-a68d-471e-ac6a-c04988c36d24 +#rp_endpoint = http://nncv-reportportal.inn.intel.com:8080/ +#rp_project = oss_tests +#rp_launch = oss_tests +#rp_launch_tags = 'nightly' 'oss_tests' +#rp_ignore_errors = True +#rp_ignore_tags = 'xfail' 'usefixture' +timeout = 600 +log_format = [ %(levelname)s ] %(message)s +log_level=INFO +addopts = --show-capture=log +# --html=e2e_tests_report.html +# --self-contained-html +# --junitxml=e2e_tests_report.xml + --strict + # rp_log_level requires number as an argument instead of readable + # string so I consider passing it to addopts is better + # --rp-log-level="INFO" + #-s # this option enables runtime stdout printing +markers = + timeout: known marker + hookwrapper: known marker + no_comparison: known marker + winml: known marker + onnx: known marker + fuse: known marker + FP32: known marker + classification: known marker + caffe: known marker + object_detection: known marker + od: known marker + segmentation: known marker + yolo: known marker + caffe2: known marker + opset8: known marker + opset7: known marker + opset9: known marker + opset10: known marker + opset11: known marker + speech: known marker + kaldi: known marker + nnet3: known marker + nnet2: known marker + nnet1 : known marker + mxnet: known marker + onnx_runtime: known marker + style_transfer: known marker + onnx_precollected_data: known marker + pytorch: known marker + pytorch_hf: known_marker + tf: known marker + mask: known marker + broken_test: known marker + test_group: known marker + api_on_commit: known marker + api_regression: known marker + api_enabling: known marker + components: known marker + reqids: known marker + bugs: known marker + Pytorch_group_0: known marker + Pytorch_group_1: known marker + Pytorch_group_2: known marker + Pytorch_group_3: known marker + Pytorch_group_4: known marker + Pytorch_group_5: known marker + Pytorch_group_6: known marker diff --git a/tests/e2e_tests/requirements.txt b/tests/e2e_tests/requirements.txt new file mode 100644 index 00000000000000..b841185be8bbad --- /dev/null +++ b/tests/e2e_tests/requirements.txt @@ -0,0 +1,52 @@ +# This file contains baseline versions of Python modules needed to run various OpenVINO tests +# Some of them is used by OpenVINO itself as well. +# It means all requirement versions should be aligned with the product requirements. 
+# We pin package versions to stabilize test runs
+
+# for common utils
+py-cpuinfo==7.0.0
+scipy>=1.5.4,<1.12
+opencv-python>=4.5; sys_platform != "darwin"
+opencv-python==4.6.0.66; sys_platform == "darwin"
+# test-generator==0.1.1
+unittest-xml-reporting==3.0.4
+lpips==0.1.3
+
+# for utils/e2e/comparator note: python 3.6 wheels are not available since 0.18
+# Add upper-bound due to CVS-105039, CVS-105040
+scikit-image>=0.17.2
+
+
+# for utils legacy
+tabulate==0.9.0
+
+pytest>=5.0,<=7.0.1; python_version < '3.10'
+pytest==7.2.0; python_version >= '3.10'
+pytest-cov==2.11.1
+# pytest-html==1.19.0
+pytest-html
+pytest-json-report==1.5.0
+# pytest-metadata==1.7.0
+pytest-xdist==2.1.0
+pytest-timeout==2.2.0
+
+# for common utils, e2e_tests
+openvino-dev
+distro==1.9.0
+pyyaml==6.0
+jsonschema==4.17.0
+# filelock==3.9.0
+omegaconf>=2.1,<2.4
+pycocotools>=2.0.6
+
+
+# For Torch tests
+pretrainedmodels==0.7.4
+timm>=0.9.2
+deepctr-torch
+
+# To avoid conflicting dependencies
+numpy
+
+# Tensorflow-hub tests
+tensorflow-hub
diff --git a/tests/e2e_tests/reshape_test_rules.yml b/tests/e2e_tests/reshape_test_rules.yml
new file mode 100644
index 00000000000000..2b2bcab0f75f69
--- /dev/null
+++ b/tests/e2e_tests/reshape_test_rules.yml
@@ -0,0 +1,33 @@
+# Reshape test rules configuration file
+
+[
+  {
+    rules: [
+      { model: ONNX_Runtime_ResNeXt101_32x32d_wsl, device: [ CPU ] }, # CVS-88083
+      { model: PDPD_ResNeXt101_32x32d_wsl, device: [ CPU ] }, # CVS-92692
+      { model: Pytorch_Blip, device: [ CPU ] }, # CVS-105259
+      { model: Pytorch_BridgeTower, device: [ CPU ] }, # CVS-108319
+      { model: Pytorch_Stable_Diffusion_2_1_Text_Encoder, device: [ CPU ]}, # (CVS-110572)
+      { model: Pytorch_Stable_Diffusion_2_1_Unet, device: [ CPU ]}, # (CVS-110572)
+      { model: Pytorch_Stable_Diffusion_2_1_Vae_Decoder, device: [ CPU ]}, # (CVS-110572)
+      { model: Pytorch_Stable_Diffusion_2_1_Vae_Encoder, device: [ CPU ]}, # (CVS-110572)
+      { model: Pytorch_Stable_Diffusion_2_Inpainting_Text_Encoder, device: [ CPU ]}, # (CVS-110572)
+      { model: Pytorch_Stable_Diffusion_2_Inpainting_Unet, device: [ CPU ] }, # (CVS-110572)
+      { model: Pytorch_Stable_Diffusion_2_Inpainting_Vae_Decoder, device: [ CPU ]}, # (CVS-110572)
+      { model: Pytorch_Stable_Diffusion_2_Inpainting_Vae_Encoder, device: [ CPU ]}, # (CVS-110572)
+      { model: Pytorch_StableLM, device: [ CPU ]}, # Model was requested only for CPU CVS-111394
+      { model: Pytorch_Llama_3b_v2, device: [ CPU ] }, # (CVS-106319)
+
+      { model: ONNX_3D_UNet, device: [ CPU ] }, # CVS-88087
+
+      { model: Pytorch_Gpt_J_6B, device: [ CPU ] }, # CVS-110556
+
+      { model: TF_BlackMagic_Model_C, device: [ CPU ] }, # CVS-88049
+      { model: TF_Faster_RCNN_Inception_ResNet_v2_atrous_coco, device: [ GPU ] }, # leave only GPU as conversion for CPU takes more than 13 minutes and this model needs to be switched to new FE (CVS-99381)
+      { model: TF_Faster_RCNN_Inception_ResNet_v2_atrous_lowproposals_coco, device: [ GPU ] }, # leave only GPU as conversion for CPU takes more than 13 minutes and this model needs to be switched to new FE (CVS-99381)
+      { model: TF_Mask_RCNN_Inception_ResNet_v2_atrous_coco, device: [ GPU ] }, # leave only GPU as conversion for CPU takes more than 13 minutes and this model needs to be switched to new FE (CVS-99381)
+    ],
+
+    filter_by: model
+  }
+]
diff --git a/tests/e2e_tests/test_base.py b/tests/e2e_tests/test_base.py
new file mode 100644
index 00000000000000..66e5e773269b59
--- /dev/null
+++ b/tests/e2e_tests/test_base.py
@@ -0,0 +1,159 @@
+# Copyright (C) 2018-2024 Intel Corporation
+#
SPDX-License-Identifier: Apache-2.0 + +"""Main entry-point to run E2E OSS tests. + +Default run: +$ pytest test.py + +Options[*]: +--modules Paths to tests +--env_conf Path to environment config +--test_conf Path to test config + +[*] For more information see conftest.py +""" +# pylint:disable=invalid-name +import logging as log +import os +import re +from pathlib import Path +from shutil import rmtree + +import yaml +from e2e_tests.common.logger import get_logger +from e2e_tests.common.parsers import pipeline_cfg_to_string +from e2e_tests.test_utils.test_utils import log_timestamp, read_irs_mapping_file, get_ir_tag, check_mo_precision, \ + set_infer_precision_hint, remove_mo_args_oob, store_data_to_csv, timestamp +from e2e_tests.common.common.pipeline import Pipeline +from e2e_tests.common.comparator.container import ComparatorsContainer + +pytest_plugins = ('e2e_tests.common.plugins.e2e_test.conftest',) + +log = get_logger(__name__) + + +def _test_run(instance, pregen_irs, record_property, prepare_test_info, inference_precision_hint): + """Parameterized test. + + :param instance: test instance + :param pregen_irs: custom fixture. Provides path to a CSV-formatted file with IRs mapping + :param record_property: extra property for the calling test + :param instance: test instance + """ + # Name of tests group + prepare_test_info['pytestEntrypoint'] = 'E2E TF Hub: Base' + + ir_version = "v11" + + log.info("Running {test_id} test".format(test_id=instance.test_id)) + instance.prepare_prerequisites() + + log.debug("Test scenario:") + instance_model_pipeline = instance.model + model_pipeline = Pipeline(instance_model_pipeline) + log.debug("Model Pipeline:\n{}".format(pipeline_cfg_to_string(model_pipeline._config))) + model_pipeline.run() + model = model_pipeline.fetch_results() + + instance_input_pipeline = instance.input + input_pipeline = Pipeline(instance_input_pipeline, {'model_obj': model}) + log.debug("Input Pipeline:\n{}".format(pipeline_cfg_to_string(input_pipeline._config))) + input_pipeline.run() + feed_dict = input_pipeline.fetch_results() + + instance_ie_pipeline = instance.ie_pipeline + instance_ref_pipeline = instance.ref_pipeline + + ref_pipeline = Pipeline(instance_ref_pipeline, {'model_obj': model, 'feed_dict': feed_dict}) + + log.debug("Reference Pipeline:\n{}".format(pipeline_cfg_to_string(ref_pipeline._config))) + if ref_pipeline.steps: + with log_timestamp('reference pipeline'): + log.info("Running reference pipeline:") + ref_pipeline.run() + else: + log.warning("Reference pipeline is empty, no results comparison will be performed") + + log.info("Running inference pipeline:") + if pregen_irs and "get_ir" in instance_ie_pipeline: + try: + log.info("Searching pre-generated IR in IR's mapping: {} ...".format(pregen_irs)) + irs_mapping = read_irs_mapping_file(pregen_irs) + instance.required_params = {"sequence_length": instance.sequence_length} if type( + instance.sequence_length) == int else {} + ir_tag = get_ir_tag(instance.__class__.__name__, ir_version, instance.precision, + instance.batch, instance.required_params.get("sequence_length", None)) + if ir_tag not in irs_mapping: + log.warning("IR with tag '{}' not found in IRs mapping. " + "IR will be generated in runtime ...".format(ir_tag)) + else: + log.info("Found pre-generated IR entry in IRs mapping: {}.\nTrying to reuse it ..." + .format({ir_tag: irs_mapping[ir_tag]})) + pregen_ir_status, mo_log, xml, bin = irs_mapping[ir_tag] + if not pregen_ir_status: + log.error('IR pre-generation failed. 
IR will be generated in runtime ...') + else: + if not mo_log: + log.warning('IR was collected successfully, but MO log was not saved.') + else: + with open(mo_log, "r") as file: + mo_output = file.read() + log.info("Model Optimizer output:\n{output}".format(output=mo_output)) + if not (Path(xml).exists() and Path(bin).exists()): + log.error("One of IR's .xml or .bin files not found. IR will be generated in runtime ...") + else: + ir_http_path = "http://{}".format(re.sub(r"^[/|\\]+", "", str(xml)).replace("\\", "/")) + record_property("ir_link", ir_http_path) + instance_ie_pipeline["get_ir"] = {"pregenerated": {"xml": xml, "bin": bin}} + except Exception as ex: + log.error("Search of pre-generated IR failed with error: {err}." + " IR will be generated in runtime ...".format(err=ex)) + + check_mo_precision(instance_ie_pipeline) + + if instance_ie_pipeline.get('infer'): + instance_ie_pipeline = set_infer_precision_hint(instance, instance_ie_pipeline, inference_precision_hint) + ie_pipeline = Pipeline(instance_ie_pipeline, {'model_obj': model, 'feed_dict': feed_dict}) + log.debug("Inference Pipeline:\n{}".format(pipeline_cfg_to_string(ie_pipeline._config))) + ie_pipeline.run() + + comparators = ComparatorsContainer( + config=instance.comparators, + infer_result=ie_pipeline.fetch_results(), + reference=ref_pipeline.fetch_results(), + result_aligner=getattr(instance, 'align_results', None, ), + ) + + log.info("Running comparators:") + with log_timestamp('comparators'): + comparators.apply_postprocessors() + comparators.apply_all() + status = comparators.report_statuses() + assert status, "inferred model results != reference results" + + +def empty_dirs(env_conf): + test_config = None + with open(env_conf, 'r') as fd: + test_config = yaml.load(fd, Loader=yaml.FullLoader) + + for env_clean_dir_flag, test_cfg_dir_to_clean in [("TT_CLEAN_MO_OUT_DIR", 'mo_out'), + ("TT_CLEAN_PREGEN_IRS_DIR", 'pregen_irs_path'), + ("TT_CLEAN_INPUT_MODEL_DIR", 'input_model_dir')]: + clean_flag = True if os.environ.get(env_clean_dir_flag, 'False') == 'True' else False + if clean_flag: + dir_to_clean = test_config.get(test_cfg_dir_to_clean, '') + if os.path.exists(dir_to_clean): + log.info(f"Clear {dir_to_clean} dir") + rmtree(dir_to_clean) + + +def test_run(instance, pregen_irs, record_property, prepare_test_info, copy_input_files, env_conf, + inference_precision_hint): + try: + _test_run(instance, pregen_irs, record_property, prepare_test_info, inference_precision_hint) + except Exception as ex: + raise Exception(f'{timestamp()}') from ex + finally: + empty_dirs(env_conf) diff --git a/tests/e2e_tests/test_config_local.yml b/tests/e2e_tests/test_config_local.yml new file mode 100644 index 00000000000000..1bd7b13f8aef83 --- /dev/null +++ b/tests/e2e_tests/test_config_local.yml @@ -0,0 +1,12 @@ +device: + - CPU +precision: + - FP32 +# LSTM specific +sequence_length: + - 1 +# Kaldi specific +qb: + - 8 +device_mode: + - GNA_AUTO diff --git a/tests/e2e_tests/test_utils/__init__.py b/tests/e2e_tests/test_utils/__init__.py new file mode 100644 index 00000000000000..8ba81a92b19c53 --- /dev/null +++ b/tests/e2e_tests/test_utils/__init__.py @@ -0,0 +1,3 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + diff --git a/tests/e2e_tests/test_utils/coverage_runner.py b/tests/e2e_tests/test_utils/coverage_runner.py new file mode 100644 index 00000000000000..302da041e84639 --- /dev/null +++ b/tests/e2e_tests/test_utils/coverage_runner.py @@ -0,0 +1,90 @@ +# Copyright (C) 2018-2024 Intel Corporation 
+# SPDX-License-Identifier: Apache-2.0 + +#!/usr/bin/env python3 +""" + Copyright (c) 2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +""" + + +import argparse +import os +import sys +import subprocess +from glob import iglob + +parser = argparse.ArgumentParser(description="Runner for coverage measurement " + "for each tests explicitly") +parser.add_argument("-f", "--test-filters", "--test_filters", nargs="+", + help="Tests for which we want measure coverage separately", + required=True) +parser.add_argument("--py-cov-path", "--cov", + help="Path to python module which coverage is inspected.") +parser.add_argument("--py-cov-config", "--cov_config", + help="Path to python coverage configuration file") +parser.add_argument("--c-gcov-notes", + help="Path to gcov notes directory (C/C++ coverage)") +parser.add_argument("--output-dir", "--output_dir", + help="Path to directory where coverage info will be stored.", + default=os.path.join(os.getcwd(), "..", "reports")) +args = parser.parse_args() + + +def run_coverage(): + if not os.path.isdir(args.output_dir): + os.makedirs(args.output_dir) + + # run each tests separately + for test in args.test_filters: + env = dict(os.environ) + if args.c_gcov_notes: + with open(f"{args.c_gcov_notes}/build_prefix.txt") as f: + build_prefix = f.read().strip() + env["GCOV_PREFIX"] = args.c_gcov_notes + env["GCOV_PREFIX_STRIP"] = str(len(build_prefix.strip("/").split("/"))) + subprocess.run( + [ + "pytest", "collect_irs.py", + "-k", test, "-m", "not launch_only_if_manually_specified", + "--env_conf", ".automation/env_config.yml", + "--test_conf", ".automation/test_configs/coverage_test_config.yml", + "--modules", "pipelines", "-s", "--tb=native", + "--log-cli-level", "INFO", + "--pregen_irs", "irs_mapping.csv", + "--cov-report", f"xml:{args.output_dir}/{test}.xml", + "--cov", args.py_cov_path, + "--cov-config", args.py_cov_config + ], + cwd=f"{os.path.dirname(os.path.realpath(__file__))}/..", + env=env + ) + if args.c_gcov_notes: + output = f"{args.output_dir}/{test}.info" + subprocess.run([ + "grcov", "-t", "lcov", args.c_gcov_notes, + "--ignore", "/usr/*", + "--ignore", "*tbb*", + "--ignore", "*.inc", + "--ignore", "**/*thirdparty/pugixml*", + "-o", output + ]) + + # clean coverage data for the next test + for item in iglob(f"{args.c_gcov_notes}/**/*.gcda", recursive=True): + os.remove(item) + + +if __name__ == "__main__": + sys.exit(run_coverage()) diff --git a/tests/e2e_tests/test_utils/env_tools.py b/tests/e2e_tests/test_utils/env_tools.py new file mode 100644 index 00000000000000..136f483f50d88d --- /dev/null +++ b/tests/e2e_tests/test_utils/env_tools.py @@ -0,0 +1,44 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from pathlib import Path + + +class EnvironmentConfigException(Exception): + """ Environment configuration exception """ + + +class Environment: + """ + Environment used by tests. + + :attr env: environment dictionary. populated dynamically from environment + configuration file. 
+ """ + + env = {} + locked_dirs = [] + + @classmethod + def abs_path(cls, env_key, *paths): + """Construct absolute path by appending paths to environment value. + + :param cls: class + :param env_key: Environment.env key used to get the base path + :param paths: paths to be appended to Environment.env value + :return: absolute path string where Environment.env[env_key] is + appended with paths + """ + if not cls.env: + raise EnvironmentConfigException( + "Test environment is not initialized. " + "Please initialize environment by calling `fix_env_conf` function before usage." + ) + + if env_key not in cls.env: + raise EnvironmentConfigException( + f"Key {env_key} is absent in environment dictionary: {cls.env}\n" + f"Please check environment configuration file." + ) + + return str(Path(cls.env[env_key], *paths)) diff --git a/tests/e2e_tests/test_utils/get_test_info.py b/tests/e2e_tests/test_utils/get_test_info.py new file mode 100644 index 00000000000000..b8c02151fffcf1 --- /dev/null +++ b/tests/e2e_tests/test_utils/get_test_info.py @@ -0,0 +1,66 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from collections import defaultdict + + +class TestInfo: + extra_info = None + e2e_models_info = None + + def fill_mo_args(self, **mo_params): + if self.extra_info is None: + self.extra_info = defaultdict(list) + if self.e2e_models_info is None: + self.e2e_models_info = defaultdict(list) + + if self.extra_info.get('model_optimizer_args', None) is None: + self.extra_info.update({'model_optimizer_args': {}}) + + for mo_key in mo_params.keys(): + if mo_key == 'additional_args': + for add_arg, add_val in mo_params['additional_args'].items(): + self.fill_mo_args(**{add_arg: add_val}) + else: + self.extra_info['model_optimizer_args'].update({mo_key: str(mo_params[mo_key])}) + + def fill_extra_info(self, op_type, name, op, shape, dtype): + if self.extra_info is None: + self.extra_info = defaultdict(list) + if self.e2e_models_info is None: + self.e2e_models_info = defaultdict(list) + + if op_type == 'Const': + self.extra_info['Constants'].append({'name': name, + 'op': op, + 'shape': shape, + 'dtype': dtype}) + + if op_type == 'FakeConst': + self.extra_info['FakeConstants'].append({'name': name, + 'op': op, + 'shape': shape, + 'dtype': dtype}) + + if op_type == 'Variable': + self.extra_info['Variables'].append({'name': name, + 'op': op, + 'shape': shape, + 'dtype': dtype}) + + if op_type == 'Input': + self.extra_info['Inputs'].append({'name': name, + 'op': op, + 'shape': shape, + 'dtype': dtype}) + if op_type == 'Intermediate': + self.extra_info['Intermediates'].append({'name': name, + 'op': op, + 'shape': shape, + 'dtype': dtype}) + + if op_type == 'Output': + self.extra_info['Outputs'].append({'name': name, + 'op': op, + 'shape': shape, + 'dtype': dtype}) diff --git a/tests/e2e_tests/test_utils/modify_configs.py b/tests/e2e_tests/test_utils/modify_configs.py new file mode 100644 index 00000000000000..d848595befa80f --- /dev/null +++ b/tests/e2e_tests/test_utils/modify_configs.py @@ -0,0 +1,129 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from collections import OrderedDict +from copy import deepcopy + +from e2e_tests.test_utils.reshape_tests_utils import get_mo_input_with_frozen_values, reorder_shapes_to_old_api, \ + get_input_data +from e2e_tests.test_utils.test_utils import prepare_data_consecutive_inferences + + +def mo_reshape_config(pipeline, shapes, instance_class_name): + mo_config = deepcopy(pipeline) + + # we 
force model optimizer to generate already reshaped IR + mo_shape = deepcopy(shapes) # shapes: dict(name=list(shape_in_IE_layout)) + mo_arg_input = pipeline['get_ir']['get_ovc_model'].get('additional_args').get('input') + + if mo_arg_input is not None and "->" in mo_arg_input: + cmd_mo_input = get_mo_input_with_frozen_values(mo_arg_input, shapes) + mo_config['get_ir']['get_ovc_model']['additional_args'].update({'input': ','.join(list(map(str, cmd_mo_input)))}) + else: + mo_config['get_ir']['get_ovc_model']['additional_args'].update({ + 'input': ','.join(list(map(str, mo_shape.keys()))), + 'input_shape': ','.join(list(map(str, mo_shape.values()))), + }) + # prevent MO reshape keys usage + for attribute in ['batch']: + if attribute in mo_config['get_ir']['get_ovc_model']['additional_args']: + del mo_config['get_ir']['get_ovc_model']['additional_args'][attribute] + # prevent IE network modifications + infer_step_name = list(mo_config['infer'].keys())[0] + if 'network_modifiers' in mo_config["infer"][infer_step_name]: + del mo_config["infer"][infer_step_name]['network_modifiers'] + + mo_config = update_pre_post_process_reshape_config(mo_config, shapes, instance_class_name) + + return mo_config + + +def ie_reshape_config(pipeline, shapes, test_name): + ie_config = deepcopy(pipeline) + ie_shapes = deepcopy(shapes) + + if test_name.lower().startswith('tf') and 'ie_sync' in pipeline['infer']: + ie_shapes = reorder_shapes_to_old_api(shapes) + + # we force model optimizer to generate reshapable IR + ie_config['infer'][list(ie_config['infer'].keys())[0]]['network_modifiers'] = {'reshape': {'shapes': ie_shapes}} + ie_config = update_pre_post_process_reshape_config(ie_config, shapes, test_name) + + return ie_config + + +def update_pre_post_process_reshape_config(instance_ie_pipeline, shapes, instance_class_name, default_shapes=None, + changed_values=None, layout=None, changed_dims=None, + consecutive_infer=False): + config = deepcopy(instance_ie_pipeline) + ie_api = next(iter(config['infer'])) + + # preprocess stage + config['infer'][ie_api]['consecutive_infer'] = consecutive_infer + if 'preprocess' not in config: + stages = OrderedDict() + for stage in config: + stages[stage] = config[stage] + if stage == 'read_input': + stages['preprocess'] = OrderedDict() + config.clear() + config = stages + # There is no need to reorder 'default_shapes' since we do not run old API for dynamism + if instance_class_name.lower().startswith('tf') and 'ie_sync' in instance_ie_pipeline['infer']: + shapes = reorder_shapes_to_old_api(shapes) + if not consecutive_infer: + config['preprocess'].update(get_input_data(shapes)) + else: + config['preprocess'].update(prepare_data_consecutive_inferences(default_shapes, changed_values, layout, + changed_dims)) + + # postprocess stage + if 'postprocessor' in instance_ie_pipeline: + shape = iter(shapes.values()).__next__() + for action_name, action_attrs in instance_ie_pipeline['postprocessor'].items(): + if 'batch' in action_attrs: + action_attrs['batch'] = shape[0] + return config + + +def dynamism_config(instance_ie_pipeline, shapes, test_name, default_shapes, changed_values, layout, changed_dims, + consecutive_infer_num): + dynamic_config = deepcopy(instance_ie_pipeline) + + reshape_action_list = ['set_batch_using_reshape', 'reshape'] + infer_network_modifiers = {} + if dynamic_config['infer'][list(dynamic_config['infer'].keys())[0]].get('network_modifiers'): + for item in dynamic_config['infer'][list(dynamic_config['infer'].keys())[0]]['network_modifiers']: + if item not in 
reshape_action_list: + infer_network_modifiers[item] = \ + dynamic_config['infer'][list(dynamic_config['infer'].keys())[0]]['network_modifiers'][item] + + dynamic_config['infer'][list(dynamic_config['infer'].keys())[0]]['network_modifiers'] = { + 'reshape': {'shapes': shapes}} + dynamic_config['infer'][list(dynamic_config['infer'].keys())[0]]['network_modifiers'].update( + infer_network_modifiers) + if consecutive_infer_num: + dynamic_config = update_pre_post_process_reshape_config(dynamic_config, shapes, test_name, default_shapes, + changed_values, layout, changed_dims, + consecutive_infer_num) + + return dynamic_config + + +def get_original_model_importer_pipeline_config(instance_ie_pipeline): + """ + This function configures the pipeline which produces the results to be tested. + In this pipeline the ONNX model is loaded into IE directly from a .onnx file without MO + The network created from a model is then reshaped according to the configuration in 'shapes' + """ + ie_config = deepcopy(instance_ie_pipeline) + model_path = ie_config["get_ir"]["get_ovc_model"]["model"] + + # discard the IR generation with MO which comes from the original pipeline + del ie_config["get_ir"] + + # reconfigure pipeline to use IE ONNX reader step instead of default IE step + ie_api = next(iter(ie_config["infer"])) + ie_config["infer"][ie_api]["model_path"] = model_path + + return ie_config diff --git a/tests/e2e_tests/test_utils/path_utils.py b/tests/e2e_tests/test_utils/path_utils.py new file mode 100644 index 00000000000000..e3ac9355051493 --- /dev/null +++ b/tests/e2e_tests/test_utils/path_utils.py @@ -0,0 +1,227 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Path utils used across E2E tests framework.""" + +import datetime +import getpass +import hashlib +import logging as log +import math +import os +import re +import socket +import sys +import time +from contextlib import contextmanager +from glob import iglob +from pathlib import Path, PurePath +from typing import Union + +log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) + + +@contextmanager +def import_from(path): + """ + Import decorator to resolve module import issues + """ + path = os.path.abspath(os.path.realpath(path)) + sys.path.insert(0, path) + yield + sys.path.remove(path) + + +def resolve_file_path(file: str, as_str=False): + """Return absolute file path checking if it exists in the process.""" + path = Path(file).resolve() + if not path.is_file(): + raise FileNotFoundError("{} doesn't exist".format(path)) + if as_str: + return str(path) + return path + + +def resolve_dir_path(file: str, as_str=False): + """Return absolute directory path if it exists in the process.""" + path = Path(file).resolve() + if not path.is_dir(): + raise FileNotFoundError("{} doesn't exist".format(path)) + if as_str: + return str(path) + return path + + +def is_absolute(path: str): + """Check if given path is an absolute path.""" + return Path(path).is_absolute() + + +def is_writable(path: str): + """Check if given path has write access.""" + return os.access(path, os.W_OK) + + +def prepend_with_env_path(config_key, *paths): + """Prepend given paths with base path specified in env_config.yml for given config_key""" + # Local import to avoid circular dependency + from e2e_tests.test_utils.env_tools import Environment + return Environment.abs_path(config_key, *paths) + + +def search_model_path_recursively(config_key, model_name): + from e2e_tests.test_utils.env_tools import 
Environment + search_pattern = Environment.abs_path(config_key) + '/**/' + model_name + path_found = list(iglob(search_pattern, recursive=True)) + if len(path_found) == 1: + return path_found[0] + elif len(path_found) == 0: + raise FileNotFoundError("File not found for pattern {}".format(search_pattern)) + else: + raise ValueError("More than one file with {} name".format(model_name)) + + +def proto_from_model(caffemodel): + """Construct .prototxt path from model.caffemodel path.""" + return str(PurePath(caffemodel).with_suffix(".prototxt")) + + +def ref_from_model(model_name, framework, opset="", check_empty_ref_path=True, extension=".npz"): + """Construct reference path from model base name.""" + ref_filename = os.path.splitext(os.path.basename(model_name))[ + 0] + extension # split is needed in case filename contains . symbol + ref_path = prepend_with_env_path("references", framework, opset, ref_filename) + if check_empty_ref_path and not os.path.isfile(ref_path): + ref_path = prepend_with_env_path("references_repo", framework, opset, ref_filename) + return ref_path + + +def symbol_from_model(mxnetmodel): + """Construct symbolic graph path from mxnet model path.""" + # If mxnet model contains -NNNN patter (epochs number) it will be stripped + if re.search(r"(-[0-9]{4})", mxnetmodel): + return os.path.splitext(mxnetmodel)[0][:-5] + '-symbol.json' + else: + return os.path.splitext(mxnetmodel)[0] + '-symbol.json' + + +def md5(file_path): + hash_md5 = hashlib.md5() + if not os.path.exists(file_path): + return None + with open(file_path, "rb") as f: + for chunk in iter(lambda: f.read(4096), b""): + hash_md5.update(chunk) + return hash_md5.hexdigest() + + +class DirLockingHandler: + + def __init__(self, target_dir): + self.target_dir = resolve_dir_path(target_dir, as_str=True) + self.fallback_dir = os.getcwd() + self.writable = is_writable(self.target_dir) + self._lock_file = Path(self.target_dir) / '.lock' + + def is_locked(self): + return self._lock_file.exists() + + def lock(self): + # Local import to avoid cyclic import + from e2e_tests.test_utils.env_tools import Environment + if self.writable: + if not self.is_locked(): + log.info("Marking {} directory as locked".format(self.target_dir)) + self._lock_file.touch(exist_ok=False) + Environment.locked_dirs.append(self.target_dir) + lock_info = "Locked at {} by host {} process PID {} running under {}".format(datetime.datetime.now(), + socket.gethostname(), + os.getpid(), + getpass.getuser()) + self._lock_file.write_text(lock_info) + else: + raise PermissionError( + "Failed to lock target directory {} because it's not writable!".format(self.target_dir)) + + def unlock(self): + # Local import to avoid cyclic import + from e2e_tests.test_utils.env_tools import Environment + if self.is_locked(): + self._lock_file.unlink() + if self.target_dir in Environment.locked_dirs: + Environment.locked_dirs.remove(self.target_dir) + log.info("Marking {} directory as unlocked".format(self.target_dir)) + else: + log.warning("Target directory {} is not locked".format(self.target_dir)) + + def execute_after_unlock(self, max_wait_time: int = 600, + exec_after_unlock: callable = lambda *args, **kwargs: log.info("Directory unlocked"), + fallback_to_cwd=True, + *args, + **kwargs): + wait_iters = math.ceil(max_wait_time / 30) + if self.is_locked(): + log.info("Target directory {} locked".format(self.target_dir)) + for i in range(wait_iters): + if self.is_locked(): + log.info("[{}] Waiting for directory unlocking".format(i + 1)) + time.sleep(30) + else: + self.lock() 
+ try: + exec_after_unlock(*args, **kwargs) + except Exception as e: + log.error(str(e)) + finally: + self.unlock() + break + else: + if self.is_locked(): + if not fallback_to_cwd: + raise TimeoutError( + "Timeout exceeded. Directory {} was not unlocked after {} seconds.".format(self.target_dir, + max_wait_time)) + else: + # TODO: think about fallback latter + pass + + +def get_abs_path(entry: Union[str, Path]) -> Path: + """ Return pathlib.Path object representing absolute path for the entry """ + try: + path = Path(entry).expanduser().absolute() + except TypeError as type_error: + raise TypeError(f'"{entry}" is expected to be a path-like') from type_error + return path + + +def get_rel_path(entry: Union[str, Path], start_path: Union[str, Path]) -> Path: + """ Return pathlib.Path object representing path for the entry relative to start_path """ + return Path(entry).resolve().relative_to(Path(start_path).resolve()) + + +def get_dir_path(entry: Union[str, Path]) -> Path: + """Return pathlib.Path object representing + - absolute path for the entry if entry is directory, + - absolute path for the entry.parent if entry is file + """ + path = get_abs_path(entry) + return path if path.is_dir() else path.parent + + +def get_ir(search_dir: Path, model_name: str) -> dict: + """Look for IR (xml/bin files) with specified model_name in specified search_dir, return dict + with absolute paths to IR components if exist or empty dict otherwise, for example: + { model: /.xml, weights: /.bin } + """ + ir = {} + + filename_pattern = model_name or "*" + models_list = list(search_dir.glob(f"{filename_pattern}.xml")) + if models_list: + model = get_abs_path(models_list[0]) + weights = model.with_suffix(".bin") + if weights.exists(): + ir = {"model": model, "weights": weights} + + return ir diff --git a/tests/e2e_tests/test_utils/pytorch_loaders.py b/tests/e2e_tests/test_utils/pytorch_loaders.py new file mode 100644 index 00000000000000..23dcc785a516c5 --- /dev/null +++ b/tests/e2e_tests/test_utils/pytorch_loaders.py @@ -0,0 +1,107 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import importlib +import os +import sys + +import torch + + +class LoadPyTorchModel: + def __init__(self, module: str, args: dict, inputs: dict): + self.module = module + self.args = args + self.export = args.get('torch_export_method') + self.inputs_order = args.get('inputs_order') + self.inputs = inputs + self.model = None + + def load_model(self): + self.model = loader_map[self.module](self.module, self.args) + self.model.eval() + if self.export == 'trace': + self.inputs = self._convert_inputs() + self.model = self._trace() + if self.export == 'export': + self.model = self._script() + return self.model + + def _trace(self): + assert self.model, "Model should be loaded through 'load_model" + self.model = torch.jit.trace(self.model, self.inputs, strict=False) + return self.model + + def _script(self): + assert self.model, "Model should be loaded through 'load_model" + self.model = torch.jit.script(self.model, self.inputs) + return self.model + + def _convert_inputs(self): + helper = [] + if self.inputs_order: + for input_name in self.inputs_order: + helper.append(self.inputs[input_name]) + else: + helper = list(self.inputs.values()) + + return helper + + +def load_torchvision_model(module, args): + module = importlib.import_module(module) + + creator = getattr(module, args['model-name']) + model = creator(**args['model-param'], pretrained=True) + + return model + + +def load_cadene_model(module, 
args): + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + + module = importlib.import_module(module) + creator = getattr(module, args['model-name']) + model = creator(**args['model-param']) + + return model + + +def load_hugging_face_model(module, args): + module = importlib.import_module(module) + model = module.AutoModel.from_pretrained(args['model-name'], torchscript=True) + + return model + + +def load_timm_model(module, args): + module = importlib.import_module(module) + model = module.create_model(args['model-name'], pretrained=True) + + return model + + +def load_saved_jit_model(module, args): + module = importlib.import_module('torch') + return module.jit.load(args['model-path']) + + +def load_saved_model(module, args): + if args.get('model_class_path'): + sys.path.insert(0, os.path.abspath(args['model_class_path'])) + module = importlib.import_module(module) + return module.load(args['model-path']) + + +loader_map = { + 'torchvision.models': load_torchvision_model, + 'torchvision.models.detection': load_torchvision_model, + 'torchvision.models.optical_flow': load_torchvision_model, + 'pretrainedmodels': load_cadene_model, + 'pretrained': load_cadene_model, + 'transformers': load_hugging_face_model, + 'timm': load_timm_model, + 'torch_jit': load_saved_jit_model, + 'torch': load_saved_model +} diff --git a/tests/e2e_tests/test_utils/reshape_pipeline_executers.py b/tests/e2e_tests/test_utils/reshape_pipeline_executers.py new file mode 100644 index 00000000000000..bd3d27e9a09196 --- /dev/null +++ b/tests/e2e_tests/test_utils/reshape_pipeline_executers.py @@ -0,0 +1,25 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import logging as log + +from e2e_tests.test_utils.modify_configs import ie_sbs_reshape_config, ie_reshape_config, mo_reshape_config +from e2e_tests.common.common.pipeline import Pipeline + + +def ie_pipeline_runner(instance_ie_pipeline, shapes, test_name): + log.info('Executing IE reshape pipeline for {}'.format(test_name)) + ie_reshape_pipeline = ie_reshape_config(instance_ie_pipeline, shapes, test_name) + ie_reshape_pipeline = Pipeline(ie_reshape_pipeline) + ie_reshape_pipeline.run() + + return ie_reshape_pipeline + + +def mo_pipeline_runner(instance_ie_pipeline, shapes, test_name): + log.info('Executing MO reshape pipeline for {}'.format(test_name)) + mo_reshape_pipeline = mo_reshape_config(instance_ie_pipeline, shapes, test_name) + mo_reshape_pipeline = Pipeline(mo_reshape_pipeline) + mo_reshape_pipeline.run() + + return mo_reshape_pipeline diff --git a/tests/e2e_tests/test_utils/reshape_tests_utils.py b/tests/e2e_tests/test_utils/reshape_tests_utils.py new file mode 100644 index 00000000000000..5d5cfab6731d2b --- /dev/null +++ b/tests/e2e_tests/test_utils/reshape_tests_utils.py @@ -0,0 +1,320 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import copy +import logging as log +import re +from collections import defaultdict +from itertools import zip_longest +from types import SimpleNamespace + +import numpy as np + +from e2e_tests.common.test_utils import name_aligner +from e2e_tests.pipelines.pipeline_templates.comparators_template import dummy_comparators, eltwise_comparators +from e2e_tests.common.comparator.container import ComparatorsContainer + + +def should_run_reshape(instance) -> bool: + if not hasattr(instance, 'ie_pipeline'): + # test does not involve IE + return False + + if 'infer' not in instance.ie_pipeline: + # test does not involve Infer 
step + return False + + if 'get_ovc_model' not in instance.ie_pipeline['get_ir']: + # can not reshape without `mo` + return False + + if hasattr(instance, 'model_info') and instance.model_info.framework != 'dldt': + # downloader models with IRs only are not tested by reshape + return False + + if not hasattr(instance, 'input_descriptor'): + # no info for reshape was provided + log.info('Please, specify input_descriptor attribute for {}'.format(instance)) + return False + + if all([v.get('changeable_dims') is None for v in instance.input_descriptor.values()]): + # model was set as non-reshape-able + return False + + return True + + +def get_reshape_pipeline_pairs(instance) -> list: + supported_pipelines = ['MO', 'IE'] + types = getattr(instance, 'requested_reshape_types', supported_pipelines) + + if len(types) == 1 or isinstance(types, str): + log.info(f'Only {types} reshape pipeline was set for {instance.__class__.__name__}') + return [types] + else: + pipelines_pairs = [] + for pipeline in types[1:]: + pipelines_pairs.append([types[0], pipeline]) + return pipelines_pairs + + +def check_config(default_shapes, layout, changeable_dims): + for input_layer, layer_value in changeable_dims.items(): + + assert len(layout[input_layer]) == len(default_shapes[input_layer]), \ + 'Layout {} and default_shapes {} of layer "{}"' \ + ' must have the same number of values'.format( + layout[input_layer], default_shapes[input_layer], input_layer) + + if layer_value is not None: + for dimension in layer_value: + assert dimension in layout[input_layer], \ + "Dimension '{}' wasn't found in input '{}'" \ + " layout: {}".format(dimension, input_layer, layout[input_layer]) + + for data in layer_value[dimension]: + assert len(data) == len(dimension), \ + 'Number of values {} for dimension "{}" in input layer "{}" should be the same' \ + " as length of changeable dimension".format(data, dimension, input_layer) + + +def get_dims_to_change(changeable_dims): + dims_to_change = defaultdict(list) + for layer_name in changeable_dims: + if changeable_dims[layer_name] is None: + dims_to_change[layer_name].append(None) + else: + for dim in changeable_dims[layer_name]: + for _ in range(len(changeable_dims[layer_name][dim])): + dims_to_change[layer_name].append(dim) + + dims_to_change = refactor_values(dims_to_change) + + return dims_to_change + + +def get_values_to_change(changeable_dims): + values_to_change = defaultdict(list) + for layer_name in changeable_dims: + if changeable_dims[layer_name] is None: + values_to_change[layer_name].append(None) + else: + for dim in changeable_dims[layer_name]: + for value in changeable_dims[layer_name][dim]: + values_to_change[layer_name].append(value) + + return values_to_change + + +def refactor_values(values): + refactored_values = lambda x: list(zip_longest(*x.values())) + return [tuple(zip(values.keys(), obj)) for obj in refactored_values(values)] + + +def construct_new_shapes(test_number, default_shapes, layout, dims_to_change, values_to_change, dynamism_type=False): + reshape_config = {k: list(v) for k, v in default_shapes.items()} + for input_layer, input_dimension in dims_to_change[test_number]: + if input_dimension is None: + reshape_config[input_layer] = default_shapes[input_layer] + else: + # list comprehension is necessary in cases when multiply dimensions was set like 'HW' + dim_indexes = [layout[input_layer].index(d) for d in input_dimension] + # we should use default values if None is set as value + for value_index, value in 
enumerate(values_to_change[input_layer][test_number]): + if value is None: + continue + else: + if not dynamism_type or dynamism_type == 'None': + reshape_config[input_layer][dim_indexes[value_index]] = value + if dynamism_type == 'negative_ones': + reshape_config[input_layer][dim_indexes[value_index]] = -1 + if dynamism_type == 'range_values': + reshape_config[input_layer][dim_indexes[value_index]] = sorted([ + default_shapes[input_layer][dim_indexes[value_index]], value]) + + return reshape_config + + +def get_reshape_configurations(reshape_test_case, dynamism_type) -> list: + """ + This function returns list of reshape configurations. + + Reshape configuration here is a list with info for reshape. + It has the following structure: + 1. shapes: {input_layer: [input_layer_shapes], next_input_layer: [next_input_layer_shapes]} + 2. dimensions are supposed to be changed: {input_layer: dimension, next_input_layer: dimension} + 3. layout: layout of each input layer + 4. default shapes: dictionary with input layer names and its shapes + """ + input_descriptor = reshape_test_case.input_descriptor + + default_shapes = {k: v['default_shape'] for k, v in input_descriptor.items() if not v.get('frozen_input')} + layout = {k: v['layout'] for k, v in input_descriptor.items() if not v.get('frozen_input')} + changeable_dims = {k: v['changeable_dims'] for k, v in input_descriptor.items() if not v.get('frozen_input')} + check_config(default_shapes, layout, changeable_dims) + + reshape_configurations = [] + + # get input layer-changed dimensions pairs for each specified shape value + dims_to_change = get_dims_to_change(changeable_dims) + # construct matrix of shape values for each input layer + values_to_change = get_values_to_change(changeable_dims) + # number of tests is number of input layer-changed dimensions pairs + number_of_tests = len(dims_to_change) + refactored_values = refactor_values(values_to_change) + + # construct new shapes from layer-changed dimensions pairs and matrix of values + for test in range(number_of_tests): + reshape_config = construct_new_shapes(test, default_shapes, layout, dims_to_change, + values_to_change, dynamism_type) + reshape_configurations.append(SimpleNamespace(shapes=reshape_config, changed_dims=dict(dims_to_change[test]), + layout=layout, default_shapes=default_shapes, + changed_values=refactored_values[test])) + + return reshape_configurations + + +def get_input_data(shapes): + return {'dynamism_preproc': {'execution_function': lambda data: replicator(data, shapes)}} + + +def batch_was_changed(shapes, changed_dims, layout, default_shapes): + batch = None + + for layer, dimension in changed_dims.items(): + if dimension is None: + continue + if len(dimension) > 1: + continue + index = layout[layer].index(dimension) + # we assume that batch index is always == 0 + if index != 0: + continue + if shapes[layer][index] != default_shapes[layer][index]: + batch = shapes[layer][index] + + return batch + + +def compare(instance, ref_results, cur_results): + assert len(instance.comparators) == 1 or "dummy" not in instance.comparators, \ + "Dummy comparator is not the only one in comparators of instance" + + if not ref_results: + ref_results = {} + instance.comparators = dummy_comparators() + else: + instance.comparators = eltwise_comparators(device=getattr(instance, 'device'), + precision=getattr(instance, 'precision'), + a_eps=getattr(instance, 'a_eps', None), + r_eps=getattr(instance, 'r_eps', None)) + + cur_results = cur_results.fetch_results() + cur_results = cur_results if 
type(cur_results) is list else [cur_results] + statuses = [] + for ref_result, cur_result in zip(ref_results, cur_results): + comparators = ComparatorsContainer( + config=instance.comparators, + infer_result=cur_result, + reference=ref_result.fetch_results(), + result_aligner=name_aligner, + ) + + log.info('Running comparators:') + comparators.apply_postprocessors() + comparators.apply_all() + statuses.append(comparators.report_statuses()) + + return all(statuses) + + +def reorder_shapes_to_old_api(shapes): + reorder_shapes = copy.deepcopy(shapes) + for k, v in shapes.items(): + if len(v) in [4, 5]: + reorder_shapes[k] = tuple(np.array(v).take((0, len(v) - 1, *list(range(1, len(v) - 1))))) + return reorder_shapes + + +def get_mo_input_with_frozen_values(mo_arg_input, shapes): + cmd_mo_input = [] + for input in mo_arg_input.split(","): + if "->" in input: + cmd_mo_input.append(input) + else: + input = re.sub(r"[(\[]([0-9 -]*)[)\]]", "", input) + cmd_mo_input.append(input + str(shapes[input]).replace(',', '')) + return cmd_mo_input + + +def prepare_data_consecutive_inferences(default_shapes, changed_values, layout, dims_to_change): + def construct_input_data(data): + input_data = copy.deepcopy(data) + consecutive_infer_input_data = [data] + + changed_data_shapes = get_static_shape(default_shapes, changed_values, layout, dims_to_change) + second_data = replicator(input_data, changed_data_shapes) + consecutive_infer_input_data.append(second_data) + + return consecutive_infer_input_data + return {'dynamism_preproc': {'execution_function': lambda data: construct_input_data(data)}} + + +def get_static_shape(default_shapes, changed_values, layout, dims_to_change): + static_shapes = copy.deepcopy(default_shapes) + static_shapes = {k: list(v) for k, v in static_shapes.items()} + for input_layer, dimension in dims_to_change.items(): + if dimension is None: + continue + else: + dim_indexes = [layout[input_layer].index(d) for d in dimension] + for value_index, value in enumerate(dict(changed_values)[input_layer]): + if value is None: + static_shapes[input_layer][dim_indexes[value_index]] = \ + default_shapes[input_layer][dim_indexes[value_index]] + else: + static_shapes[input_layer][dim_indexes[value_index]] = value + return static_shapes + + +def replicator(data, shapes): + for name, shape in shapes.items(): + if name not in data: + log.info(f"Input '{name}' from shapes was not found in data") + continue + err_msg = 'Final batch alignment error for layer `{}`: '.format(name) + + data[name] = np.array(data[name]) + old_shape = np.array(data[name].shape) + new_shape = np.array(shapes[name]) + + if old_shape.size != new_shape.size: + # Rank resize. 
We assume that it is Faster-like input with input shape + if np.prod(old_shape) == np.prod(new_shape): + data[name].reshape(new_shape) + old_shape = new_shape + + assert old_shape.size == new_shape.size, 'Rank resize detected' + if np.all((new_shape % old_shape) == 0): + assert np.all(old_shape <= new_shape), 'Reshaping to shape that is less than original network shape' + log.info('New shape is evenly divided by original network shape') + multiplier = tuple(np.array(new_shape / old_shape, dtype=np.int_)) + data[name] = np.tile(data[name], multiplier) + else: + # TF OD models can not be reshaped in 2x bacause they should keep aspect ratio + log.info('New shape is not evenly divided by original network shape data_shape={}, net_shape={}' + ''.format(data[name].shape, new_shape)) + assert len(new_shape) == 4, \ + "Unsupported by tests reshape: Non 4D input {}, original shape {}".format(new_shape, old_shape) + + multiplier = tuple(np.array(new_shape // old_shape + np.ones(new_shape.size), dtype=np.int)) + replicated_data = np.tile(data[name], multiplier) + data[name] = replicated_data[0:new_shape[0], 0:new_shape[1], 0:new_shape[2], 0:new_shape[3]] + + assert np.array_equal(data[name].shape, new_shape), \ + err_msg + 'data_shape={}, net_shape={}'.format(data[name].shape, new_shape) + + log.info('Input data was aligned with shapes=`{}`, new_data_shapes=`{}`' + ''.format(shapes, {k: v.shape for k, v in data.items()})) + return data + diff --git a/tests/e2e_tests/test_utils/test_utils.py b/tests/e2e_tests/test_utils/test_utils.py new file mode 100644 index 00000000000000..2d0b0f0757ed5c --- /dev/null +++ b/tests/e2e_tests/test_utils/test_utils.py @@ -0,0 +1,493 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import csv +import logging as log +import os +import re +import shutil +import sys +from pathlib import Path +from typing import Union +from filelock import FileLock + +from contextlib import contextmanager +from datetime import datetime +import copy +import numpy as np +import tensorflow as tf +import subprocess + + +from openvino.runtime import Dimension, PartialShape + + +log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.DEBUG, stream=sys.stdout) + + +_csv_bool_map = {"true": True, "false": False, + True: "true", False: "false"} + +PRECISION_MAP = { + 'FP32': 'f32', + 'BF16': 'bf16' +} + + +def write_to_csv(csv_path: Path, data: list): + """ + Writes specified data to a CSV-formatted file + :param csv_path: path to CSV-formatted file to write data + :param data: data to write + :return: None + """ + # NOTE: concurrent writing to a file using `csv` module may lead + # to lost of several rows, but every row isn't corrupted. + # In case of using it for IRs pre-generation (in `collect_irs.py`), + # `test.py` is ready that some records may be not available. + with open(str(csv_path), 'a', newline='') as csvfile: + writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL) + writer.writerow(data) + + +def write_irs_mapping_file(path: Path, ir_tag: str, status: bool, mo_log: [str, Path, None], + xml: [str, Path, None], bin: [str, Path, None], timeout: int = 30): + """ + Writes record to IRs mapping file. + IR related paths are saved in relative format to support different + OS specific base paths. 
+ + :param path: path to CSV-formatted IRs mapping file + :param ir_tag: tag to map IRs + :param status: status of IRs pre-generation + :param mo_log: full path to MO log file + :param xml: full path to IR's .xml file + :param bin: full path to IR's .bin file, + :param timeout: filelock timeout in seconds + :return: dictionary with IRs mapping + """ + assert path.parent.exists(), "File's parent directory should exists" + + def _rel_path(path, parent_path): + return Path(path).relative_to(parent_path) if path is not None else None + + status = _csv_bool_map[status] + mo_log, xml, bin = _rel_path(mo_log, path.parent), _rel_path(xml, path.parent), _rel_path(bin, path.parent) + log.info("Prepare record to a mapping file: {}".format({ir_tag: [status, mo_log, xml, bin]})) + + lock_irs_mapping_path = path.with_suffix('.lock') + + with FileLock(lock_irs_mapping_path, timeout): + write_to_csv(path, [ir_tag, status, mo_log, xml, bin]) + + +def read_irs_mapping_file(path: Path, timeout: int = 30, lock_access: bool = False): + """ + Reads IRs mapping file + :param path: path to CSV-formatted IRs mapping + :param timeout: filelock timeout in seconds + :param lock_access: boolean which specifies should the file be locked or not. + Lock is required when read/write simultaneously in parallel. + :return: dictionary with IRs mapping + """ + def _full_path(path, parent_path): + # `csv` module converts None to empty string, so implicitly convert it to None + return parent_path / path if path else None + + def _read(csv_path): + with open(str(csv_path), 'r', newline='') as csvfile: + fixed_csvfile = (line.replace('\0', '') for line in + csvfile) # replace '\0' to prevent "_csv.Error: line contains NULL byte" + reader = csv.reader(fixed_csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL) + irs_mapping = {} + for row in reader: + if row: + try: + ir_tag, status, rel_mo_log, rel_xml, rel_bin = row + status = _csv_bool_map[status] + mo_log, xml, bin = _full_path(rel_mo_log, path.parent), _full_path(rel_xml, path.parent), \ + _full_path(rel_bin, path.parent) + irs_mapping.update({ir_tag: [status, mo_log, xml, bin]}) + except: + pass # ignore any corrupted row + return irs_mapping + + lock_irs_mapping_path = path.with_suffix('.lock') + + if not lock_access: + irs_mapping = _read(path) + else: + with FileLock(lock_irs_mapping_path, timeout): + irs_mapping = _read(path) + + return irs_mapping + + +def get_ir_tag(name, ir_version, precision, batch, sequence_length=None, skip_mo_args=None): + """ + Prepares tag to map IR generated in E2E test + :param name: test (or any specific) name + :param ir_version: version of IR (e.g. v11) + :param precision: precision value of IR + :param batch: batch value of IR + :param sequence_length: sequence_length value of IR + :param skip_mo_args: line with comma separated args that will be deleted from MO cmd line + + :return: IR tag + """ + model_tag = f"{name}_IR_{ir_version}_{precision}_batch_{batch}" + if sequence_length: + model_tag = f"{model_tag}_seqlen_{sequence_length}" + if skip_mo_args: + model_tag += f"deleted_mo_args:_{skip_mo_args}" + return model_tag + + +def store_data_to_csv(csv_path, instance, ir_version, data, device, data_name, skip_mo_args=None): + """ + This function helps to store runtime data such as time of IR generation by MO or of network loading to plugin. 
+ To store it, please, execute test.py (both for MO and load net to plugin) with keys: + `--ir_gen_time_csv_name ` and `--load_net_to_plug_time_csv_name ` + + :param csv_path: csv_path for saving csv file + :param instance: test class instance + :param ir_version: string in format "vN" (e.g. v11) + :param device: in general, it is device for execution, but for MO it is useless. But for MO it is set as 'CPU' + :param data_name: name of runtime data (such as attribute where time was saved) + :param skip_mo_args: line with comma separated args that will be deleted from MO cmd line + :return: + """ + csv_header = ["Model Tag", "Device", "Data", "Operation type"] + if not os.path.exists(csv_path): + write_to_csv(csv_path=csv_path, data=csv_header) + model_mapping_tag = get_ir_tag(instance.__class__.__name__, ir_version, instance.precision, + instance.batch, instance.required_params.get("sequence_length", None), + skip_mo_args) + write_to_csv(csv_path=csv_path, data=[model_mapping_tag, device, data, data_name]) + + +def class_factory(cls_name, cls_kwargs, BaseClass): + """ + Function that generates of custom classes + :param cls_name: name of the future class + :param cls_kwargs: attributes required for the class (e.g. __is_test_config__) + :param BaseClass: basic class where implemented behaviour of the test + :return: + """ + + # Generates new class with "cls_name" type inherited from "object" and + # with specified "__init__" and other class attributes + + newclass = type(cls_name, (BaseClass,), {**cls_kwargs}) + return newclass + + +def remove_mo_args(mo_args_to_skip: Union[list, str], mo_cmd): + """ + This function deletes arguments from MO cmd line + + :param mo_args_to_skip: mo arguments to delete + :param mo_cmd: MO command line that is supposed to be reconfigured + """ + mo_args_to_skip = mo_args_to_skip if isinstance(mo_args_to_skip, list) else mo_args_to_skip.split(',') + + for mo_arg in mo_args_to_skip: + if mo_arg in mo_cmd: + log.info('Deleting argument from MO cmd: {}'.format(mo_arg)) + del mo_cmd[mo_arg] + + return mo_cmd + + +def remove_mo_args_oob(mo_args_to_skip: Union[list, str], mo_cmd: dict, instance) -> dict: + """ + This function removes shapes from MO cmd line of instance + If test instance has specific inputs for MO cmd then + "--input" will be equal to it + + :param mo_args_to_skip: mo arguments to delete + :param mo_cmd: MO command line that is supposed to be reconfigured + :param instance: test instance + """ + + mo_input = mo_cmd.get('input') + mo_cmd = remove_mo_args(mo_args_to_skip, mo_cmd) + + if mo_input and hasattr(instance, 'frozen_inputs'): + mo_cmd['input'] = instance.frozen_inputs + + return mo_cmd + + +def get_framework_from_model_ex(path_to_test_file): + frameworks_path = {'caffe': 'caffe', + 'kaldi': 'kaldi', + 'mxnet': 'mxnet', + 'onnx': 'onnx', + 'paddlepaddle': 'paddle', + 'pytorch': 'pytorch', + 'tf': 'tf', + 'tflite': 'tflite', + 'tf_2x': 'tf2'} + pattern = r'pipelines\w*[\\/]\w+[\\/](\w+)[\\/]' + name_fw = re.search(pattern, path_to_test_file) + if name_fw: + return frameworks_path.get(name_fw.group(1), 'Undefined') + + return 'Undefined' + + +def align_output_name(name, outputs): + if isinstance(name, int): + return name + if ":" in name: + name_without_port, port = name.rsplit(":", 1) + use_name_without_port = name_without_port in outputs and port.isnumeric() + if use_name_without_port: + return use_name_without_port, name_without_port + name_with_default_port = name + ":0" + if name_with_default_port in outputs: + return name_with_default_port + 
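+# Illustrative sketch (not from the original patch): expected behaviour of the
+# name-alignment helper above, assuming a model whose only output tensor is
+# named "logits:0". These values are worked examples, not captured output.
+#
+#   align_output_name("logits", {"logits:0"})  # -> "logits:0" (default port ":0" appended)
+#   align_output_name(0, {"logits:0"})         # -> 0 (integer indices are passed through)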
+ +def construct_names_set(name): + if ":" in name: + name_without_port, port = name.rsplit(":", 1) + if port.isnumeric(): + return {name_without_port, name} + name_with_default_port = name + ":0" + return {name_with_default_port, name} + + +def align_input_names(input_dict, model): + if all([isinstance(x, int) for x in input_dict]): + return input_dict + new_input_dict = {} + for input_data_layer in input_dict: + new_input_dict[input_data_layer] = input_dict[input_data_layer] + for input_layer in model.inputs: + common_names = input_layer.names.intersection(construct_names_set(input_data_layer)) + if common_names: + if input_data_layer not in common_names: + new_input_dict[common_names.pop()] = new_input_dict.pop(input_data_layer) + return new_input_dict + + +def get_infer_result(input_data, compiled_model, ov_model, infer_run_counter=0, index_infer=False): + log.info("Starting inference") + log.info("Inference run counter: " + str(infer_run_counter + 1)) + + request = compiled_model.create_infer_request() + cur_input_data = align_input_names(input_data, ov_model) + infer_result = request.infer(cur_input_data) + + helper = {} + + if index_infer: + for i, out_tensor in enumerate(infer_result.values()): + helper[i] = out_tensor + else: + for out_obj, out_tensor in infer_result.items(): + assert out_obj.names, "Output tensor doesn't have name" + tensor_name = out_obj.get_any_name() + if tensor_name in helper: + tensor_name = next(iter(out_obj.names - set(helper.keys())), tensor_name) + helper[tensor_name] = out_tensor + + return helper + + +def get_shapes_with_frame_size(default_shapes, ov_model, input_data): + # there could be dynamic shapes in ov_model.inputs, therefore shapes should be known from test + inputs = ov_model.inputs if default_shapes is None else default_shapes + + for input_layer in inputs: + if default_shapes: + frame_size = default_shapes[input_layer] + input_data[input_layer] = input_data[input_layer].reshape(-1, *frame_size) + else: + layer_name = input_layer.names.intersection(set(input_data.keys())).pop() + frame_size = [dim for dim in input_layer.shape] + input_data[layer_name] = input_data[layer_name].reshape(-1, *frame_size) + + return input_data + + +def copy_files_by_pattern(directory: Path, pattern_to_find: str, pattern_to_copy: str): + for file in directory.glob("{}*".format(pattern_to_find)): + file_extension = ''.join(file.suffixes) + copied_file = file.parent / (pattern_to_copy + file_extension) + if file.exists(): + log.info('Copying file from {} to {}'.format(file, copied_file)) + try: + shutil.copy(str(file), str(copied_file)) + except shutil.SameFileError: + pass + else: + log.info('File {} does not exist'.format(file)) + + +def check_mo_precision(instance): + # SPR use BF16 precision by default, and it requires thresholds that are different from FP32 threshold + # Run Model Optimizer with FP32 precision because it hasn't bf16 option + if 'get_ovc_model' in instance['get_ir'] and instance['get_ir']['get_ovc_model']['precision'] == "BF16": + log.info("Setting precision FP32 for Model Optimizer...") + instance['get_ir']['get_ovc_model']['precision'] = "FP32" + + +def set_infer_precision_hint(instance, pipeline, inference_precision_hint): + api = next(iter(pipeline.get('infer'))) + # inference_precision_hint is required only for GPU + if instance.device == 'GPU': + if inference_precision_hint: + # f16 is default value + supported_values = ['bf16', 'f32'] + assert inference_precision_hint in supported_values, f"{inference_precision_hint} not in" \ + f" 
supported values: {supported_values}" + pipeline['infer'][api]['plugin_config'] = { + 'INFERENCE_PRECISION_HINT': inference_precision_hint} + else: + test_precision = instance.precision + if test_precision != 'FP16': + inference_precision_hint = PRECISION_MAP[test_precision] + pipeline['infer'][api]['plugin_config'] = { + 'INFERENCE_PRECISION_HINT': inference_precision_hint} + + return pipeline + + +class BrokenTestException(Exception): + """ + Custom exception type required for catching only errors related to incorrectly defined test pipeline + """ + pass + + +class BrokenTest: + """ + Class which used to substitute the test pipeline class which are incorrectly defined. + Used in conftest pytest plugins on test collection stage. If during creation of test pipeline instance + some exception happens, the pipeline class replaced with BrokenTest class with keeping original class name. + Attempt to refer to any test pipeline in test runners leads to raising original error happened in initial + test pipeline class + """ + + def __init__(self, test_id, fail_message, exception, *args, **kwargs): + """ + :param test_id: test identificator + :param fail_message: string which should be logged while reference to ie_pipeline or ref_pipeline attributes + :param exception: exception which will be raised while reference to ie_pipeline or ref_pipeline attributes + :param args: auxiliary positional arguments + :param kwargs: auxiliary keyword arguments + """ + self.test_id = test_id + self.fail_message = fail_message + self.exception = exception + + @property + def ref_pipeline(self, *args, **kwargs): + log.error(self.fail_message) + raise BrokenTestException(str(self.exception)) + + @property + def ie_pipeline(self, *args, **kwargs): + log.error(self.fail_message) + raise BrokenTestException(str(self.exception)) + + @property + def prepare_prerequisites(self, *args, **kwargs): + log.error(self.fail_message) + raise BrokenTestException(str(self.exception)) + + +@contextmanager +def log_timestamp(action): + """ + Function adds timestamp for the start and the end of the action + :param action: name of action for logging + """ + log.debug(f'{datetime.fromtimestamp(datetime.now().timestamp(), tz=None)}: Started {action}') + yield + log.debug(f'{datetime.fromtimestamp(datetime.now().timestamp(), tz=None)}: Finished {action}') + + +def timestamp(): + """ + Function return current timestamp for logging + """ + return f'{datetime.fromtimestamp(datetime.now().timestamp(), tz=None)}' + + +def get_static_shape(default_shapes, changed_values, layout, dims_to_change): + static_shapes = copy.deepcopy(default_shapes) + static_shapes = {k: list(v) for k, v in static_shapes.items()} + for input_layer, dimension in dims_to_change.items(): + if dimension is None: + continue + else: + dim_indexes = [layout[input_layer].index(d) for d in dimension] + for value_index, value in enumerate(dict(changed_values)[input_layer]): + if value is None: + static_shapes[input_layer][dim_indexes[value_index]] = \ + default_shapes[input_layer][dim_indexes[value_index]] + else: + static_shapes[input_layer][dim_indexes[value_index]] = value + return static_shapes + + +def get_shapes_from_data(input_data) -> dict: + shapes = {} + for input_layer in input_data: + shapes[input_layer] = PartialShape(input_data[input_layer].shape) + return shapes + + +def convert_shapes_to_partial_shape(shapes: dict) -> dict: + partial_shape = {} + for layer, shape in shapes.items(): + dimension_tmp = [] + for item in shape: + dimension_tmp.append(Dimension(item[0], 
item[1])) if type(item) == list else dimension_tmp.append( + Dimension(item)) + partial_shape[layer] = PartialShape(dimension_tmp) + return partial_shape + + +def name_aligner(infer_result, reference, xml=None): + """ + Function name_aligner aligns names for inference and reference outputs if number of their outputs == 1 + """ + if len(infer_result.keys()) == 1 == len(reference.keys()): + log.info("Renaming inferred output layer {} to referenced output layer {}".format( + list(infer_result.keys())[0], list(reference.keys())[0])) + infer_result[next(iter(reference))] = infer_result.pop(next(iter(infer_result))) + + return infer_result, reference + + +def shell(cmd, env=None, cwd=None, out_format="plain", log=True): + """ + Run command execution in specified environment + + :param cmd: list containing command and its parameters + :param env: set of environment variables to set for this command + :param cwd: working directory from which execute call + :param out_format: 'plain' or 'html'. If 'html' all '\n; symbols are replaced by '
<br>' tag + :param log: display output info into sys.stdout or not + :return: returncode, stdout, stderr + """ + if sys.platform.startswith('linux') or sys.platform == 'darwin': + cmd = ['/bin/bash', '-c', "unset OMP_NUM_THREADS; " + " ".join(cmd)] + else: + cmd = " ".join(cmd) + if log: + sys.stdout.write("Running command:\n" + "".join(cmd) + "\n") + p = subprocess.Popen(cmd, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (stdout, stderr) = p.communicate() + stdout = str(stdout.decode('utf-8')) + stderr = str(stderr.decode('utf-8')) + if out_format == "html": + stdout = "<br>
\n".join(stdout.split('\n')) + stderr = "<br>
\n".join(stderr.split('\n')) + return p.returncode, stdout, stderr + diff --git a/tests/e2e_tests/test_utils/tf_helper.py b/tests/e2e_tests/test_utils/tf_helper.py new file mode 100644 index 00000000000000..88a82a66592c20 --- /dev/null +++ b/tests/e2e_tests/test_utils/tf_helper.py @@ -0,0 +1,89 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# pylint: disable=no-member + +""" +The module contains helper class for working with TensorFlow version dependencies +""" +import os +import logging as log +from distutils.version import LooseVersion + +import yaml + + +class TFVersionHelper: + """ + Contain version of used TensorFlow models for IR conversion + + Description: Some TensorFlow IR providers and test classes use TF version as a part of + auxiliary file paths but the current TensorFlow version may not match the version needed + """ + + _instance = None + + _configs_map = None + __tf_models_version = None + + def __new__(cls, *_args, **_kwargs): + """Singleton + We consider having one TensorFlow models version per session. Once created the object is + stored as _instance and shall be returned then in case of attempt to create new object. + """ + if not TFVersionHelper._instance: + TFVersionHelper._instance = super(TFVersionHelper, cls).__new__(cls) + return TFVersionHelper._instance + + def __init__(self, tf_models_version: str = None): + """Set TF models version as explicit value or installed TF version""" + if self._configs_map: + return + if tf_models_version is None: + try: + # the following try-catch is useful to be able to run non-tensorflow tests + # without the need to have tensorflow installed + import tensorflow as tf + + tf_models_version = tf.__version__ + except ImportError: + log.warning("Module 'tensorflow' is not found") + else: + log.info( + 'Version of TensorFlow models has been changed to "%s" explicitly', + tf_models_version, + ) + with open( + os.path.join(os.path.dirname(__file__), "tf_helper_config.yml"), "r" + ) as configs_map_file: + self._configs_map = yaml.safe_load(configs_map_file) + self.__tf_models_version = tf_models_version + + @property + def tf_models_version(self): + """ Return defined TF models version """ + if self.__tf_models_version is None: + raise AttributeError("attribute 'tf_models_version' is not defined!") + return self.__tf_models_version + + def _get_transformations_config_file_name(self, model_type: str, config_versions: list): + """ + Return sub-graph replacement config file name for models based on TF version and models type + """ + tf_models_loose_version = LooseVersion(self.__tf_models_version) + for version in config_versions: + if tf_models_loose_version >= LooseVersion(str(version)): + return f"{model_type}_support_api_v{version}.json" + if model_type == "ssd": + return "ssd_v2_support.json" + return f"{model_type}_support.json" + + def resolve_tf_transformations_config(self, config_alias: str, relative_mo: bool = False): + """Return name of sub-graph replacement config file or its path relative MO root folder""" + config_info = self._configs_map.get(config_alias) + if config_info: + config_file_name = self._get_transformations_config_file_name( + config_info["model_type"], config_info["versions"] + ) + return f"front/tf/{config_file_name}" if relative_mo else config_file_name + return config_alias diff --git a/tests/e2e_tests/test_utils/tf_helper_config.yml b/tests/e2e_tests/test_utils/tf_helper_config.yml new file mode 100644 index 00000000000000..9a24ea8088cc51 --- /dev/null +++ 
b/tests/e2e_tests/test_utils/tf_helper_config.yml @@ -0,0 +1,39 @@ +# : +# model_type: Type of TensorFlow model +# +# config_versions: list of available replacement config versions corresponding to +# TensorFlow versions +# + +faster_rcnn_subgraph_replacement_config_file: + model_type: faster_rcnn + versions: + - 2.0 + - 1.15 + - 1.14 + - 1.13 + - 1.10 + - 1.7 +mask_rcnn_subgraph_replacement_config_file: + model_type: mask_rcnn + versions: + - 2.0 + - 1.15 + - 1.14 + - 1.13 + - 1.11 + - 1.7 + +ssd_subgraph_replacement_config_file: + model_type: ssd + versions: + - 2.0 + - 1.15 + - 1.14 + +rfcn_subgraph_replacement_config_file: + model_type: rfcn + versions: + - 1.14 + - 1.13 + - 1.10 diff --git a/tests/e2e_tests/test_utils/tf_hub_utils.py b/tests/e2e_tests/test_utils/tf_hub_utils.py new file mode 100644 index 00000000000000..e4767d8ad670dc --- /dev/null +++ b/tests/e2e_tests/test_utils/tf_hub_utils.py @@ -0,0 +1,81 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import tensorflow as tf + +rng = np.random.default_rng(seed=56190) + +type_map = { + tf.float64: np.float64, + tf.float32: np.float32, + tf.int8: np.int8, + tf.int16: np.int16, + tf.int32: np.int32, + tf.int64: np.int64, + tf.uint8: np.uint8, + tf.uint16: np.uint16, + tf.string: str, + tf.bool: bool, +} + + +def prepare_input(input_shape, input_type): + if input_type in [np.float32, np.float64]: + return 2.0 * rng.random(size=input_shape, dtype=input_type) + elif input_type in [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64]: + return rng.integers(0, 5, size=input_shape).astype(input_type) + elif input_type in [str]: + return np.broadcast_to("Some string", input_shape) + elif input_type in [bool]: + return rng.integers(0, 2, size=input_shape).astype(input_type) + else: + assert False, "Unsupported type {}".format(input_type) + + +def prepare_inputs(inputs_info): + # if len(inputs_info) > 0 and inputs_info[0] == 'list': + # inputs = [] + # inputs_info = inputs_info[1:] + # for input_name, input_shape, input_type in inputs_info: + # inputs.append(prepare_input(input_shape, input_type)) + # else: + inputs = {} + for input_name, input_shape, input_type in inputs_info: + inputs[input_name] = prepare_input(input_shape, input_type) + return inputs + + +def get_inputs_info(model_obj): + inputs_info = [] + assert len(model_obj.structured_input_signature) > 1, "incorrect model or test issue" + for input_name, input_info in model_obj.structured_input_signature[1].items(): + input_shape = [] + try: + if input_info.shape.as_list() == [None, None, None, 3] and input_info.dtype == tf.float32: + # image classification case, let us imitate an image + # that helps to avoid compute output size issue + input_shape = [1, 200, 200, 3] + else: + for dim in input_info.shape.as_list(): + if dim is None: + input_shape.append(1) + else: + input_shape.append(dim) + except ValueError: + # unknown rank case + pass + if input_info.dtype == tf.resource: + # skip inputs corresponding to variables + continue + assert input_info.dtype in type_map, "Unsupported input type: {}".format(input_info.dtype) + inputs_info.append((input_name, input_shape, type_map[input_info.dtype])) + + return inputs_info + + +def generate_tf_hub_inputs(model): + """ + Generates random inputs depending on model's input type + """ + return prepare_inputs(get_inputs_info(model))
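+
+
+# Usage sketch (illustrative only, not part of the original test suite): random feed
+# data matching a TF Hub model's structured input signature can be generated and fed
+# back to the model. The hub URL below is an assumed example; the exact call depends
+# on the loaded model's signature.
+#
+#   import tensorflow_hub as hub
+#   model = hub.load("https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/classification/5")
+#   feed_dict = generate_tf_hub_inputs(model)
+#   outputs = model(**feed_dict)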