From caf749213ba003e1c44beca2a3b3e7a0afd82e08 Mon Sep 17 00:00:00 2001
From: Harim Kang
Date: Mon, 26 Sep 2022 20:19:16 +0900
Subject: [PATCH] [Feature/OTX] Rebase develop to feature/otx before MPA
 refactoring (#1284)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Update submodule branch (#1222)
* Enhance training schedule for multi-label classification (#1212)
* [CVS-88098] Remove initialize from export functions (#1226)
* Train graph added (#1211)

Co-authored-by: Lee, Soobee

* Add @attrs decorator for base configs (#1229)

Signed-off-by: Songki Choi
Co-authored-by: Harim Kang

* Pretrained weight download error in MobilenetV3-large-1 of deep-object-reid in SC (#1233)
* [Anomaly Task] Revert hpo template (#1230)
* 🐞 [Anomaly Task] Fix progress bar (#1223)
* [CVS-90555] Fix NaN value in classification (#1244)
* update hpo_config.yaml (#1240)
* [CVS-90400, CVS-91015] NNCF pruning supported tweaks (#1248)
* Turned off pruning_support visibility for anomaly models (CVS-91015)
* Disabled pruning for EfficientNet-V2-S (CVS-90400)
* [Anomaly Task] 🐞 Fix inference when model backbone changes (#1242)
* Fix CVS-91469 sseg compatibility issue
* [CVS-91472] Add pruning_supported value (#1263)
* Pruning supported tweaks (#1256)
* [CVS-90400, CVS-91015] NNCF pruning supported tweaks (#1248)
* Turned off pruning_support visibility for anomaly models (CVS-91015)
* Disabled pruning for EfficientNet-V2-S (CVS-90400)
* Revert "[CVS-90400, CVS-91015] NNCF pruning supported tweaks (#1248)" (#1269)
* [OTE-TEST] Disable obsolete test cases (#1220)
* [OTE-TEST] hot-fix for MPA performance tests (#1273)
* Expose early stopping hyper-parameters for all tasks (#1241)
* Resolve pre-commit issues (#1272)
* Remove LazyEarlyStopHook in model_multilabel.py (#1281)
* Removed xfail (#1239)

Signed-off-by: Songki Choi
Co-authored-by: Ashwin Vaidya
Co-authored-by: Jaeguk Hyun
Co-authored-by: Nikita Savelyev
Co-authored-by: Vladisalv Sovrasov
Co-authored-by: Jihwan Eom
Co-authored-by: Songki Choi
Co-authored-by: Soobee Lee
Co-authored-by: Lee, Soobee
Co-authored-by: Eugene Liu
Co-authored-by: Emily Chun
Co-authored-by: ljcornel
Co-authored-by: Eunwoo Shin
---
 .pre-commit-config.yaml | 6 +-
 QUICK_START_GUIDE.md | 19 +
 external/README.md | 82 +-
 .../efficientnet_v2_s/compression_config.json | 10 -
 .../template_experimental.yaml | 2 +-
 external/deep-object-reid/constraints.txt | 1 +
 external/deep-object-reid/submodule | 2 +-
 .../tests/ote_cli/test_classification.py | 2 +-
 .../deep-object-reid/tests/test_ote_api.py | 6 +
 .../tests/test_ote_training.py | 9 +-
 .../torchreid_tasks/inference_task.py | 13 +-
 .../torchreid_tasks/openvino_task.py | 2 +-
 .../deep-object-reid/torchreid_tasks/utils.py | 11 +-
 .../tools/draw_recall_graph.py | 19 +-
 .../apis/detection/inference_task.py | 18 +-
 .../apis/detection/openvino_task.py | 11 +-
 external/mmdetection/submodule | 2 +-
 .../tests/ote_cli/test_detection.py | 3 +-
 .../ote_cli/test_instance_segmentation.py | 3 +-
 .../tests/ote_cli/test_rotated_detection.py | 3 +-
 external/mmdetection/tests/test_ote_api.py | 4 +
 .../mmdetection/tests/test_ote_training.py | 10 +-
 .../apis/segmentation/inference_task.py | 59 +-
 .../apis/segmentation/model_wrappers/blur.py | 35 +-
 .../apis/segmentation/openvino_task.py | 28 +-
 .../apis/segmentation/ote_utils.py | 15 +-
 external/mmsegmentation/submodule | 2 +-
 .../tests/ote_cli/test_segmentation.py | 1 +
 external/mmsegmentation/tests/test_ote_api.py | 1 +
 .../tests/test_ote_configuration.py | 2 +
 .../mmsegmentation/tests/test_ote_training.py | 17 +-
 .../configs/classification/configuration.yaml | 72 +-
 .../efficientnet_b0_cls_incr/hpo_config.yaml | 10 +-
 .../efficientnet_b0_cls_incr/model.py | 5 +-
 .../model_hierarchical.py | 2 +-
 .../model_multilabel.py | 5 +-
 .../efficientnet_b0_cls_incr/template.yaml | 15 +-
 .../hpo_config.yaml | 8 +-
 .../efficientnet_v2_s_cls_incr/model.py | 5 +-
 .../model_hierarchical.py | 2 +-
 .../model_multilabel.py | 5 +-
 .../efficientnet_v2_s_cls_incr/template.yaml | 17 +-
 .../mobilenet_v3_large_075_cls_incr/model.py | 2 +-
 .../model_hierarchical.py | 2 +-
 .../model_multilabel.py | 2 +-
 .../hpo_config.yaml | 10 +-
 .../mobilenet_v3_large_1_cls_incr/model.py | 5 +-
 .../model_hierarchical.py | 2 +-
 .../model_multilabel.py | 5 +-
 .../template.yaml | 15 +-
 .../mobilenet_v3_small_cls_incr/model.py | 2 +-
 .../model_hierarchical.py | 2 +-
 .../model_multilabel.py | 2 +-
 .../configs/detection/configuration.yaml | 94 ++
 .../data_pipeline.py | 75 +-
 .../cspdarknet_yolox_cls_incr/hpo_config.yaml | 6 +-
 .../cspdarknet_yolox_cls_incr/model.py | 6 +-
 .../cspdarknet_yolox_cls_incr/template.yaml | 9 +
 .../mobilenetv2_atss_cls_incr/hpo_config.yaml | 2 +-
 .../mobilenetv2_atss_cls_incr/model.py | 15 +-
 .../mobilenetv2_atss_cls_incr/template.yaml | 9 +
 .../mobilenetv2_ssd_cls_incr/data_pipeline.py | 66 +-
 .../mobilenetv2_ssd_cls_incr/hpo_config.yaml | 8 +-
 .../mobilenetv2_ssd_cls_incr/model.py | 13 +-
 .../mobilenetv2_ssd_cls_incr/template.yaml | 9 +
 .../resnet50_vfnet_cls_incr/model.py | 4 +-
 .../instance-segmentation/configuration.yaml | 94 ++
 .../efficientnetb2b_maskrcnn/data_pipeline.py | 66 +-
 .../efficientnetb2b_maskrcnn/model.py | 6 +-
 .../efficientnetb2b_maskrcnn/template.yaml | 9 +
 .../resnet50_maskrcnn/data_pipeline.py | 88 +-
 .../resnet50_maskrcnn/model.py | 4 +-
 .../resnet50_maskrcnn/template.yaml | 9 +
 .../rotated-detection/configuration.yaml | 80 ++
 .../efficientnetb2b_maskrcnn/data_pipeline.py | 66 +-
 .../efficientnetb2b_maskrcnn/model.py | 4 +-
 .../efficientnetb2b_maskrcnn/template.yaml | 9 +
 .../resnet50_maskrcnn/data_pipeline.py | 88 +-
 .../resnet50_maskrcnn/model.py | 4 +-
 .../resnet50_maskrcnn/template.yaml | 9 +
 .../configs/segmentation/configuration.yaml | 80 ++
 .../ocr-lite-hrnet-18-mod2/hpo_config.yaml | 12 +-
 .../ocr-lite-hrnet-18-mod2/model.py | 7 +-
 .../ocr-lite-hrnet-18-mod2/template.yaml | 9 +
 .../ocr-lite-hrnet-18/hpo_config.yaml | 15 +
 .../segmentation/ocr-lite-hrnet-18/model.py | 7 +
 .../ocr-lite-hrnet-18/template.yaml | 52 ++
 .../ocr-lite-hrnet-s-mod2/hpo_config.yaml | 13 +-
 .../ocr-lite-hrnet-s-mod2/model.py | 7 +-
 .../ocr-lite-hrnet-s-mod2/template.yaml | 9 +
 .../ocr-lite-hrnet-x-mod3/hpo_config.yaml | 9 +-
 .../ocr-lite-hrnet-x-mod3/model.py | 7 +-
 .../ocr-lite-hrnet-x-mod3/template.yaml | 9 +
 .../constraints.txt | 1 +
 .../model-preparation-algorithm/init_venv.sh | 6 -
 .../mpa_tasks/apis/__init__.py | 4 +-
 .../mpa_tasks/apis/classification/__init__.py | 20 +-
 .../mpa_tasks/apis/classification/config.py | 14 +-
 .../mpa_tasks/apis/classification/task.py | 301 +++---
 .../mpa_tasks/apis/config.py | 137 ++-
 .../mpa_tasks/apis/detection/__init__.py | 15 +-
 .../mpa_tasks/apis/detection/config.py | 36 +-
 .../mpa_tasks/apis/detection/task.py | 387 ++++----
 .../mpa_tasks/apis/segmentation/__init__.py | 15 +-
 .../mpa_tasks/apis/segmentation/config.py | 57 +-
 .../mpa_tasks/apis/segmentation/task.py | 308 ++++---
 .../mpa_tasks/apis/task.py | 146 +--
 .../extensions/datasets/mpa_cls_dataset.py | 179 ++--
 .../extensions/datasets/mpa_det_dataset.py | 10 +-
 .../extensions/datasets/mpa_seg_dataset.py | 22 +-
 .../datasets/pipelines/mpa_cls_pipeline.py | 33 +-
 .../mpa_tasks/samples/cls_cls_il_sample.py | 219 ++---
 .../mpa_tasks/samples/det_cls_il_sample.py | 117 ++-
 .../samples/inst_seg_cls_il_sample.py | 255 +++---
 .../mpa_tasks/samples/seg_cls_il_sample.py | 209 ++---
 .../mpa_tasks/utils/__init__.py | 2 +
 .../mpa_tasks/utils/data_utils.py | 15 +-
 .../mpa_tasks/utils/runner.py | 17 +-
 external/model-preparation-algorithm/setup.py | 11 +-
 .../model-preparation-algorithm/submodule | 2 +-
 .../tests/__init__.py | 3 +
 .../api_tests/test_ote_classification_api.py | 279 ++++++
 .../tests/api_tests/test_ote_detection_api.py | 253 ++++++
 .../api_tests/test_ote_segmentation_api.py | 293 ++++++
 .../tests/config.py | 11 -
 .../tests/conftest.py | 53 +-
 .../metrics_test_ote_training.yml | 6 +-
 .../tests/mpa_common.py | 189 ++++
 .../tests/ote_cli/test_classification.py | 503 ++++++++++
 .../tests/ote_cli/test_det_cls_il.py | 178 ----
 ...multiclass_cls_il.py => test_detection.py} | 89 +-
 .../tests/ote_cli/test_hierarchical_cls_il.py | 162 ----
 ...ls_il.py => test_instance_segmentation.py} | 92 +-
 .../tests/ote_cli/test_multilabel_cls_il.py | 180 ----
 ...est_seg_cls_il.py => test_segmentation.py} | 84 +-
 .../tests/test_ote_api.py | 858 ------------------
 .../tests/test_ote_classification_training.py | 211 +++++
 .../tests/test_ote_detection_training.py | 247 +++++
 .../tests/test_ote_segmentation_training.py | 205 +++++
 .../tests/test_ote_training.py | 710 ---------------
 ote_sdk/ote_sdk/entities/dataset_item.py | 1 +
 ote_sdk/ote_sdk/entities/shapes/rectangle.py | 2 +-
 .../demo_package/executors/sync_pipeline.py | 2 +-
 ote_sdk/ote_sdk/utils/vis_utils.py | 30 +
 otx/api/entities/dataset_item.py | 1 +
 otx/api/entities/shapes/rectangle.py | 2 +-
 .../demo_package/executors/sync_pipeline.py | 2 +-
 otx/api/utils/vis_utils.py | 33 +
 .../ote_cli/external/deep-object-reid/ote_cli | 1 -
 tests/ote_cli/external/mmdetection/ote_cli | 1 -
 tests/ote_cli/external/mmsegmentation/ote_cli | 1 -
 tests/ote_cli/misc/test_docs.py | 35 +-
 tests/ote_cli/misc/test_template_files.py | 22 +-
 tests/ote_cli/pytest.ini | 2 +-
 154 files changed, 5053 insertions(+), 3907 deletions(-)
 create mode 100644 external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18/hpo_config.yaml
 create mode 100644 external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18/model.py
 create mode 100644 external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18/template.yaml
 create mode 100644 external/model-preparation-algorithm/tests/__init__.py
 create mode 100644 external/model-preparation-algorithm/tests/api_tests/test_ote_classification_api.py
 create mode 100644 external/model-preparation-algorithm/tests/api_tests/test_ote_detection_api.py
 create mode 100644 external/model-preparation-algorithm/tests/api_tests/test_ote_segmentation_api.py
 delete mode 100644 external/model-preparation-algorithm/tests/config.py
 create mode 100644 external/model-preparation-algorithm/tests/mpa_common.py
 create mode 100644 external/model-preparation-algorithm/tests/ote_cli/test_classification.py
 delete mode 100644 external/model-preparation-algorithm/tests/ote_cli/test_det_cls_il.py
 rename external/model-preparation-algorithm/tests/ote_cli/{test_multiclass_cls_il.py => test_detection.py} (59%)
 delete mode 100644 external/model-preparation-algorithm/tests/ote_cli/test_hierarchical_cls_il.py
 rename external/model-preparation-algorithm/tests/ote_cli/{test_ins_seg_cls_il.py => test_instance_segmentation.py} (58%)
 delete mode 100644 external/model-preparation-algorithm/tests/ote_cli/test_multilabel_cls_il.py
 rename external/model-preparation-algorithm/tests/ote_cli/{test_seg_cls_il.py => test_segmentation.py} (59%)
 delete mode 100644 external/model-preparation-algorithm/tests/test_ote_api.py
 create mode 100644 external/model-preparation-algorithm/tests/test_ote_classification_training.py
 create mode 100644 external/model-preparation-algorithm/tests/test_ote_detection_training.py
 create mode 100644 external/model-preparation-algorithm/tests/test_ote_segmentation_training.py
 delete mode 100644 external/model-preparation-algorithm/tests/test_ote_training.py
 create mode 100644 ote_sdk/ote_sdk/utils/vis_utils.py
 create mode 100644 otx/api/utils/vis_utils.py
 delete mode 120000 tests/ote_cli/external/deep-object-reid/ote_cli
 delete mode 120000 tests/ote_cli/external/mmdetection/ote_cli
 delete mode 120000 tests/ote_cli/external/mmsegmentation/ote_cli

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index aaf90cd50cb..35dba6f46de 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -19,7 +19,7 @@ repos:
       - id: isort
         alias: isort_rest
        name: "isort - legacy (ote_cli|external)"
-        files: '^(ote_cli|external/anomaly)/.*\.py'
+        files: '^(ote_cli|external/anomaly|external/model-preparation-algorithm)/.*\.py'
         exclude: "tests/"
 
   - repo: https://github.com/psf/black
@@ -39,7 +39,7 @@ repos:
       - id: black
         name: "black - legacy (rest)"
         args: [--line-length, "120"]
-        files: '^external/anomaly/.*\.py'
+        files: '^(external/anomaly|external/model-preparation-algorithm)/.*\.py'
 
   - repo: https://github.com/PyCQA/flake8
     rev: "5.0.3"
@@ -56,7 +56,7 @@
       # is to be removed.
       - id: flake8
         name: "flake8 - legacy "
-        files: '^(ote_sdk|ote_cli|external/anomaly)/.*\.py'
+        files: '^(ote_sdk|ote_cli|external/anomaly|external/model-preparation-algorithm)/.*\.py'
         args: ["--config", ".flake8", "--max-complexity", "20"]
         exclude: ".*/protobuf"

diff --git a/QUICK_START_GUIDE.md b/QUICK_START_GUIDE.md
index 7cff62bddb5..f8665775b5c 100644
--- a/QUICK_START_GUIDE.md
+++ b/QUICK_START_GUIDE.md
@@ -173,6 +173,9 @@ usage: ote train template params [-h]
                                  [--learning_parameters.learning_rate LEARNING_RATE]
                                  [--learning_parameters.learning_rate_warmup_iters LEARNING_RATE_WARMUP_ITERS]
                                  [--learning_parameters.num_iters NUM_ITERS]
+                                 [--learning_parameters.enable_early_stopping ENABLE_EARLY_STOPPING]
+                                 [--learning_parameters.early_stop_patience EARLY_STOP_PATIENCE]
+                                 [--learning_parameters.early_stop_iteration_patience EARLY_STOP_ITERATION_PATIENCE]
                                  [--postprocessing.confidence_threshold CONFIDENCE_THRESHOLD]
                                  [--postprocessing.result_based_confidence_threshold RESULT_BASED_CONFIDENCE_THRESHOLD]
                                  [--nncf_optimization.enable_quantization ENABLE_QUANTIZATION]
@@ -205,6 +208,22 @@ optional arguments:
     default_value: 300
     max_value: 100000
     min_value: 1
+  --learning_parameters.enable_early_stopping ENABLE_EARLY_STOPPING
+    header: Enable early stopping of the training
+    type: BOOLEAN
+    default_value: True
+  --learning_parameters.early_stop_patience EARLY_STOP_PATIENCE
+    header: Patience for early stopping
+    type: INTEGER
+    default_value: 10
+    max_value: 50
+    min_value: 0
+  --learning_parameters.early_stop_iteration_patience EARLY_STOP_ITERATION_PATIENCE
+    header: Iteration patience for early stopping
+    type: INTEGER
+    default_value: 0
+    max_value: 1000
+    min_value: 0
   --postprocessing.confidence_threshold CONFIDENCE_THRESHOLD
     header: Confidence threshold
     type: FLOAT
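The three flags documented above interact as follows: training stops once the validation score has failed to improve for `early_stop_patience` validation rounds, and `early_stop_iteration_patience` additionally requires that enough training iterations have passed without improvement before the stop is allowed. A minimal Python sketch of these semantics (illustrative only; the function and its arguments are hypothetical, not the MPA hook implementation):

    # Sketch of the early-stopping rule described above. `scores` holds one
    # validation score per epoch; `iters_per_epoch` is an assumed constant.
    def should_stop(scores, patience=10, iters_per_epoch=100, iteration_patience=0):
        best_epoch = max(range(len(scores)), key=lambda i: scores[i])
        epochs_without_improvement = len(scores) - 1 - best_epoch
        iters_without_improvement = epochs_without_improvement * iters_per_epoch
        # Stop only when both the epoch patience and the iteration patience
        # are exhausted, so short runs are not cut off prematurely.
        return (epochs_without_improvement > patience
                and iters_without_improvement > iteration_patience)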
diff --git a/external/README.md b/external/README.md
index 5e44c179aa7..86a73fc99b1 100644
--- a/external/README.md
+++ b/external/README.md
@@ -5,68 +5,60 @@ Every sub-project is fully indepedent from each other, and each of them has its
 
 ## Anomaly Classification
 
-| ID | Name | Complexity (GFlops) | Model size (MB) | Path |
-| -------------------------------- | ----- | ------------------- | --------------- | ---------------------------------------------------- |
-| ote_anomaly_classification_padim | PADIM | 3.9 | 168.4 | anomaly/templates/classification/padim/template.yaml |
-| ote_anomaly_classification_stfpm | STFPM | 5.6 | 21.1 | anomaly/templates/classification/stfpm/template.yaml |
+| ID | Name | Complexity (GFlops) | Model size (MB) | Path |
+| -------------------------------- | ----- | ------------------- | --------------- | -------------------------------------------------- |
+| ote_anomaly_classification_padim | PADIM | 3.9 | 168.4 | anomaly/configs/classification/padim/template.yaml |
+| ote_anomaly_classification_stfpm | STFPM | 5.6 | 21.1 | anomaly/configs/classification/stfpm/template.yaml |
 
 ## Anomaly Detection
 
-| ID | Name | Complexity (GFlops) | Model size (MB) | Path |
-| --------------------------- | ----- | ------------------- | --------------- | ----------------------------------------------- |
-| ote_anomaly_detection_padim | PADIM | 3.9 | 168.4 | anomaly/templates/detection/padim/template.yaml |
-| ote_anomaly_detection_stfpm | STFPM | 5.6 | 21.1 | anomaly/templates/detection/stfpm/template.yaml |
+| ID | Name | Complexity (GFlops) | Model size (MB) | Path |
+| --------------------------- | ----- | ------------------- | --------------- | --------------------------------------------- |
+| ote_anomaly_detection_padim | PADIM | 3.9 | 168.4 | anomaly/configs/detection/padim/template.yaml |
+| ote_anomaly_detection_stfpm | STFPM | 5.6 | 21.1 | anomaly/configs/detection/stfpm/template.yaml |
 
 ## Anomaly Segmentation
 
-| ID | Name | Complexity (GFlops) | Model size (MB) | Path |
-| ------------------------------ | ----- | ------------------- | --------------- | -------------------------------------------------- |
-| ote_anomaly_segmentation_padim | PADIM | 3.9 | 168.4 | anomaly/templates/segmentation/padim/template.yaml |
-| ote_anomaly_segmentation_stfpm | STFPM | 5.6 | 21.1 | anomaly/templates/segmentation/stfpm/template.yaml |
+| ID | Name | Complexity (GFlops) | Model size (MB) | Path |
+| ------------------------------ | ----- | ------------------- | --------------- | ------------------------------------------------ |
+| ote_anomaly_segmentation_padim | PADIM | 3.9 | 168.4 | anomaly/configs/segmentation/padim/template.yaml |
+| ote_anomaly_segmentation_stfpm | STFPM | 5.6 | 21.1 | anomaly/configs/segmentation/stfpm/template.yaml |
 
 ## Image Classification
 
-| ID | Name | Complexity (GFlops) | Model size (MB) | Path |
-| -------------------------------------------------------------- | -------------------------------- | ------------------- | --------------- | -------------------------------------------------------------------------------------------------- |
-| ClassIncremental_Image_Classification_MobileNet-V3-small | MobileNet-V3-small-ClsIncr | 0.12 | 1.56 | model-preparation-algorithm/configs/classification/mobilenet_v3_small_cls_incr/template.yaml |
-| ClassIncremental_Image_Classification_MobileNet-V3-large-0.75x | MobileNet-V3-large-0.75x-ClsIncr | 0.32 | 2.76 | model-preparation-algorithm/configs/classification/mobilenet_v3_large_075_cls_incr/template.yaml |
-| ClassIncremental_Image_Classification_MobileNet-V3-large-1x | MobileNet-V3-large-1x-ClsIncr | 0.44 | 4.29 | model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/template.yaml |
-| Custom_Image_Classification_MobileNet-V3-large-1x | MobileNet-V3-large-1x | 0.44 | 4.29 | deep-object-reid/configs/ote_custom_classification/mobilenet_v3_large_1/template_experimental.yaml |
-| ClassIncremental_Image_Classification_EfficinetNet-B0 | EfficientNet-B0-ClsIncr | 0.81 | 4.09 | model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/template.yaml |
-| Custom_Image_Classification_EfficinetNet-B0 | EfficientNet-B0 | 0.81 | 4.09 | deep-object-reid/configs/ote_custom_classification/efficientnet_b0/template_experimental.yaml |
-| ClassIncremental_Image_Classification_EfficinetNet-V2-S | EfficientNet-V2-S-ClsIncr | 5.76 | 20.23 | model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/template.yaml |
-| Custom_Image_Classification_EfficientNet-V2-S | EfficientNet-V2-S | 5.76 | 20.23 | deep-object-reid/configs/ote_custom_classification/efficientnet_v2_s/template_experimental.yaml |
+| ID | Name | Complexity (GFlops) | Model size (MB) | Path |
+| ------------------------------------------------- | --------------------- | ------------------- | --------------- | ---------------------------------------------------------------------------------------------- |
+| Custom_Image_Classification_MobileNet-V3-large-1x | MobileNet-V3-large-1x | 0.44 | 4.29 | model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/template.yaml |
+| Custom_Image_Classification_EfficinetNet-B0 | EfficientNet-B0 | 0.81 | 4.09 | model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/template.yaml |
+| Custom_Image_Classification_EfficientNet-V2-S | EfficientNet-V2-S | 5.76 | 20.23 | model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/template.yaml |
 
 ## Object Detection
 
-| ID | Name | Complexity (GFlops) | Model size (MB) | Path |
-| -------------------------------------------- | ------------- | ------------------- | --------------- | -------------------------------------------------------------------------------------------- |
-| Custom_Object_Detection_YOLOX | YOLOX | 6.5 | 20.4 | mmdetection/configs/custom-object-detection/cspdarknet_YOLOX/template_experimental.yaml |
-| Custom_Object_Detection_Gen3_SSD | SSD | 9.4 | 7.6 | mmdetection/configs/custom-object-detection/gen3_mobilenetV2_SSD/template_experimental.yaml |
-| ClassIncremental_Object_Detection_Gen3_ATSS | ATSS-ClsIncr | 20.6 | 9.1 | model-preparation-algorithm/configs/detection/mobilenetv2_atss_cls_incr/template.yaml |
-| Custom_Object_Detection_Gen3_ATSS | ATSS | 20.6 | 9.1 | mmdetection/configs/custom-object-detection/gen3_mobilenetV2_ATSS/template_experimental.yaml |
-| ClassIncremental_Object_Detection_Gen3_VFNet | VFNet-ClsIncr | 457.4 | 126.0 | model-preparation-algorithm/configs/detection/resnet50_vfnet_cls_incr/template.yaml |
+| ID | Name | Complexity (GFlops) | Model size (MB) | Path |
+| --------------------------------- | ----- | ------------------- | --------------- | ------------------------------------------------------------------------------------- |
+| Custom_Object_Detection_YOLOX | YOLOX | 6.5 | 20.4 | model-preparation-algorithm/configs/detection/cspdarknet_yolox_cls_incr/template.yaml |
+| Custom_Object_Detection_Gen3_SSD | SSD | 9.4 | 7.6 | model-preparation-algorithm/configs/detection/mobilenetv2_ssd_cls_incr/template.yaml |
+| Custom_Object_Detection_Gen3_ATSS | ATSS | 20.6 | 9.1 | model-preparation-algorithm/configs/detection/mobilenetv2_atss_cls_incr/template.yaml |
 
-## Object Counting
+## Instance Segmentation (Object Counting)
 
-| ID | Name | Complexity (GFlops) | Model size (MB) | Path |
-| -------------------------------------------------------------- | ------------------------ | ------------------- | --------------- | --------------------------------------------------------------------------------------- |
-| Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B | MaskRCNN-EfficientNetB2B | 68.48 | 13.27 | mmdetection/configs/custom-counting-instance-seg/efficientnetb2b_maskrcnn/template.yaml |
-| Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50 | MaskRCNN-ResNet50 | 533.8 | 177.9 | mmdetection/configs/custom-counting-instance-seg/resnet50_maskrcnn/template.yaml |
+| ID | Name | Complexity (GFlops) | Model size (MB) | Path |
+| -------------------------------------------------------------- | ------------------------ | ------------------- | --------------- | ------------------------------------------------------------------------------------------------ |
+| Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B | MaskRCNN-EfficientNetB2B | 68.48 | 13.27 | model-preparation-algorithm/configs/instance-segmentation/efficientnetb2b_maskrcnn/template.yaml |
+| Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50 | MaskRCNN-ResNet50 | 533.8 | 177.9 | model-preparation-algorithm/configs/instance-segmentation/resnet50_maskrcnn/template.yaml |
 
 ## Rotated Object Detection
 
-| ID | Name | Complexity (GFlops) | Model size (MB) | Path |
-| --------------------------------------------------------------------------- | ------------------------ | ------------------- | --------------- | ---------------------------------------------------------------------------- |
-| Custom_Rotated_Detection_via_Instance_Segmentation_MaskRCNN_EfficientNetB2B | MaskRCNN-EfficientNetB2B | 68.48 | 13.27 | mmdetection/configs/rotated_detection/efficientnetb2b_maskrcnn/template.yaml |
-| Custom_Rotated_Detection_via_Instance_Segmentation_MaskRCNN_ResNet50 | MaskRCNN-ResNet50 | 533.8 | 177.9 | mmdetection/configs/rotated_detection/resnet50_maskrcnn/template.yaml |
+| ID | Name | Complexity (GFlops) | Model size (MB) | Path |
+| --------------------------------------------------------------------------- | ------------------------ | ------------------- | --------------- | -------------------------------------------------------------------------------------------- |
+| Custom_Rotated_Detection_via_Instance_Segmentation_MaskRCNN_EfficientNetB2B | MaskRCNN-EfficientNetB2B | 68.48 | 13.27 | model-preparation-algorithm/configs/rotated-detection/efficientnetb2b_maskrcnn/template.yaml |
+| Custom_Rotated_Detection_via_Instance_Segmentation_MaskRCNN_ResNet50 | MaskRCNN-ResNet50 | 533.8 | 177.9 | model-preparation-algorithm/configs/rotated-detection/resnet50_maskrcnn/template.yaml |
 
 ## Semantic Segmentation
 
-| ID | Name | Complexity (GFlops) | Model size (MB) | Path |
-| -------------------------------------------------------- | ------------------------- | ------------------- | --------------- | ----------------------------------------------------------------------------------------- |
-| Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR | Lite-HRNet-s-mod2 OCR | 1.82 | 3.5 | mmsegmentation/configs/custom-sematic-segmentation/ocr-lite-hrnet-s-mod2/template.yaml |
-| ClassIncremental_Semantic_Segmentation_Lite-HRNet-18_OCR | Lite-HRNet-18 OCR-ClsIncr | 3.45 | 4.5 | model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18-cls-incr/template.yaml |
-| Custom_Semantic_Segmentation_Lite-HRNet-18_OCR | Lite-HRNet-18 OCR | 3.45 | 4.5 | mmsegmentation/configs/custom-sematic-segmentation/ocr-lite-hrnet-18/template.yaml |
-| Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR | Lite-HRNet-18-mod2 OCR | 3.63 | 4.8 | mmsegmentation/configs/custom-sematic-segmentation/ocr-lite-hrnet-18-mod2/template.yaml |
-| Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR | Lite-HRNet-x-mod3 OCR | 13.97 | 6.4 | mmsegmentation/configs/custom-sematic-segmentation/ocr-lite-hrnet-x-mod3/template.yaml |
+| ID | Name | Complexity (GFlops) | Model size (MB) | Path |
+| --------------------------------------------------- | ------------------ | ------------------- | --------------- | ------------------------------------------------------------------------------------- |
+| Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR | Lite-HRNet-s-mod2 | 1.82 | 3.5 | model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-s-mod2/template.yaml |
+| Custom_Semantic_Segmentation_Lite-HRNet-18_OCR | Lite-HRNet-18 | 3.45 | 4.5 | model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18/template.yaml |
+| Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR | Lite-HRNet-18-mod2 | 3.63 | 4.8 | model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18-mod2/template.yaml |
+| Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR | Lite-HRNet-x-mod3 | 13.97 | 6.4 | model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-x-mod3/template.yaml |

diff --git a/external/deep-object-reid/configs/ote_custom_classification/efficientnet_v2_s/compression_config.json b/external/deep-object-reid/configs/ote_custom_classification/efficientnet_v2_s/compression_config.json
index 753aef84907..19880ff318f 100644
--- a/external/deep-object-reid/configs/ote_custom_classification/efficientnet_v2_s/compression_config.json
+++ b/external/deep-object-reid/configs/ote_custom_classification/efficientnet_v2_s/compression_config.json
@@ -44,16 +44,6 @@
     },
     "nncf_config": {
         "compression": [
-            {
-                "algorithm": "filter_pruning",
-                "pruning_init": 0.1,
-                "params": {
-                    "schedule": "baseline",
-                    "pruning_flops_target": 0.1,
-                    "filter_importance": "geometric_median",
-                    "prune_downsample_convs": true
-                }
-            },
             {
                 "algorithm": "quantization",
                 "preset": "mixed",

diff --git a/external/deep-object-reid/configs/ote_custom_classification/efficientnet_v2_s/template_experimental.yaml b/external/deep-object-reid/configs/ote_custom_classification/efficientnet_v2_s/template_experimental.yaml
index c2a113f745b..a73b04f87cf 100644
--- a/external/deep-object-reid/configs/ote_custom_classification/efficientnet_v2_s/template_experimental.yaml
+++ b/external/deep-object-reid/configs/ote_custom_classification/efficientnet_v2_s/template_experimental.yaml
@@ -43,7 +43,7 @@ hyper_parameters:
       enable_pruning:
         default_value: false
       pruning_supported:
-        default_value: true
+        default_value: false
       maximal_accuracy_degradation:
         default_value: 1.0
 
diff --git a/external/deep-object-reid/constraints.txt b/external/deep-object-reid/constraints.txt
index 0a3ffc65c28..5e87a5382fe 100644
--- a/external/deep-object-reid/constraints.txt
+++ b/external/deep-object-reid/constraints.txt
@@ -1 +1,2 @@
 opencv-python==4.5.5.64 # remedy for fixed opencv-python-headless version in e2e-test-framework
+optuna==2.10.1 # remedy for fixed optuna version incompatible in OTE CI

diff --git a/external/deep-object-reid/submodule b/external/deep-object-reid/submodule
index 02df154b042..a4790d1b116 160000
--- a/external/deep-object-reid/submodule
+++ b/external/deep-object-reid/submodule
@@ -1 +1 @@
-Subproject commit 02df154b042fd184616c3c6a8a0f5e83dafc5de2
+Subproject commit a4790d1b116e46510ca83c5afc147d537f2b8527

diff --git a/external/deep-object-reid/tests/ote_cli/test_classification.py b/external/deep-object-reid/tests/ote_cli/test_classification.py
index 71449c9c9ec..88520479523 100644
--- a/external/deep-object-reid/tests/ote_cli/test_classification.py
+++ b/external/deep-object-reid/tests/ote_cli/test_classification.py
@@ -67,6 +67,7 @@
 templates_ids = [template.model_template_id for template in templates]
 
 
+@pytest.mark.skip(reason="This test case will be deprecated soon")
 class TestToolsClassification:
     @e2e_pytest_component
     def test_create_venv(self):
@@ -141,7 +142,6 @@ def test_nncf_export(self, template):
 
     @e2e_pytest_component
     @pytest.mark.parametrize("template", templates, ids=templates_ids)
-    @pytest.mark.xfail(reason="CVS-82892")
     def test_nncf_eval(self, template):
         if template.entrypoints.nncf is None:
             pytest.skip("nncf entrypoint is none")

diff --git a/external/deep-object-reid/tests/test_ote_api.py b/external/deep-object-reid/tests/test_ote_api.py
index 9e3bd5a0874..88d0da5f673 100644
--- a/external/deep-object-reid/tests/test_ote_api.py
+++ b/external/deep-object-reid/tests/test_ote_api.py
@@ -47,16 +47,19 @@
 
 
 @e2e_pytest_api
+@pytest.mark.skip(reason="This test case will be deprecated soon")
 def test_reading_efficientnet_b0():
     parse_model_template(osp.join('configs', 'ote_custom_classification', 'efficientnet_b0', 'template_experimental.yaml'))
 
 
 @e2e_pytest_api
+@pytest.mark.skip(reason="This test case will be deprecated soon")
 def test_reading_mobilenet_v3_large_075():
     parse_model_template(osp.join('configs', 'ote_custom_classification', 'mobilenet_v3_large_075', 'template_experimental.yaml'))
 
 
 @e2e_pytest_api
+@pytest.mark.skip(reason="This test case will be deprecated soon")
 def test_configuration_yaml():
     configuration = OTEClassificationParameters()
     configuration_yaml_str = convert(configuration, str)
@@ -128,6 +131,7 @@ def default_task_setup():
 
 
 @e2e_pytest_api
+@pytest.mark.skip(reason="This test case will be deprecated soon")
 def test_training_progress_tracking(default_task_setup):
     print('Task initialized, model training starts.')
     training_progress_curve = []
@@ -151,6 +155,7 @@ def progress_callback(progress: float, score: Optional[float] = None):
 
 
 @e2e_pytest_api
+@pytest.mark.skip(reason="This test case will be deprecated soon")
 def test_inference_progress_tracking(default_task_setup):
     task, _, dataset = default_task_setup
 
@@ -170,5 +175,6 @@ def progress_callback(progress: int):
     assert np.all(inference_progress_curve[1:] >= inference_progress_curve[:-1])
 
 @e2e_pytest_api
+@pytest.mark.skip(reason="This test case will be deprecated soon")
 def test_nncf_optimize_progress_tracking():
     pytest.xfail('NNCF is not supported yet')

diff --git a/external/deep-object-reid/tests/test_ote_training.py b/external/deep-object-reid/tests/test_ote_training.py
index a42e01e7a06..792cc2d802b 100644
--- a/external/deep-object-reid/tests/test_ote_training.py
+++ b/external/deep-object-reid/tests/test_ote_training.py
@@ -130,16 +130,16 @@ def test_bunches(self) -> List[Dict[str, Any]]:
         test_bunches = [
             dict(
                 model_name=[
-                   'Custom_Image_Classification_EfficientNet-V2-S',
-                   'Custom_Image_Classification_MobileNet-V3-large-1x',
+                    'Custom_Image_Classification_EfficientNet-V2-S',
+                    'Custom_Image_Classification_MobileNet-V3-large-1x',
                 ],
                 dataset_name=['lg_chem_short','mlc_voc_short'],
                 usecase='precommit',
             ),
             dict(
                 model_name=[
-                   'Custom_Image_Classification_EfficientNet-V2-S',
-                   'Custom_Image_Classification_MobileNet-V3-large-1x',
+                    'Custom_Image_Classification_EfficientNet-V2-S',
+                    'Custom_Image_Classification_MobileNet-V3-large-1x',
                 ],
                 dataset_name=['lg_chem','cifar100','mlc_voc'],
                 max_num_epochs=KEEP_CONFIG_FIELD_VALUE,
@@ -457,6 +457,7 @@ def data_collector_fx(self, request) -> DataCollector:
         logger.info('data_collector is released')
 
     @e2e_pytest_performance
+    @pytest.mark.skip(reason="This test case will be deprecated soon")
     def test(self,
              test_parameters,
              test_case_fx, data_collector_fx,

diff --git a/external/deep-object-reid/torchreid_tasks/inference_task.py b/external/deep-object-reid/torchreid_tasks/inference_task.py
index adb4c4acced..bca5795c3a3 100644
--- a/external/deep-object-reid/torchreid_tasks/inference_task.py
+++ b/external/deep-object-reid/torchreid_tasks/inference_task.py
@@ -48,15 +48,16 @@
     check_input_parameters_type,
 )
 from ote_sdk.utils.labels_utils import get_empty_label
+from ote_sdk.utils.vis_utils import get_actmap
 from scripts.default_config import (get_default_config, imagedata_kwargs,
                                     merge_from_files_with_base, model_kwargs)
 from torchreid.apis.export import export_ir, export_onnx
 from torchreid_tasks.monitors import DefaultMetricsMonitor, StopCallback
 from torchreid_tasks.parameters import OTEClassificationParameters
-from torchreid_tasks.utils import (active_score_from_probs, force_fp32, get_actmap, get_multiclass_predictions,
-                                   get_multilabel_predictions, InferenceProgressCallback,
-                                   OTEClassificationDataset, sigmoid_numpy, softmax_numpy,
-                                   get_multihead_class_info, get_hierarchical_predictions)
+from torchreid_tasks.utils import (active_score_from_probs, force_fp32, get_multiclass_predictions,
+                                   get_multilabel_predictions, InferenceProgressCallback,
+                                   OTEClassificationDataset, sigmoid_numpy, softmax_numpy,
+                                   get_multihead_class_info, get_hierarchical_predictions)
 from torchreid.metrics.classification import score_extraction
 from torchreid.utils import load_pretrained_weights
 
@@ -262,9 +263,9 @@ def infer(self, dataset: DatasetEntity,
 
                 if dump_features:
                     actmap = get_actmap(saliency_maps[i], (dataset_item.width, dataset_item.height))
-                    saliency_media = ResultMediaEntity(name="saliency_map", type="Saliency map",
+                    saliency_media = ResultMediaEntity(name="Saliency Map", type="saliency_map",
                                                        annotation_scene=dataset_item.annotation_scene,
-                                                       numpy=actmap, roi=dataset_item.roi, label = item_labels[0].label)
+                                                       numpy=actmap, roi=dataset_item.roi, label=item_labels[0].label)
                     dataset_item.append_metadata_item(saliency_media, model=self._task_environment.model)
 
         return dataset

diff --git a/external/deep-object-reid/torchreid_tasks/openvino_task.py b/external/deep-object-reid/torchreid_tasks/openvino_task.py
index 43e749a7792..c872ca83230 100644
--- a/external/deep-object-reid/torchreid_tasks/openvino_task.py
+++ b/external/deep-object-reid/torchreid_tasks/openvino_task.py
@@ -191,7 +191,7 @@ def infer(self, dataset: DatasetEntity,
                 feature_vec_media = TensorEntity(name="representation_vector", numpy=repr_vector.reshape(-1))
                 dataset_item.append_metadata_item(feature_vec_media, model=self.model)
             if dump_features:
-                saliency_media = ResultMediaEntity(name="saliency_map", type="Saliency map",
+                saliency_media = ResultMediaEntity(name="Saliency Map", type="saliency_map",
                                                    annotation_scene=dataset_item.annotation_scene,
                                                    numpy=actmap, roi=dataset_item.roi,
                                                    label=predicted_scene.annotations[0].get_labels()[0].label)

diff --git a/external/deep-object-reid/torchreid_tasks/utils.py b/external/deep-object-reid/torchreid_tasks/utils.py
index c4e4328c31d..aed9ad4794e 100644
--- a/external/deep-object-reid/torchreid_tasks/utils.py
+++ b/external/deep-object-reid/torchreid_tasks/utils.py
@@ -497,15 +497,6 @@ def on_initialization_end(self):
         self.update_progress_callback(self.get_progress())
 
 
-@check_input_parameters_type()
-def get_actmap(features: Union[np.ndarray, Iterable, int, float],
-               output_res: Union[tuple, list]):
-    am = cv.resize(features, output_res)
-    am = cv.applyColorMap(am, cv.COLORMAP_JET)
-    am = cv.cvtColor(am, cv.COLOR_BGR2RGB)
-    return am
-
-
 @check_input_parameters_type()
 def active_score_from_probs(predictions: Union[np.ndarray, Iterable, int, float]):
     top_idxs = np.argpartition(predictions, -2)[-2:]
@@ -614,7 +605,7 @@ def __init__(self,
     @master_only
     @check_input_parameters_type()
     def log(self, runner: BaseRunner):
-        tags = self.get_loggable_tags(runner, allow_text=False)
+        tags = self.get_loggable_tags(runner, allow_text=False, tags_to_skip=())
         if runner.max_epochs is not None:
             normalized_iter = self.get_iter(runner) / runner.max_iters * runner.max_epochs
         else:

diff --git a/external/mmdetection/configs/horizontal-text-detection/tools/draw_recall_graph.py b/external/mmdetection/configs/horizontal-text-detection/tools/draw_recall_graph.py
index fba2efed71c..000ee81a439 100644
--- a/external/mmdetection/configs/horizontal-text-detection/tools/draw_recall_graph.py
+++ b/external/mmdetection/configs/horizontal-text-detection/tools/draw_recall_graph.py
@@ -19,12 +19,12 @@
 
 import argparse
 from os.path import exists
-import subprocess  # nosec
+import subprocess
 
 import mmcv
 
-from mmdet.datasets import build_dataset # pylint: disable=import-error
-from mmdet.core.evaluation.text_evaluation import text_eval # pylint: disable=import-error
+from mmdet.datasets import build_dataset  # pylint: disable=import-error
+from mmdet.core.evaluation.text_evaluation import text_eval  # pylint: disable=import-error
 
 
 def parse_args():
@@ -49,13 +49,12 @@ def main():
     detection_file = 'horizontal_text_detection'
 
     if not exists(f'{detection_file}.bbox.json'):
-        subprocess.run(
-            f'python ../../../../../external/mmdetection/tools/test.py'
-            f' {args.config} {args.snapshot}'
-            f' --options jsonfile_prefix={detection_file}'
-            f' --format-only',
-            check=True, shell=True
-        )
+        command = [
+            'python', '../../../../../external/mmdetection/tools/test.py',
+            f'{args.config}', f'{args.snapshot}',
+            '--options', f'jsonfile_prefix={detection_file}', '--format-only'
+        ]
+        subprocess.run(command, check=True)
 
     cfg = mmcv.Config.fromfile(args.config)
     dataset = build_dataset(cfg.data.test)
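The draw_recall_graph.py hunk above replaces a shell-interpolated command string with an argument list, so no shell ever parses the arguments; that is also why the `# nosec` marker on the import could be dropped. A minimal standalone comparison in Python (the script path here is a hypothetical stand-in):

    import subprocess

    # List form: each argument is passed to the child process as-is, so shell
    # metacharacters in config or snapshot paths cannot inject extra commands.
    subprocess.run(["python", "tools/test.py", "--format-only"], check=True)

    # String form with shell=True (what the patch removes): the whole line is
    # parsed by the shell first, which is what security linters flag.
    subprocess.run("python tools/test.py --format-only", check=True, shell=True)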
diff --git a/external/mmdetection/detection_tasks/apis/detection/inference_task.py b/external/mmdetection/detection_tasks/apis/detection/inference_task.py
index 5670dbdf7e6..358b09a9750 100644
--- a/external/mmdetection/detection_tasks/apis/detection/inference_task.py
+++ b/external/mmdetection/detection_tasks/apis/detection/inference_task.py
@@ -51,6 +51,7 @@
     DatasetParamTypeCheck,
     check_input_parameters_type,
 )
+from ote_sdk.utils.vis_utils import get_actmap
 from mmdet.apis import export_model
 
 from detection_tasks.apis.detection.config_utils import patch_config, prepare_for_testing, set_hyperparams
@@ -250,13 +251,12 @@ def _add_predictions_to_dataset(self, prediction_results, dataset, confidence_th
             if feature_vector is not None:
                 active_score = TensorEntity(name="representation_vector", numpy=feature_vector)
                 dataset_item.append_metadata_item(active_score, model=self._task_environment.model)
-
+
             if saliency_map is not None:
-                width, height = dataset_item.width, dataset_item.height
-                saliency_map = cv2.resize(saliency_map, (width, height), interpolation=cv2.INTER_NEAREST)
-                saliency_map_media = ResultMediaEntity(name="saliency_map", type="Saliency map",
-                                                       annotation_scene=dataset_item.annotation_scene,
-                                                       numpy=saliency_map, roi=dataset_item.roi)
+                saliency_map = get_actmap(saliency_map, (dataset_item.width, dataset_item.height))
+                saliency_map_media = ResultMediaEntity(name="Saliency Map", type="saliency_map",
+                                                       annotation_scene=dataset_item.annotation_scene,
+                                                       numpy=saliency_map, roi=dataset_item.roi)
                 dataset_item.append_metadata_item(saliency_map_media, model=self._task_environment.model)
 
@@ -292,7 +292,7 @@ def hook(module, input, output):
         model = self._model
         with model.register_forward_pre_hook(pre_hook), model.register_forward_hook(hook):
             prediction_results, _ = self._infer_detector(model, self._config, dataset, dump_features=True, eval=False,
-                                                         dump_saliency_map=dump_saliency_map)
+                                                          dump_saliency_map=dump_saliency_map)
             self._add_predictions_to_dataset(prediction_results, dataset, self.confidence_threshold)
         logger.info('Inference completed')
@@ -337,8 +337,8 @@ def dump_saliency_hook(model: torch.nn.Module, input: Tuple, out: List[torch.Ten
 
         Args:
             model (torch.nn.Module): PyTorch model
-            input (Tuple): input
-            out (List[torch.Tensor]): a list of feature maps
+            input (Tuple): input
+            out (List[torch.Tensor]): a list of feature maps
         """
         with torch.no_grad():
             saliency_map = get_saliency_map(out[-1])

diff --git a/external/mmdetection/detection_tasks/apis/detection/openvino_task.py b/external/mmdetection/detection_tasks/apis/detection/openvino_task.py
index 0d57da2df21..2232cf90d8a 100644
--- a/external/mmdetection/detection_tasks/apis/detection/openvino_task.py
+++ b/external/mmdetection/detection_tasks/apis/detection/openvino_task.py
@@ -14,7 +14,6 @@
 
 import attr
 import copy
-import cv2
 import io
 import json
 import numpy as np
@@ -64,6 +63,7 @@
     DatasetParamTypeCheck,
     check_input_parameters_type,
 )
+from ote_sdk.utils.vis_utils import get_actmap
 from shutil import copyfile, copytree
 from typing import Any, Dict, List, Optional, Tuple, Union
 from zipfile import ZipFile
@@ -286,11 +286,10 @@ def infer(self, dataset: DatasetEntity, inference_parameters: Optional[Inference
                 dataset_item.append_metadata_item(representation_vector, model=self.model)
 
             if add_saliency_map and saliency_map is not None:
-                width, height = dataset_item.width, dataset_item.height
-                saliency_map = cv2.resize(saliency_map[0], (width, height), interpolation=cv2.INTER_NEAREST)
-                saliency_map_media = ResultMediaEntity(name="saliency_map", type="Saliency map",
-                                                       annotation_scene=dataset_item.annotation_scene,
-                                                       numpy=saliency_map, roi=dataset_item.roi)
+                saliency_map = get_actmap(saliency_map, (dataset_item.width, dataset_item.height))
+                saliency_map_media = ResultMediaEntity(name="Saliency Map", type="saliency_map",
+                                                       annotation_scene=dataset_item.annotation_scene,
+                                                       numpy=saliency_map, roi=dataset_item.roi)
                 dataset_item.append_metadata_item(saliency_map_media, model=self.model)
         logger.info('OpenVINO inference completed')
         return dataset

diff --git a/external/mmdetection/submodule b/external/mmdetection/submodule
index 628ce1b6f37..258ca9e6a35 160000
--- a/external/mmdetection/submodule
+++ b/external/mmdetection/submodule
@@ -1 +1 @@
-Subproject commit 628ce1b6f37150428be3c93445147d7c9c289456
+Subproject commit 258ca9e6a35a66495cd4f80a65394dac561cac88

diff --git a/external/mmdetection/tests/ote_cli/test_detection.py b/external/mmdetection/tests/ote_cli/test_detection.py
index a2516860d53..46420e21d4e 100644
--- a/external/mmdetection/tests/ote_cli/test_detection.py
+++ b/external/mmdetection/tests/ote_cli/test_detection.py
@@ -64,10 +64,11 @@
 root = '/tmp/ote_cli/'
 ote_dir = os.getcwd()
 
-templates = Registry('external/mmdetection').filter(task_type='DETECTION').templates
+templates = Registry('external/mmdetection', experimental=True).filter(task_type='DETECTION').templates
 templates_ids = [template.model_template_id for template in templates]
 
 
+@pytest.mark.skip(reason="This test case will be deprecated soon")
 class TestToolsDetection:
     @e2e_pytest_component
     def test_create_venv(self):

diff --git a/external/mmdetection/tests/ote_cli/test_instance_segmentation.py b/external/mmdetection/tests/ote_cli/test_instance_segmentation.py
index 46e6a729b91..afbeb1d422c 100644
--- a/external/mmdetection/tests/ote_cli/test_instance_segmentation.py
+++ b/external/mmdetection/tests/ote_cli/test_instance_segmentation.py
@@ -64,10 +64,11 @@
 root = '/tmp/ote_cli/'
 ote_dir = os.getcwd()
 
-templates = Registry('external/mmdetection').filter(task_type='INSTANCE_SEGMENTATION').templates
+templates = Registry('external/mmdetection', experimental=True).filter(task_type='INSTANCE_SEGMENTATION').templates
 templates_ids = [template.model_template_id for template in templates]
 
 
+@pytest.mark.skip(reason="This test case will be deprecated soon")
 class TestToolsInstanceSegmentation:
     @e2e_pytest_component
     def test_create_venv(self):

diff --git a/external/mmdetection/tests/ote_cli/test_rotated_detection.py b/external/mmdetection/tests/ote_cli/test_rotated_detection.py
index 2350cb6bed7..1c19787962e 100644
--- a/external/mmdetection/tests/ote_cli/test_rotated_detection.py
+++ b/external/mmdetection/tests/ote_cli/test_rotated_detection.py
@@ -57,10 +57,11 @@
 root = '/tmp/ote_cli/'
 ote_dir = os.getcwd()
 
-templates = Registry('external/mmdetection').filter(task_type='ROTATED_DETECTION').templates
+templates = Registry('external/mmdetection', experimental=True).filter(task_type='ROTATED_DETECTION').templates
 templates_ids = [template.model_template_id for template in templates]
 
 
+@pytest.mark.skip(reason="This test case will be deprecated soon")
 class TestToolsRotatedDetection:
     @e2e_pytest_component
     def test_create_venv(self):

diff --git a/external/mmdetection/tests/test_ote_api.py b/external/mmdetection/tests/test_ote_api.py
index ecb08405cdc..534a3f41a83 100644
--- a/external/mmdetection/tests/test_ote_api.py
+++ b/external/mmdetection/tests/test_ote_api.py
@@ -56,6 +56,7 @@
 DEFAULT_TEMPLATE_DIR = osp.join('configs', 'custom-object-detection', 'gen3_mobilenetV2_ATSS')
 
 
+@pytest.mark.skip(reason="This test case will be deprecated soon")
 class ModelTemplate(unittest.TestCase):
     def check_capabilities(self, template):
         self.assertTrue(template.computes_representations())
@@ -86,6 +87,7 @@ def test_reading_yolox(self):
 
 
 @e2e_pytest_api
+@pytest.mark.skip(reason="This test case will be deprecated soon")
 def test_configuration_yaml():
     configuration = OTEDetectionConfig()
     configuration_yaml_str = convert(configuration, str)
@@ -94,6 +96,7 @@ def test_configuration_yaml():
     assert configuration_yaml_converted == configuration_yaml_loaded
 
 
+@pytest.mark.skip(reason="This test case will be deprecated soon")
 class Sample(unittest.TestCase):
     template = osp.join(DEFAULT_TEMPLATE_DIR, 'template_experimental.yaml')
 
@@ -113,6 +116,7 @@ def test_sample_on_gpu(self):
         assert output.returncode == 0
 
 
+@pytest.mark.skip(reason="This test case will be deprecated soon")
 class API(unittest.TestCase):
     """
     Collection of tests for OTE API and OTE Model Templates

diff --git a/external/mmdetection/tests/test_ote_training.py b/external/mmdetection/tests/test_ote_training.py
index 7c076ebf987..ae57910c55b 100644
--- a/external/mmdetection/tests/test_ote_training.py
+++ b/external/mmdetection/tests/test_ote_training.py
@@ -102,16 +102,16 @@ def test_bunches(self) -> List[Dict[str, Any]]:
         test_bunches = [
             dict(
                 model_name=[
-                   'Custom_Object_Detection_Gen3_ATSS',
-                   'Custom_Object_Detection_Gen3_SSD',
+                    'Custom_Object_Detection_Gen3_ATSS',
+                    'Custom_Object_Detection_Gen3_SSD',
                 ],
                 dataset_name='dataset1_tiled_shortened_500_A',
                 usecase='precommit',
             ),
             dict(
                 model_name=[
-                   'Custom_Object_Detection_Gen3_ATSS',
-                   'Custom_Object_Detection_Gen3_SSD',
+                    'Custom_Object_Detection_Gen3_ATSS',
+                    'Custom_Object_Detection_Gen3_SSD',
                 ],
                 dataset_name=[
                     'bbcd',
@@ -336,6 +336,7 @@ def test_case_fx(self, current_test_parameters_fx, params_factories_for_test_act
         return test_case
 
     @e2e_pytest_performance
+    @pytest.mark.skip(reason="This test case will be deprecated soon")
     def test(self,
              test_parameters,
              test_case_fx, data_collector_fx,
@@ -377,6 +378,7 @@ def test_case_fx(self, current_test_parameters_fx, params_factories_for_test_act
         return test_case
 
     @e2e_pytest_performance
+    @pytest.mark.skip(reason="This test case will be deprecated soon")
     def test(self,
              test_parameters,
              test_case_fx, data_collector_fx,

diff --git a/external/mmsegmentation/segmentation_tasks/apis/segmentation/inference_task.py b/external/mmsegmentation/segmentation_tasks/apis/segmentation/inference_task.py
index 9679a899f6c..02431bdfc74 100644
--- a/external/mmsegmentation/segmentation_tasks/apis/segmentation/inference_task.py
+++ b/external/mmsegmentation/segmentation_tasks/apis/segmentation/inference_task.py
@@ -49,7 +49,7 @@
 )
 
 from mmseg.apis import export_model
-from mmseg.core.hooks.auxiliary_hooks import FeatureVectorHook, SaliencyMapHook
+from mmseg.core.hooks.auxiliary_hooks import FeatureVectorHook
 from mmseg.datasets import build_dataloader, build_dataset
 from mmseg.models import build_segmentor
 from mmseg.parallel import MMDataCPU
@@ -196,16 +196,15 @@ def hook(module, input, output):
         pre_hook_handle = self._model.register_forward_pre_hook(pre_hook)
         hook_handle = self._model.register_forward_hook(hook)
 
-        prediction_results = self._infer_segmentor(self._model, self._config, dataset, dump_features=True,
-                                                   dump_saliency_map=not is_evaluation)
-        self._add_predictions_to_dataset(prediction_results, dataset)
+        prediction_results = self._infer_segmentor(self._model, self._config, dataset, dump_features=True)
+        self._add_predictions_to_dataset(prediction_results, dataset, dump_soft_prediction=not is_evaluation)
 
         pre_hook_handle.remove()
         hook_handle.remove()
 
         return dataset
 
-    def _add_predictions_to_dataset(self, prediction_results, dataset):
-        for dataset_item, (prediction, feature_vector, saliency_map) in zip(dataset, prediction_results):
+    def _add_predictions_to_dataset(self, prediction_results, dataset, dump_soft_prediction):
+        for dataset_item, (prediction, feature_vector) in zip(dataset, prediction_results):
             soft_prediction = np.transpose(prediction, axes=(1, 2, 0))
             hard_prediction = create_hard_prediction_from_soft_prediction(
                 soft_prediction=soft_prediction,
@@ -223,18 +222,23 @@ def _add_predictions_to_dataset(self, prediction_results, dataset):
                 active_score = TensorEntity(name="representation_vector", numpy=feature_vector.reshape(-1))
                 dataset_item.append_metadata_item(active_score, model=self._task_environment.model)
 
-            if saliency_map is not None:
-                class_act_map = get_activation_map(saliency_map, (dataset_item.width, dataset_item.height))
-                result_media = ResultMediaEntity(name="saliency_map",
-                                                 type="Saliency map",
-                                                 annotation_scene=dataset_item.annotation_scene,
-                                                 roi=dataset_item.roi,
-                                                 numpy=class_act_map)
-                dataset_item.append_metadata_item(result_media, model=self._task_environment.model)
-
-    def _infer_segmentor(self,
-                         model: torch.nn.Module, config: Config, dataset: DatasetEntity,
-                         dump_features: bool = False, dump_saliency_map: bool = False) -> None:
+            if dump_soft_prediction:
+                for label_index, label in self._label_dictionary.items():
+                    if label_index == 0:
+                        continue
+                    current_label_soft_prediction = soft_prediction[:, :, label_index]
+
+                    class_act_map = get_activation_map(current_label_soft_prediction)
+                    result_media = ResultMediaEntity(name='Soft Prediction',
+                                                     type='soft_prediction',
+                                                     label=label,
+                                                     annotation_scene=dataset_item.annotation_scene,
+                                                     roi=dataset_item.roi,
+                                                     numpy=class_act_map)
+                    dataset_item.append_metadata_item(result_media, model=self._task_environment.model)
+
+    def _infer_segmentor(self, model: torch.nn.Module, config: Config, dataset: DatasetEntity,
+                         dump_features: bool = False) -> None:
         model.eval()
 
         test_config = prepare_for_testing(config, dataset)
@@ -254,21 +258,18 @@ def _infer_segmentor(self,
 
         eval_predictions = []
         feature_vectors = []
-        saliency_maps = []
 
         # Use a single gpu for testing. Set in both mm_val_dataloader and eval_model
         with FeatureVectorHook(model.module.backbone) if dump_features else nullcontext() as fhook:
-            with SaliencyMapHook(model.module.backbone) if dump_saliency_map else nullcontext() as shook:
-                for data in mm_val_dataloader:
-                    with torch.no_grad():
-                        result = model(return_loss=False, output_logits=True, **data)
-                    eval_predictions.extend(result)
-                feature_vectors = fhook.records if dump_features else [None] * len(dataset)
-                saliency_maps = shook.records if dump_saliency_map else [None] * len(dataset)
-        assert len(eval_predictions) == len(feature_vectors) == len(saliency_maps), \
+            for data in mm_val_dataloader:
+                with torch.no_grad():
+                    result = model(return_loss=False, output_logits=True, **data)
+                eval_predictions.extend(result)
+            feature_vectors = fhook.records if dump_features else [None] * len(dataset)
+        assert len(eval_predictions) == len(feature_vectors), \
             'Number of elements should be the same, however, number of outputs are ' \
-            f"{len(eval_predictions)}, {len(feature_vectors)}, and {len(saliency_maps)}"
-        predictions = zip(eval_predictions, feature_vectors, saliency_maps)
+            f"{len(eval_predictions)} and {len(feature_vectors)}"
+        predictions = zip(eval_predictions, feature_vectors)
         return predictions
 
     @check_input_parameters_type()

diff --git a/external/mmsegmentation/segmentation_tasks/apis/segmentation/model_wrappers/blur.py b/external/mmsegmentation/segmentation_tasks/apis/segmentation/model_wrappers/blur.py
index 5e4fde00296..82431788ace 100644
--- a/external/mmsegmentation/segmentation_tasks/apis/segmentation/model_wrappers/blur.py
+++ b/external/mmsegmentation/segmentation_tasks/apis/segmentation/model_wrappers/blur.py
@@ -26,13 +26,16 @@
 
 
 @check_input_parameters_type()
-def get_actmap(
-    features: Union[np.ndarray, Iterable, int, float], output_res: Union[tuple, list]
-):
-    am = cv2.resize(features, output_res)
-    am = cv2.applyColorMap(am, cv2.COLORMAP_JET)
-    am = cv2.cvtColor(am, cv2.COLOR_BGR2RGB)
-    return am
+def get_activation_map(features: Union[np.ndarray, Iterable, int, float]):
+    min_soft_score = np.min(features)
+    max_soft_score = np.max(features)
+    factor = 255.0 / (max_soft_score - min_soft_score + 1e-12)
+
+    float_act_map = factor * (features - min_soft_score)
+    int_act_map = np.uint8(np.floor(float_act_map))
+    int_act_map = cv2.applyColorMap(int_act_map, cv2.COLORMAP_JET)
+    int_act_map = cv2.cvtColor(int_act_map, cv2.COLOR_BGR2RGB)
+    return int_act_map
 
 
 class BlurSegmentation(SegmentationModel):
@@ -78,18 +81,16 @@ def postprocess(self, outputs: Dict[str, np.ndarray], metadata: Dict[str, Any]):
             soft_threshold=self.soft_threshold,
             blur_strength=self.blur_strength
         )
-        hard_prediction = cv2.resize(hard_prediction, metadata['original_shape'][1::-1], 0, 0, interpolation=cv2.INTER_NEAREST)
-
-        if 'feature_vector' not in outputs or 'saliency_map' not in outputs:
-            warnings.warn('Could not find Feature Vector and Saliency Map in OpenVINO output. '
-                          'Please rerun OpenVINO export or retrain the model.')
-            metadata["saliency_map"] = None
+        hard_prediction = cv2.resize(hard_prediction, metadata['original_shape'][1::-1], 0, 0,
+                                     interpolation=cv2.INTER_NEAREST)
+        soft_prediction = cv2.resize(soft_prediction, metadata['original_shape'][1::-1], 0, 0,
+                                     interpolation=cv2.INTER_NEAREST)
+        metadata['soft_prediction'] = soft_prediction
+
+        if 'feature_vector' not in outputs:
+            warnings.warn('Could not find Feature Vector in OpenVINO output. Please rerun export or retrain the model.')
             metadata["feature_vector"] = None
         else:
-            metadata["saliency_map"] = get_actmap(
-                outputs["saliency_map"][0],
-                (metadata["original_shape"][1], metadata["original_shape"][0]),
-            )
             metadata["feature_vector"] = outputs["feature_vector"].reshape(-1)
 
         return hard_prediction
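The new `get_activation_map` above min-max normalizes one class channel of the soft prediction into the 0-255 range before colorizing it, instead of resizing a precomputed saliency map. The same steps as a self-contained snippet (the random input is a stand-in for a real soft-prediction channel):

    import cv2
    import numpy as np

    features = np.random.rand(64, 64).astype(np.float32)  # stand-in soft scores
    factor = 255.0 / (features.max() - features.min() + 1e-12)  # 1e-12 avoids divide-by-zero
    act_map = np.uint8(np.floor(factor * (features - features.min())))
    act_map = cv2.applyColorMap(act_map, cv2.COLORMAP_JET)  # 8-bit heatmap (BGR)
    act_map = cv2.cvtColor(act_map, cv2.COLOR_BGR2RGB)      # RGB, as ResultMediaEntity expects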
len(dataset) for i, dataset_item in enumerate(dataset, 1): - predicted_scene, feature_vector, saliency_map = self.inferencer.predict(dataset_item.numpy) + predicted_scene, feature_vector, soft_prediction = self.inferencer.predict(dataset_item.numpy) dataset_item.append_annotations(predicted_scene.annotations) if feature_vector is not None: feature_vector_media = TensorEntity(name="representation_vector", numpy=feature_vector.reshape(-1)) dataset_item.append_metadata_item(feature_vector_media, model=self.model) - if dump_saliency_map and saliency_map is not None: - saliency_map_media = ResultMediaEntity(name="saliency_map", type="Saliency map", - annotation_scene=dataset_item.annotation_scene, - numpy=saliency_map, roi=dataset_item.roi) - dataset_item.append_metadata_item(saliency_map_media, model=self.model) + if dump_soft_prediction: + for label_index, label in self._label_dictionary.items(): + if label_index == 0: + continue + current_label_soft_prediction = soft_prediction[:, :, label_index] + class_act_map = get_activation_map(current_label_soft_prediction) + result_media = ResultMediaEntity(name='Soft Prediction', + type='soft_prediction', + label=label, + annotation_scene=dataset_item.annotation_scene, + roi=dataset_item.roi, + numpy=class_act_map) + dataset_item.append_metadata_item(result_media, model=self.model) update_progress_callback(int(i / dataset_size * 100)) diff --git a/external/mmsegmentation/segmentation_tasks/apis/segmentation/ote_utils.py b/external/mmsegmentation/segmentation_tasks/apis/segmentation/ote_utils.py index 9250d7bc819..e567c290658 100644 --- a/external/mmsegmentation/segmentation_tasks/apis/segmentation/ote_utils.py +++ b/external/mmsegmentation/segmentation_tasks/apis/segmentation/ote_utils.py @@ -44,11 +44,16 @@ def get_task_class(path: str): @check_input_parameters_type() -def get_activation_map(features: Union[np.ndarray, Iterable, int, float], output_res: Union[tuple, list]): - am = cv2.resize(features, output_res) - am = cv2.applyColorMap(am, cv2.COLORMAP_JET) - am = cv2.cvtColor(am, cv2.COLOR_BGR2RGB) - return am +def get_activation_map(features: Union[np.ndarray, Iterable, int, float]): + min_soft_score = np.min(features) + max_soft_score = np.max(features) + factor = 255.0 / (max_soft_score - min_soft_score + 1e-12) + + float_act_map = factor * (features - min_soft_score) + int_act_map = np.uint8(np.floor(float_act_map)) + int_act_map = cv2.applyColorMap(int_act_map, cv2.COLORMAP_JET) + int_act_map = cv2.cvtColor(int_act_map, cv2.COLOR_BGR2RGB) + return int_act_map class TrainingProgressCallback(TimeMonitorCallback): diff --git a/external/mmsegmentation/submodule b/external/mmsegmentation/submodule index 337c109596a..c652095c2ff 160000 --- a/external/mmsegmentation/submodule +++ b/external/mmsegmentation/submodule @@ -1 +1 @@ -Subproject commit 337c109596acf7f06a73c199f368a8f0f18d026c +Subproject commit c652095c2ff342561bf9c9bdf8567522cf11d19a diff --git a/external/mmsegmentation/tests/ote_cli/test_segmentation.py b/external/mmsegmentation/tests/ote_cli/test_segmentation.py index 50c3bd1ea6e..ce641cdbfce 100644 --- a/external/mmsegmentation/tests/ote_cli/test_segmentation.py +++ b/external/mmsegmentation/tests/ote_cli/test_segmentation.py @@ -72,6 +72,7 @@ templates_ids = [template.model_template_id for template in templates] +@pytest.mark.skip(reason="This test case will be deprecated soon") class TestToolsSegmentation: @e2e_pytest_component def test_create_venv(self): diff --git a/external/mmsegmentation/tests/test_ote_api.py 
b/external/mmsegmentation/tests/test_ote_api.py index aa9ecfc98c4..b28c55a2cb2 100644 --- a/external/mmsegmentation/tests/test_ote_api.py +++ b/external/mmsegmentation/tests/test_ote_api.py @@ -46,6 +46,7 @@ DEFAULT_TEMPLATE_DIR = osp.join('configs', 'custom-sematic-segmentation', 'ocr-lite-hrnet-18-mod2') +@pytest.mark.skip(reason="This test case will be deprecated soon") class API(unittest.TestCase): """ Collection of tests for OTE API and OTE Model Templates diff --git a/external/mmsegmentation/tests/test_ote_configuration.py b/external/mmsegmentation/tests/test_ote_configuration.py index 74b97161552..e81e425dfe9 100644 --- a/external/mmsegmentation/tests/test_ote_configuration.py +++ b/external/mmsegmentation/tests/test_ote_configuration.py @@ -13,6 +13,7 @@ # and limitations under the License. import os.path as osp +import pytest from ote_sdk.configuration.helper import convert, create from ote_sdk.test_suite.e2e_test_system import e2e_pytest_api @@ -21,6 +22,7 @@ @e2e_pytest_api +@pytest.mark.skip(reason="This test case will be deprecated soon") def test_configuration_yaml(): configuration = OTESegmentationConfig() configuration_yaml_str = convert(configuration, str) diff --git a/external/mmsegmentation/tests/test_ote_training.py b/external/mmsegmentation/tests/test_ote_training.py index 20c0a70fcf3..68a6517690e 100644 --- a/external/mmsegmentation/tests/test_ote_training.py +++ b/external/mmsegmentation/tests/test_ote_training.py @@ -99,20 +99,20 @@ def test_bunches(self) -> List[Dict[str, Any]]: test_bunches = [ dict( model_name=[ - 'Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR', - 'Custom_Semantic_Segmentation_Lite-HRNet-18_OCR', - 'Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR', - 'Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR', + 'Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR', + 'Custom_Semantic_Segmentation_Lite-HRNet-18_OCR', + 'Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR', + 'Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR', ], dataset_name='kvasir_seg_shortened', usecase='precommit', ), dict( model_name=[ - 'Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR', - 'Custom_Semantic_Segmentation_Lite-HRNet-18_OCR', - 'Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR', - 'Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR', + 'Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR', + 'Custom_Semantic_Segmentation_Lite-HRNet-18_OCR', + 'Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR', + 'Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR', ], dataset_name='kvasir_seg', num_training_iters=KEEP_CONFIG_FIELD_VALUE, @@ -272,6 +272,7 @@ def data_collector_fx(self, request) -> DataCollector: logger.info('data_collector is released') @e2e_pytest_performance + @pytest.mark.skip(reason="This test case will be deprecated soon") def test(self, test_parameters, test_case_fx, data_collector_fx, diff --git a/external/model-preparation-algorithm/configs/classification/configuration.yaml b/external/model-preparation-algorithm/configs/classification/configuration.yaml index 02ba42904c8..79603e0e90a 100644 --- a/external/model-preparation-algorithm/configs/classification/configuration.yaml +++ b/external/model-preparation-algorithm/configs/classification/configuration.yaml @@ -121,17 +121,68 @@ learning_parameters: enable_early_stopping: affects_outcome_of: TRAINING default_value: true - description: Adaptive early exit from training when accuracy isn't changed or decreased for several epochs. 
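The get_activation_map rewrite above (in blur.py and ote_utils.py) drops the resize-and-colormap approach and instead min-max normalizes the raw class scores into [0, 255] before applying the JET colormap; the 1e-12 term guards against division by zero on constant-valued score maps, and infer() then colorizes each non-background channel of the soft prediction separately. A minimal sketch of the same transform on a toy soft prediction (the array and shapes here are illustrative, not part of the patch):

    import cv2
    import numpy as np

    # Toy soft prediction: H x W x C class scores (channel 0 = background).
    soft_prediction = np.random.rand(64, 64, 2).astype(np.float32)

    # Same math as the new get_activation_map, applied to one class channel.
    scores = soft_prediction[:, :, 1]
    factor = 255.0 / (scores.max() - scores.min() + 1e-12)
    act_map = np.uint8(np.floor(factor * (scores - scores.min())))
    act_map = cv2.applyColorMap(act_map, cv2.COLORMAP_JET)
    act_map = cv2.cvtColor(act_map, cv2.COLOR_BGR2RGB)
    print(act_map.shape, act_map.dtype)  # (64, 64, 3) uint8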
+ description: Early exit from training when validation accuracy does not improve for several epochs. editable: true - header: Enable adaptive early stopping of the training + header: Enable early stopping of the training type: BOOLEAN ui_rules: action: DISABLE_EDITING operator: AND rules: [] type: UI_RULES - visible_in_ui: false + visible_in_ui: true warning: null + early_stop_start: + affects_outcome_of: TRAINING + default_value: 3 + editable: true + header: Start epoch for early stopping + max_value: 1000 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 3 + visible_in_ui: false + early_stop_patience: + affects_outcome_of: TRAINING + default_value: 8 + description: Training will stop if the model does not improve within the number of epochs of patience. + editable: true + header: Patience for early stopping + max_value: 50 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 8 + visible_in_ui: true + warning: This is applied exclusively when early stopping is enabled. + early_stop_iteration_patience: + affects_outcome_of: TRAINING + default_value: 0 + description: + Training will stop if the model does not improve within the number of iterations of patience. + This ensures the model is trained enough with the number of iterations of patience before early stopping. + editable: true + header: Iteration patience for early stopping + max_value: 1000 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 0 + visible_in_ui: true + warning: This is applied exclusively when early stopping is enabled. type: PARAMETER_GROUP visible_in_ui: true pot_parameters: @@ -208,6 +259,21 @@ nncf_optimization: value: false visible_in_ui: true warning: null + pruning_supported: + affects_outcome_of: TRAINING + default_value: false + description: Whether filter pruning is supported + editable: false + header: Whether filter pruning is supported + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: false + visible_in_ui: false + warning: null maximal_accuracy_degradation: affects_outcome_of: TRAINING default_value: 1.0 diff --git a/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/hpo_config.yaml b/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/hpo_config.yaml index ba221b39335..613f82e7c36 100644 --- a/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/hpo_config.yaml +++ b/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/hpo_config.yaml @@ -4,12 +4,12 @@ hp_space: learning_parameters.learning_rate: param_type: qloguniform range: - - 0.0003 - - 0.1 - - 0.0001 + - 0.00098 + - 0.0245 + - 0.00001 learning_parameters.batch_size: param_type: qloguniform range: - - 64 - - 256 + - 42 + - 96 - 2 diff --git a/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/model.py b/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/model.py index 7b536275990..3d7938498ae 100644 --- a/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/model.py +++ b/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/model.py @@ -1,4 +1,5 @@ _base_ = [ -
'../../../submodule/models/classification/ote_efficientnet_b0.yaml', + "../../../submodule/models/classification/ote_efficientnet_b0.yaml", ] -fp16 = dict(loss_scale=512.) + +fp16 = dict(loss_scale=512.0) diff --git a/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/model_hierarchical.py b/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/model_hierarchical.py index 92d206b6854..f7c4499e808 100644 --- a/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/model_hierarchical.py +++ b/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/model_hierarchical.py @@ -1,3 +1,3 @@ _base_ = [ - '../../../submodule/models/classification/ote_efficientnet_b0_hierarchical.yaml', + "../../../submodule/models/classification/ote_efficientnet_b0_hierarchical.yaml", ] diff --git a/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/model_multilabel.py b/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/model_multilabel.py index 983d860df3c..41645c99294 100644 --- a/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/model_multilabel.py +++ b/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/model_multilabel.py @@ -1,4 +1,5 @@ _base_ = [ - '../../../submodule/models/classification/ote_efficientnet_b0_multilabel.yaml', + "../../../submodule/models/classification/ote_efficientnet_b0_multilabel.yaml", ] -fp16 = dict(loss_scale=512.) + +fp16 = dict(loss_scale=512.0) diff --git a/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/template.yaml b/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/template.yaml index 5d3ff638f67..f4facd27467 100644 --- a/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/template.yaml +++ b/external/model-preparation-algorithm/configs/classification/efficientnet_b0_cls_incr/template.yaml @@ -32,10 +32,21 @@ hyper_parameters: num_workers: default_value: 0 learning_rate: - default_value: 0.007 + default_value: 0.0049 auto_hpo_state: POSSIBLE + learning_rate_warmup_iters: + default_value: 0 num_iters: - default_value: 20 + default_value: 90 + nncf_optimization: + enable_quantization: + default_value: true + enable_pruning: + default_value: false + pruning_supported: + default_value: true + maximal_accuracy_degradation: + default_value: 1.0 algo_backend: train_type: default_value: Incremental diff --git a/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/hpo_config.yaml b/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/hpo_config.yaml index e5b6ddbd7d0..4b79b24194c 100644 --- a/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/hpo_config.yaml +++ b/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/hpo_config.yaml @@ -4,12 +4,12 @@ hp_space: learning_parameters.learning_rate: param_type: qloguniform range: - - 0.0014 - - 0.035 + - 0.0007 + - 0.07 - 0.0001 learning_parameters.batch_size: param_type: qloguniform range: - - 40 - - 96 + - 32 + - 128 - 2 diff --git a/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/model.py b/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/model.py index 
fad4ad46208..e8cdfa2849a 100644 --- a/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/model.py +++ b/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/model.py @@ -1,4 +1,5 @@ _base_ = [ - '../../../submodule/models/classification/ote_efficientnet_v2_s.yaml', + "../../../submodule/models/classification/ote_efficientnet_v2_s.yaml", ] -fp16 = dict(loss_scale=512.) + +fp16 = dict(loss_scale=512.0) diff --git a/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/model_hierarchical.py b/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/model_hierarchical.py index 2817848cc72..f9ce8f7def3 100644 --- a/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/model_hierarchical.py +++ b/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/model_hierarchical.py @@ -1,3 +1,3 @@ _base_ = [ - '../../../submodule/models/classification/ote_efficientnet_v2_s_hierarchical.yaml', + "../../../submodule/models/classification/ote_efficientnet_v2_s_hierarchical.yaml", ] diff --git a/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/model_multilabel.py b/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/model_multilabel.py index efe6977c397..7b54b4453d1 100644 --- a/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/model_multilabel.py +++ b/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/model_multilabel.py @@ -1,4 +1,5 @@ _base_ = [ - '../../../submodule/models/classification/ote_efficientnet_v2_s_multilabel.yaml', + "../../../submodule/models/classification/ote_efficientnet_v2_s_multilabel.yaml", ] -fp16 = dict(loss_scale=512.) + +fp16 = dict(loss_scale=512.0) diff --git a/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/template.yaml b/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/template.yaml index 43b8d26ec92..ea8254c8567 100644 --- a/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/template.yaml +++ b/external/model-preparation-algorithm/configs/classification/efficientnet_v2_s_cls_incr/template.yaml @@ -1,5 +1,5 @@ # Description. 
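The hpo_config.yaml hunks above retune the HPO search spaces. Each qloguniform range is a (low, high, step) triple: candidates are drawn log-uniformly between low and high and then quantized to a multiple of step. A rough sketch of such a draw, assuming nothing about the actual HPO backend (the helper name is hypothetical):

    import math
    import random

    def sample_qloguniform(low: float, high: float, step: float) -> float:
        """Draw log-uniformly from [low, high], then quantize to a multiple of step."""
        value = math.exp(random.uniform(math.log(low), math.log(high)))
        return round(value / step) * step

    # e.g. the EfficientNet-B0 learning-rate space above: 0.00098 .. 0.0245, step 0.00001
    print(sample_qloguniform(0.00098, 0.0245, 0.00001))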
-model_template_id: Custom_Image_Classification_EfficinetNet-V2-S +model_template_id: Custom_Image_Classification_EfficientNet-V2-S name: EfficientNet-V2-S task_type: CLASSIFICATION task_family: VISION @@ -32,10 +32,21 @@ hyper_parameters: num_workers: default_value: 0 learning_rate: - default_value: 0.007 + default_value: 0.0071 auto_hpo_state: POSSIBLE + learning_rate_warmup_iters: + default_value: 0 num_iters: - default_value: 20 + default_value: 90 + nncf_optimization: + enable_quantization: + default_value: true + enable_pruning: + default_value: false + pruning_supported: + default_value: false + maximal_accuracy_degradation: + default_value: 1.0 algo_backend: train_type: default_value: Incremental diff --git a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_075_cls_incr/model.py b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_075_cls_incr/model.py index cf02d45f74c..3be8738318d 100644 --- a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_075_cls_incr/model.py +++ b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_075_cls_incr/model.py @@ -1,3 +1,3 @@ _base_ = [ - '../../../submodule/models/classification/ote_mobilenet_v3_large_075.yaml', + "../../../submodule/models/classification/ote_mobilenet_v3_large_075.yaml", ] diff --git a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_075_cls_incr/model_hierarchical.py b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_075_cls_incr/model_hierarchical.py index 6755480df70..24b26f7c716 100644 --- a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_075_cls_incr/model_hierarchical.py +++ b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_075_cls_incr/model_hierarchical.py @@ -1,3 +1,3 @@ _base_ = [ - '../../../submodule/models/classification/ote_mobilenet_v3_large_075_hierarchical.yaml', + "../../../submodule/models/classification/ote_mobilenet_v3_large_075_hierarchical.yaml", ] diff --git a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_075_cls_incr/model_multilabel.py b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_075_cls_incr/model_multilabel.py index b914096de47..04c2361099f 100644 --- a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_075_cls_incr/model_multilabel.py +++ b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_075_cls_incr/model_multilabel.py @@ -1,3 +1,3 @@ _base_ = [ - '../../../submodule/models/classification/ote_mobilenet_v3_large_075_multilabel.yaml', + "../../../submodule/models/classification/ote_mobilenet_v3_large_075_multilabel.yaml", ] diff --git a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/hpo_config.yaml b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/hpo_config.yaml index 58384b4c619..2202588ff32 100644 --- a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/hpo_config.yaml +++ b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/hpo_config.yaml @@ -4,12 +4,12 @@ hp_space: learning_parameters.learning_rate: param_type: qloguniform range: - - 0.0032 - - 0.08 - - 0.0001 + - 0.00029 + - 0.1 + - 0.00001 learning_parameters.batch_size: param_type: qloguniform range: - - 40 - - 96 + - 64 + - 
256 - 2 diff --git a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/model.py b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/model.py index f7abdbecd1a..0d9dd183c78 100644 --- a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/model.py +++ b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/model.py @@ -1,4 +1,5 @@ _base_ = [ - '../../../submodule/models/classification/ote_mobilenet_v3_large.yaml', + "../../../submodule/models/classification/ote_mobilenet_v3_large.yaml", ] -fp16 = dict(loss_scale=512.) + +fp16 = dict(loss_scale=512.0) diff --git a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/model_hierarchical.py b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/model_hierarchical.py index 6e85a1283a3..b236efee5a0 100644 --- a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/model_hierarchical.py +++ b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/model_hierarchical.py @@ -1,3 +1,3 @@ _base_ = [ - '../../../submodule/models/classification/ote_mobilenet_v3_large_hierarchical.yaml', + "../../../submodule/models/classification/ote_mobilenet_v3_large_hierarchical.yaml", ] diff --git a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/model_multilabel.py b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/model_multilabel.py index 64e4fba8892..c2520fc91b9 100644 --- a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/model_multilabel.py +++ b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/model_multilabel.py @@ -1,4 +1,5 @@ _base_ = [ - '../../../submodule/models/classification/ote_mobilenet_v3_large_multilabel.yaml', + "../../../submodule/models/classification/ote_mobilenet_v3_large_multilabel.yaml", ] -fp16 = dict(loss_scale=512.) 
+ +fp16 = dict(loss_scale=512.0) diff --git a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/template.yaml b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/template.yaml index 99e3bf094c9..4e3ef04a706 100644 --- a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/template.yaml +++ b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_large_1_cls_incr/template.yaml @@ -32,12 +32,21 @@ hyper_parameters: num_workers: default_value: 0 learning_rate: - default_value: 0.016 + default_value: 0.0058 auto_hpo_state: POSSIBLE learning_rate_warmup_iters: - default_value: 100 + default_value: 10 num_iters: - default_value: 20 + default_value: 90 + nncf_optimization: + enable_quantization: + default_value: true + enable_pruning: + default_value: false + pruning_supported: + default_value: true + maximal_accuracy_degradation: + default_value: 1.0 algo_backend: train_type: default_value: Incremental diff --git a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_small_cls_incr/model.py b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_small_cls_incr/model.py index 56333ebf120..aa49912ed02 100644 --- a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_small_cls_incr/model.py +++ b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_small_cls_incr/model.py @@ -1,3 +1,3 @@ _base_ = [ - '../../../submodule/models/classification/ote_mobilenet_v3_small.yaml', + "../../../submodule/models/classification/ote_mobilenet_v3_small.yaml", ] diff --git a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_small_cls_incr/model_hierarchical.py b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_small_cls_incr/model_hierarchical.py index de507e03fe4..d1cdd7c54ab 100644 --- a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_small_cls_incr/model_hierarchical.py +++ b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_small_cls_incr/model_hierarchical.py @@ -1,3 +1,3 @@ _base_ = [ - '../../../submodule/models/classification/ote_mobilenet_v3_small_hierarchical.yaml', + "../../../submodule/models/classification/ote_mobilenet_v3_small_hierarchical.yaml", ] diff --git a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_small_cls_incr/model_multilabel.py b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_small_cls_incr/model_multilabel.py index bfe1534c1a6..9c7b7b6703e 100644 --- a/external/model-preparation-algorithm/configs/classification/mobilenet_v3_small_cls_incr/model_multilabel.py +++ b/external/model-preparation-algorithm/configs/classification/mobilenet_v3_small_cls_incr/model_multilabel.py @@ -1,3 +1,3 @@ _base_ = [ - '../../../submodule/models/classification/ote_mobilenet_v3_small_multilabel.yaml', + "../../../submodule/models/classification/ote_mobilenet_v3_small_multilabel.yaml", ] diff --git a/external/model-preparation-algorithm/configs/detection/configuration.yaml b/external/model-preparation-algorithm/configs/detection/configuration.yaml index 2b06e633e19..81f4eb43757 100644 --- a/external/model-preparation-algorithm/configs/detection/configuration.yaml +++ b/external/model-preparation-algorithm/configs/detection/configuration.yaml @@ -119,6 +119,85 @@ learning_parameters: value: 0 visible_in_ui: true warning: null + enable_early_stopping: + 
affects_outcome_of: TRAINING + default_value: true + description: Early exit from training when validation accuracy does not improve for several epochs. + editable: true + header: Enable early stopping of the training + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + visible_in_ui: true + warning: null + early_stop_start: + affects_outcome_of: TRAINING + default_value: 3 + editable: true + header: Start epoch for early stopping + max_value: 1000 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 3 + visible_in_ui: false + early_stop_patience: + affects_outcome_of: TRAINING + default_value: 10 + description: Training will stop if the model does not improve within the number of epochs of patience. + editable: true + header: Patience for early stopping + max_value: 50 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 10 + visible_in_ui: true + warning: This is applied exclusively when early stopping is enabled. + early_stop_iteration_patience: + affects_outcome_of: TRAINING + default_value: 0 + description: + Training will stop if the model does not improve within the number of iterations of patience. + This ensures the model is trained enough with the number of iterations of patience before early stopping. + editable: true + header: Iteration patience for early stopping + max_value: 1000 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 0 + visible_in_ui: true + warning: This is applied exclusively when early stopping is enabled. + use_adaptive_interval: + affects_outcome_of: TRAINING + default_value: true + description: Depending on the number of iterations per epoch, adaptively update the validation interval and related values. + editable: true + header: Use adaptive validation interval + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + visible_in_ui: true + warning: This will automatically control the patience and interval when early stopping is enabled.
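Read together, the early-stopping knobs added above behave roughly like this: nothing stops before early_stop_start; training stops once the best validation score is more than early_stop_patience epochs old; and early_stop_iteration_patience additionally demands that many stale iterations before the stop fires. A simplified sketch of that rule (a hypothetical helper, not the hook the MPA tasks actually register):

    def should_stop(epoch, best_epoch, iters_since_best,
                    start=3, patience=10, iteration_patience=0):
        """Simplified decision rule for the early-stopping parameters above."""
        if epoch < start:                   # never stop before the start epoch
            return False
        if epoch - best_epoch <= patience:  # best result is still recent enough
            return False
        # optionally require a minimum number of iterations without improvement
        return iters_since_best > iteration_patience

    # Best score at epoch 5; at epoch 16 with 400 stale iterations this stops:
    print(should_stop(epoch=16, best_epoch=5, iters_since_best=400))  # True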
type: PARAMETER_GROUP visible_in_ui: true postprocessing: @@ -260,6 +339,21 @@ nncf_optimization: value: false visible_in_ui: true warning: null + pruning_supported: + affects_outcome_of: TRAINING + default_value: false + description: Whether filter pruning is supported + editable: false + header: Whether filter pruning is supported + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: false + visible_in_ui: false + warning: null maximal_accuracy_degradation: affects_outcome_of: NONE default_value: 1.0 diff --git a/external/model-preparation-algorithm/configs/detection/cspdarknet_yolox_cls_incr/data_pipeline.py b/external/model-preparation-algorithm/configs/detection/cspdarknet_yolox_cls_incr/data_pipeline.py index 028d05ba84b..a75f11cecbd 100644 --- a/external/model-preparation-algorithm/configs/detection/cspdarknet_yolox_cls_incr/data_pipeline.py +++ b/external/model-preparation-algorithm/configs/detection/cspdarknet_yolox_cls_incr/data_pipeline.py @@ -1,44 +1,42 @@ -dataset_type = 'CocoDataset' -data_root = 'data/coco/' +dataset_type = "CocoDataset" +data_root = "data/coco/" samples_per_gpu = 2 -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) img_scale = (640, 640) train_pipeline = [ - dict(type='Mosaic', img_scale=img_scale, pad_val=114.0), + dict(type="Mosaic", img_scale=img_scale, pad_val=114.0), + dict(type="RandomAffine", scaling_ratio_range=(0.5, 1.5), border=(-img_scale[0] // 2, -img_scale[1] // 2)), dict( - type='RandomAffine', - scaling_ratio_range=(0.5, 1.5), - border=(-img_scale[0] // 2, -img_scale[1] // 2)), - dict( - type='PhotoMetricDistortion', + type="PhotoMetricDistortion", brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), - hue_delta=18), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Resize', keep_ratio=True), - dict(type='Pad', pad_to_square=True, pad_val=114.0), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) + hue_delta=18, + ), + dict(type="RandomFlip", flip_ratio=0.5), + dict(type="Resize", keep_ratio=True), + dict(type="Pad", pad_to_square=True, pad_val=114.0), + dict(type="Normalize", **img_norm_cfg), + dict(type="DefaultFormatBundle"), + dict(type="Collect", keys=["img", "gt_bboxes", "gt_labels"]), ] test_pipeline = [ - dict(type='LoadImageFromFile'), + dict(type="LoadImageFromFile"), dict( - type='MultiScaleFlipAug', + type="MultiScaleFlipAug", img_scale=(416, 416), flip=False, transforms=[ - dict(type='Resize', keep_ratio=False), - dict(type='RandomFlip'), - dict(type='Pad', size=(416, 416), pad_val=114.0), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img']) - ]) + dict(type="Resize", keep_ratio=False), + dict(type="RandomFlip"), + dict(type="Pad", size=(416, 416), pad_val=114.0), + dict(type="Normalize", **img_norm_cfg), + dict(type="DefaultFormatBundle"), + dict(type="Collect", keys=["img"]), + ], + ), ] data = dict( @@ -46,29 +44,28 @@ workers_per_gpu=4, num_classes=2, train=dict( - type='MultiImageMixDataset', + type="MultiImageMixDataset", dataset=dict( type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=[ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations', 
with_bbox=True) - ], + ann_file=data_root + "annotations/instances_train2017.json", + img_prefix=data_root + "train2017/", + pipeline=[dict(type="LoadImageFromFile", to_float32=True), dict(type="LoadAnnotations", with_bbox=True)], ), pipeline=train_pipeline, dynamic_scale=img_scale, ), val=dict( type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', + ann_file=data_root + "annotations/instances_val2017.json", + img_prefix=data_root + "val2017/", test_mode=True, - pipeline=test_pipeline), + pipeline=test_pipeline, + ), test=dict( type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', + ann_file=data_root + "annotations/instances_val2017.json", + img_prefix=data_root + "val2017/", test_mode=True, - pipeline=test_pipeline) + pipeline=test_pipeline, + ), ) diff --git a/external/model-preparation-algorithm/configs/detection/cspdarknet_yolox_cls_incr/hpo_config.yaml b/external/model-preparation-algorithm/configs/detection/cspdarknet_yolox_cls_incr/hpo_config.yaml index fffaa83d8c5..a89872d9b69 100644 --- a/external/model-preparation-algorithm/configs/detection/cspdarknet_yolox_cls_incr/hpo_config.yaml +++ b/external/model-preparation-algorithm/configs/detection/cspdarknet_yolox_cls_incr/hpo_config.yaml @@ -5,12 +5,12 @@ hp_space: learning_parameters.learning_rate: param_type: qloguniform range: - - 0.00004 - - 0.001 + - 0.00002 + - 0.002 - 0.00001 learning_parameters.batch_size: param_type: qloguniform range: - 4 - - 32 + - 16 - 2 diff --git a/external/model-preparation-algorithm/configs/detection/cspdarknet_yolox_cls_incr/model.py b/external/model-preparation-algorithm/configs/detection/cspdarknet_yolox_cls_incr/model.py index 8eb56d7e377..a28cec3a9c7 100644 --- a/external/model-preparation-algorithm/configs/detection/cspdarknet_yolox_cls_incr/model.py +++ b/external/model-preparation-algorithm/configs/detection/cspdarknet_yolox_cls_incr/model.py @@ -1,4 +1,2 @@ -_base_ = [ - '../../../submodule/recipes/stages/_base_/models/detectors/yolox.custom.py' -] -fp16 = dict(loss_scale=512.) 
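The fp16 = dict(loss_scale=512.0) line kept in these model.py configs enables mixed-precision training with a static loss scale (handled by MMCV's fp16 optimizer hook, as far as these configs go). The idea in isolation, as a hedged PyTorch sketch rather than a replica of what the hook literally does:

    import torch

    loss_scale = 512.0  # mirrors fp16 = dict(loss_scale=512.0)

    param = torch.nn.Parameter(torch.randn(4))
    optimizer = torch.optim.SGD([param], lr=0.01)

    loss = (param.half().float() ** 2).sum()  # stand-in for an fp16 forward pass
    (loss * loss_scale).backward()            # scale up so fp16 grads don't underflow
    param.grad /= loss_scale                  # unscale before the optimizer step
    optimizer.step()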
+_base_ = ["../../../submodule/recipes/stages/_base_/models/detectors/yolox.custom.py"] +fp16 = dict(loss_scale=512.0) diff --git a/external/model-preparation-algorithm/configs/detection/cspdarknet_yolox_cls_incr/template.yaml b/external/model-preparation-algorithm/configs/detection/cspdarknet_yolox_cls_incr/template.yaml index 9e1d8120ab8..fd5bc24b674 100644 --- a/external/model-preparation-algorithm/configs/detection/cspdarknet_yolox_cls_incr/template.yaml +++ b/external/model-preparation-algorithm/configs/detection/cspdarknet_yolox_cls_incr/template.yaml @@ -36,6 +36,15 @@ hyper_parameters: default_value: 3 num_iters: default_value: 200 + nncf_optimization: + enable_quantization: + default_value: true + enable_pruning: + default_value: false + pruning_supported: + default_value: false + maximal_accuracy_degradation: + default_value: 1.0 algo_backend: train_type: default_value: Incremental diff --git a/external/model-preparation-algorithm/configs/detection/mobilenetv2_atss_cls_incr/hpo_config.yaml b/external/model-preparation-algorithm/configs/detection/mobilenetv2_atss_cls_incr/hpo_config.yaml index 1ffda84eac4..8df3d6be9dd 100644 --- a/external/model-preparation-algorithm/configs/detection/mobilenetv2_atss_cls_incr/hpo_config.yaml +++ b/external/model-preparation-algorithm/configs/detection/mobilenetv2_atss_cls_incr/hpo_config.yaml @@ -12,5 +12,5 @@ hp_space: param_type: qloguniform range: - 4 - - 32 + - 16 - 2 diff --git a/external/model-preparation-algorithm/configs/detection/mobilenetv2_atss_cls_incr/model.py b/external/model-preparation-algorithm/configs/detection/mobilenetv2_atss_cls_incr/model.py index 04097761001..cc5c147b478 100644 --- a/external/model-preparation-algorithm/configs/detection/mobilenetv2_atss_cls_incr/model.py +++ b/external/model-preparation-algorithm/configs/detection/mobilenetv2_atss_cls_incr/model.py @@ -1,9 +1,16 @@ _base_ = [ - '../../../submodule/samples/cfgs/models/backbones/ote_mobilenet_v2_w1.yaml', - '../../../submodule/recipes/stages/_base_/models/detectors/atss.custom.py' + "../../../submodule/samples/cfgs/models/backbones/ote_mobilenet_v2_w1.yaml", + "../../../submodule/recipes/stages/_base_/models/detectors/atss.custom.py", ] model = dict( - backbone=dict(out_indices=(2, 3, 4, 5,)) + backbone=dict( + out_indices=( + 2, + 3, + 4, + 5, + ) + ) ) -fp16 = dict(loss_scale=512.) 
+fp16 = dict(loss_scale=512.0) diff --git a/external/model-preparation-algorithm/configs/detection/mobilenetv2_atss_cls_incr/template.yaml b/external/model-preparation-algorithm/configs/detection/mobilenetv2_atss_cls_incr/template.yaml index 65d1c7826c0..0060830df31 100644 --- a/external/model-preparation-algorithm/configs/detection/mobilenetv2_atss_cls_incr/template.yaml +++ b/external/model-preparation-algorithm/configs/detection/mobilenetv2_atss_cls_incr/template.yaml @@ -36,6 +36,15 @@ hyper_parameters: default_value: 3 num_iters: default_value: 200 + nncf_optimization: + enable_quantization: + default_value: true + enable_pruning: + default_value: false + pruning_supported: + default_value: true + maximal_accuracy_degradation: + default_value: 1.0 algo_backend: train_type: default_value: Incremental diff --git a/external/model-preparation-algorithm/configs/detection/mobilenetv2_ssd_cls_incr/data_pipeline.py b/external/model-preparation-algorithm/configs/detection/mobilenetv2_ssd_cls_incr/data_pipeline.py index 0a4a94e3577..aa767264acc 100644 --- a/external/model-preparation-algorithm/configs/detection/mobilenetv2_ssd_cls_incr/data_pipeline.py +++ b/external/model-preparation-algorithm/configs/detection/mobilenetv2_ssd_cls_incr/data_pipeline.py @@ -1,60 +1,64 @@ -dataset_type = 'CocoDataset' +dataset_type = "CocoDataset" img_size = (864, 864) img_norm_cfg = dict(mean=[0, 0, 0], std=[255, 255, 255], to_rgb=True) train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations', with_bbox=True), + dict(type="LoadImageFromFile", to_float32=True), + dict(type="LoadAnnotations", with_bbox=True), dict( - type='PhotoMetricDistortion', + type="PhotoMetricDistortion", brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), - hue_delta=18), - dict( - type='MinIoURandomCrop', - min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), - min_crop_size=0.1), - dict(type='Resize', img_scale=img_size, keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) + hue_delta=18, + ), + dict(type="MinIoURandomCrop", min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.1), + dict(type="Resize", img_scale=img_size, keep_ratio=False), + dict(type="Normalize", **img_norm_cfg), + dict(type="RandomFlip", flip_ratio=0.5), + dict(type="DefaultFormatBundle"), + dict(type="Collect", keys=["img", "gt_bboxes", "gt_labels"]), ] test_pipeline = [ - dict(type='LoadImageFromFile'), + dict(type="LoadImageFromFile"), dict( - type='MultiScaleFlipAug', + type="MultiScaleFlipAug", img_scale=img_size, flip=False, transforms=[ - dict(type='Resize', keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) + dict(type="Resize", keep_ratio=False), + dict(type="Normalize", **img_norm_cfg), + dict(type="ImageToTensor", keys=["img"]), + dict(type="Collect", keys=["img"]), + ], + ), ] data = dict( samples_per_gpu=10, workers_per_gpu=4, train=dict( - type='RepeatDataset', + type="RepeatDataset", times=1, adaptive_repeat_times=True, dataset=dict( type=dataset_type, - ann_file='data/coco/annotations/instances_train2017.json', - img_prefix='data/coco/train2017', - pipeline=train_pipeline)), + ann_file="data/coco/annotations/instances_train2017.json", + img_prefix="data/coco/train2017", + pipeline=train_pipeline, + ), + ), val=dict( type=dataset_type, - 
ann_file='data/coco/annotations/instances_val2017.json', - img_prefix='data/coco/val2017', + ann_file="data/coco/annotations/instances_val2017.json", + img_prefix="data/coco/val2017", test_mode=True, - pipeline=test_pipeline), + pipeline=test_pipeline, + ), test=dict( type=dataset_type, - ann_file='data/coco/annotations/instances_val2017.json', - img_prefix='data/coco/val2017', + ann_file="data/coco/annotations/instances_val2017.json", + img_prefix="data/coco/val2017", test_mode=True, - pipeline=test_pipeline)) + pipeline=test_pipeline, + ), +) diff --git a/external/model-preparation-algorithm/configs/detection/mobilenetv2_ssd_cls_incr/hpo_config.yaml b/external/model-preparation-algorithm/configs/detection/mobilenetv2_ssd_cls_incr/hpo_config.yaml index eebed24be1e..8a5b9ff69fa 100644 --- a/external/model-preparation-algorithm/configs/detection/mobilenetv2_ssd_cls_incr/hpo_config.yaml +++ b/external/model-preparation-algorithm/configs/detection/mobilenetv2_ssd_cls_incr/hpo_config.yaml @@ -5,12 +5,12 @@ hp_space: learning_parameters.learning_rate: param_type: qloguniform range: - - 0.001 - - 0.1 + - 0.002 + - 0.05 - 0.001 learning_parameters.batch_size: param_type: qloguniform range: - - 4 - - 32 + - 6 + - 12 - 2 diff --git a/external/model-preparation-algorithm/configs/detection/mobilenetv2_ssd_cls_incr/model.py b/external/model-preparation-algorithm/configs/detection/mobilenetv2_ssd_cls_incr/model.py index a26d4ee19f3..4cb8bed9b39 100644 --- a/external/model-preparation-algorithm/configs/detection/mobilenetv2_ssd_cls_incr/model.py +++ b/external/model-preparation-algorithm/configs/detection/mobilenetv2_ssd_cls_incr/model.py @@ -1,9 +1,14 @@ _base_ = [ - '../../../submodule/samples/cfgs/models/backbones/ote_mobilenet_v2_w1.yaml', - '../../../submodule/recipes/stages/_base_/models/detectors/ssd.custom.py' + "../../../submodule/samples/cfgs/models/backbones/ote_mobilenet_v2_w1.yaml", + "../../../submodule/recipes/stages/_base_/models/detectors/ssd.custom.py", ] model = dict( - backbone=dict(out_indices=(4, 5,)) + backbone=dict( + out_indices=( + 4, + 5, + ) + ) ) -fp16 = dict(loss_scale=512.) 
+fp16 = dict(loss_scale=512.0) diff --git a/external/model-preparation-algorithm/configs/detection/mobilenetv2_ssd_cls_incr/template.yaml b/external/model-preparation-algorithm/configs/detection/mobilenetv2_ssd_cls_incr/template.yaml index 2f6c35ae13b..6c69e55b53a 100644 --- a/external/model-preparation-algorithm/configs/detection/mobilenetv2_ssd_cls_incr/template.yaml +++ b/external/model-preparation-algorithm/configs/detection/mobilenetv2_ssd_cls_incr/template.yaml @@ -36,6 +36,15 @@ hyper_parameters: default_value: 3 num_iters: default_value: 200 + nncf_optimization: + enable_quantization: + default_value: true + enable_pruning: + default_value: false + pruning_supported: + default_value: true + maximal_accuracy_degradation: + default_value: 1.0 algo_backend: train_type: default_value: Incremental diff --git a/external/model-preparation-algorithm/configs/detection/resnet50_vfnet_cls_incr/model.py b/external/model-preparation-algorithm/configs/detection/resnet50_vfnet_cls_incr/model.py index e322d28425b..08af20b91bf 100644 --- a/external/model-preparation-algorithm/configs/detection/resnet50_vfnet_cls_incr/model.py +++ b/external/model-preparation-algorithm/configs/detection/resnet50_vfnet_cls_incr/model.py @@ -1,4 +1,4 @@ _base_ = [ - '../../../submodule/samples/cfgs/models/backbones/resnet50.yaml', - '../../../submodule/recipes/stages/_base_/models/detectors/vfnet.custom.py' + "../../../submodule/samples/cfgs/models/backbones/resnet50.yaml", + "../../../submodule/recipes/stages/_base_/models/detectors/vfnet.custom.py", ] diff --git a/external/model-preparation-algorithm/configs/instance-segmentation/configuration.yaml b/external/model-preparation-algorithm/configs/instance-segmentation/configuration.yaml index 1493e4bb768..a60cde0f14d 100644 --- a/external/model-preparation-algorithm/configs/instance-segmentation/configuration.yaml +++ b/external/model-preparation-algorithm/configs/instance-segmentation/configuration.yaml @@ -119,6 +119,85 @@ learning_parameters: value: 0 visible_in_ui: true warning: null + enable_early_stopping: + affects_outcome_of: TRAINING + default_value: true + description: Early exit from training when validation accuracy does not improve for several epochs. + editable: true + header: Enable early stopping of the training + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + visible_in_ui: true + warning: null + early_stop_start: + affects_outcome_of: TRAINING + default_value: 3 + editable: true + header: Start epoch for early stopping + max_value: 1000 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 3 + visible_in_ui: false + early_stop_patience: + affects_outcome_of: TRAINING + default_value: 10 + description: Training will stop if the model does not improve within the number of epochs of patience. + editable: true + header: Patience for early stopping + max_value: 50 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 10 + visible_in_ui: true + warning: This is applied exclusively when early stopping is enabled. + early_stop_iteration_patience: + affects_outcome_of: TRAINING + default_value: 0 + description: + Training will stop if the model does not improve within the number of iterations of patience. + This ensures the model is trained enough with the number of iterations of patience before early stopping.
+ editable: true + header: Iteration patience for early stopping + max_value: 1000 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 0 + visible_in_ui: true + warning: This is applied exclusively when early stopping is enabled. + use_adaptive_interval: + affects_outcome_of: TRAINING + default_value: true + description: Depending on the number of iterations per epoch, adaptively update the validation interval and related values. + editable: true + header: Use adaptive validation interval + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + visible_in_ui: true + warning: This will automatically control the patience and interval when early stopping is enabled. type: PARAMETER_GROUP visible_in_ui: true postprocessing: @@ -260,6 +339,21 @@ nncf_optimization: value: false visible_in_ui: true warning: null + pruning_supported: + affects_outcome_of: TRAINING + default_value: false + description: Whether filter pruning is supported + editable: false + header: Whether filter pruning is supported + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: false + visible_in_ui: false + warning: null maximal_accuracy_degradation: affects_outcome_of: NONE default_value: 1.0 diff --git a/external/model-preparation-algorithm/configs/instance-segmentation/efficientnetb2b_maskrcnn/data_pipeline.py b/external/model-preparation-algorithm/configs/instance-segmentation/efficientnetb2b_maskrcnn/data_pipeline.py index 7896df7fd54..5d9a93307a5 100644 --- a/external/model-preparation-algorithm/configs/instance-segmentation/efficientnetb2b_maskrcnn/data_pipeline.py +++ b/external/model-preparation-algorithm/configs/instance-segmentation/efficientnetb2b_maskrcnn/data_pipeline.py @@ -1,39 +1,38 @@ -dataset_type = 'CocoDataset' +dataset_type = "CocoDataset" img_size = (1024, 1024) -img_norm_cfg = dict( - mean=(103.53, 116.28, 123.675), std=(1.0, 1.0, 1.0), to_rgb=False) +img_norm_cfg = dict(mean=(103.53, 116.28, 123.675), std=(1.0, 1.0, 1.0), to_rgb=False) train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, - with_mask=True, poly2mask=False), - dict(type='Resize', img_scale=img_size, keep_ratio=False), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) + dict(type="LoadImageFromFile"), + dict(type="LoadAnnotations", with_bbox=True, with_mask=True, poly2mask=False), + dict(type="Resize", img_scale=img_size, keep_ratio=False), + dict(type="RandomFlip", flip_ratio=0.5), + dict(type="Normalize", **img_norm_cfg), + dict(type="Pad", size_divisor=32), + dict(type="DefaultFormatBundle"), + dict(type="Collect", keys=["img", "gt_bboxes", "gt_labels", "gt_masks"]), ] test_pipeline = [ - dict(type='LoadImageFromFile'), + dict(type="LoadImageFromFile"), dict( - type='MultiScaleFlipAug', + type="MultiScaleFlipAug", img_scale=img_size, flip=False, transforms=[ - dict(type='Resize', keep_ratio=False), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) + dict(type="Resize", keep_ratio=False), + dict(type="RandomFlip"), + dict(type="Normalize", **img_norm_cfg), + dict(type="Pad", size_divisor=32), +
dict(type="ImageToTensor", keys=["img"]), + dict(type="Collect", keys=["img"]), + ], + ), ] -__dataset_type = 'CocoDataset' -__data_root = 'data/coco/' +__dataset_type = "CocoDataset" +__data_root = "data/coco/" __samples_per_gpu = 4 @@ -42,19 +41,22 @@ workers_per_gpu=2, train=dict( type=__dataset_type, - ann_file=__data_root + 'annotations/instances_train2017.json', - img_prefix=__data_root + 'train2017/', - pipeline=train_pipeline), + ann_file=__data_root + "annotations/instances_train2017.json", + img_prefix=__data_root + "train2017/", + pipeline=train_pipeline, + ), val=dict( type=__dataset_type, - ann_file=__data_root + 'annotations/instances_val2017.json', - img_prefix=__data_root + 'val2017/', + ann_file=__data_root + "annotations/instances_val2017.json", + img_prefix=__data_root + "val2017/", test_mode=True, - pipeline=test_pipeline), + pipeline=test_pipeline, + ), test=dict( type=__dataset_type, - ann_file=__data_root + 'annotations/instances_val2017.json', - img_prefix=__data_root + 'val2017/', + ann_file=__data_root + "annotations/instances_val2017.json", + img_prefix=__data_root + "val2017/", test_mode=True, - pipeline=test_pipeline) + pipeline=test_pipeline, + ), ) diff --git a/external/model-preparation-algorithm/configs/instance-segmentation/efficientnetb2b_maskrcnn/model.py b/external/model-preparation-algorithm/configs/instance-segmentation/efficientnetb2b_maskrcnn/model.py index 62dc4adabe0..4bece262367 100644 --- a/external/model-preparation-algorithm/configs/instance-segmentation/efficientnetb2b_maskrcnn/model.py +++ b/external/model-preparation-algorithm/configs/instance-segmentation/efficientnetb2b_maskrcnn/model.py @@ -1,5 +1,5 @@ _base_ = [ - '../../../submodule/samples/cfgs/models/backbones/efficientnet_b2b.yaml', - '../../../submodule/recipes/stages/_base_/models/detectors/efficientnetb2b_maskrcnn.custom.py' + "../../../submodule/samples/cfgs/models/backbones/efficientnet_b2b.yaml", + "../../../submodule/recipes/stages/_base_/models/detectors/efficientnetb2b_maskrcnn.custom.py", ] -fp16 = dict(loss_scale=512.) 
+fp16 = dict(loss_scale=512.0) diff --git a/external/model-preparation-algorithm/configs/instance-segmentation/efficientnetb2b_maskrcnn/template.yaml b/external/model-preparation-algorithm/configs/instance-segmentation/efficientnetb2b_maskrcnn/template.yaml index 9fb62a62f55..7798ab5de57 100644 --- a/external/model-preparation-algorithm/configs/instance-segmentation/efficientnetb2b_maskrcnn/template.yaml +++ b/external/model-preparation-algorithm/configs/instance-segmentation/efficientnetb2b_maskrcnn/template.yaml @@ -36,6 +36,15 @@ hyper_parameters: default_value: 100 num_iters: default_value: 100 + nncf_optimization: + enable_quantization: + default_value: true + enable_pruning: + default_value: false + pruning_supported: + default_value: false + maximal_accuracy_degradation: + default_value: 1.0 algo_backend: train_type: default_value: Incremental diff --git a/external/model-preparation-algorithm/configs/instance-segmentation/resnet50_maskrcnn/data_pipeline.py b/external/model-preparation-algorithm/configs/instance-segmentation/resnet50_maskrcnn/data_pipeline.py index 8b77048690d..7faf95a389f 100644 --- a/external/model-preparation-algorithm/configs/instance-segmentation/resnet50_maskrcnn/data_pipeline.py +++ b/external/model-preparation-algorithm/configs/instance-segmentation/resnet50_maskrcnn/data_pipeline.py @@ -1,51 +1,55 @@ -dataset_type = 'CocoDataset' +dataset_type = "CocoDataset" img_size = (1344, 800) -img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True) +img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, - with_mask=True, poly2mask=False), - dict(type='Resize', img_scale=img_size, keep_ratio=False), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) + dict(type="LoadImageFromFile"), + dict(type="LoadAnnotations", with_bbox=True, with_mask=True, poly2mask=False), + dict(type="Resize", img_scale=img_size, keep_ratio=False), + dict(type="RandomFlip", flip_ratio=0.5), + dict(type="Normalize", **img_norm_cfg), + dict(type="DefaultFormatBundle"), + dict(type="Collect", keys=["img", "gt_bboxes", "gt_labels", "gt_masks"]), ] test_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='MultiScaleFlipAug', - img_scale=img_size, - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=False), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) + dict(type="LoadImageFromFile"), + dict( + type="MultiScaleFlipAug", + img_scale=img_size, + flip=False, + transforms=[ + dict(type="Resize", keep_ratio=False), + dict(type="RandomFlip"), + dict(type="Normalize", **img_norm_cfg), + dict(type="ImageToTensor", keys=["img"]), + dict(type="Collect", keys=["img"]), + ], + ), ] -data = dict(samples_per_gpu=4, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file='data/coco/annotations/instances_train2017.json', - img_prefix='data/coco/train2017', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - test_mode=True, - ann_file='data/coco/annotations/instances_val2017.json', - img_prefix='data/coco/val2017', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - test_mode=True, - ann_file='data/coco/annotations/instances_val2017.json', - 
img_prefix='data/coco/val2017', - pipeline=test_pipeline)) +data = dict( + samples_per_gpu=4, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file="data/coco/annotations/instances_train2017.json", + img_prefix="data/coco/train2017", + pipeline=train_pipeline, + ), + val=dict( + type=dataset_type, + test_mode=True, + ann_file="data/coco/annotations/instances_val2017.json", + img_prefix="data/coco/val2017", + pipeline=test_pipeline, + ), + test=dict( + type=dataset_type, + test_mode=True, + ann_file="data/coco/annotations/instances_val2017.json", + img_prefix="data/coco/val2017", + pipeline=test_pipeline, + ), +) diff --git a/external/model-preparation-algorithm/configs/instance-segmentation/resnet50_maskrcnn/model.py b/external/model-preparation-algorithm/configs/instance-segmentation/resnet50_maskrcnn/model.py index 156b917dbe7..0838604070b 100644 --- a/external/model-preparation-algorithm/configs/instance-segmentation/resnet50_maskrcnn/model.py +++ b/external/model-preparation-algorithm/configs/instance-segmentation/resnet50_maskrcnn/model.py @@ -1,4 +1,4 @@ _base_ = [ - '../../../submodule/samples/cfgs/models/backbones/resnet50.yaml', - '../../../submodule/recipes/stages/_base_/models/detectors/resnet50_maskrcnn.custom.py' + "../../../submodule/samples/cfgs/models/backbones/resnet50.yaml", + "../../../submodule/recipes/stages/_base_/models/detectors/resnet50_maskrcnn.custom.py", ] diff --git a/external/model-preparation-algorithm/configs/instance-segmentation/resnet50_maskrcnn/template.yaml b/external/model-preparation-algorithm/configs/instance-segmentation/resnet50_maskrcnn/template.yaml index 715a129158a..c5fea3f690e 100644 --- a/external/model-preparation-algorithm/configs/instance-segmentation/resnet50_maskrcnn/template.yaml +++ b/external/model-preparation-algorithm/configs/instance-segmentation/resnet50_maskrcnn/template.yaml @@ -36,6 +36,15 @@ hyper_parameters: default_value: 100 num_iters: default_value: 100 + nncf_optimization: + enable_quantization: + default_value: true + enable_pruning: + default_value: false + pruning_supported: + default_value: false + maximal_accuracy_degradation: + default_value: 1.0 algo_backend: train_type: default_value: Incremental diff --git a/external/model-preparation-algorithm/configs/rotated-detection/configuration.yaml b/external/model-preparation-algorithm/configs/rotated-detection/configuration.yaml index 129453c5243..2d835c6a0e3 100644 --- a/external/model-preparation-algorithm/configs/rotated-detection/configuration.yaml +++ b/external/model-preparation-algorithm/configs/rotated-detection/configuration.yaml @@ -119,6 +119,71 @@ learning_parameters: value: 0 visible_in_ui: true warning: null + enable_early_stopping: + affects_outcome_of: TRAINING + default_value: true + description: Early exit from training when validation accuracy does not improve for several epochs.
+ editable: true + header: Enable early stopping of the training + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + visible_in_ui: true + warning: null + early_stop_start: + affects_outcome_of: TRAINING + default_value: 3 + editable: true + header: Start epoch for early stopping + max_value: 1000 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 3 + visible_in_ui: false + early_stop_patience: + affects_outcome_of: TRAINING + default_value: 10 + description: Training will stop if the model does not improve within the number of epochs of patience. + editable: true + header: Patience for early stopping + max_value: 50 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 10 + visible_in_ui: true + warning: This is applied exclusively when early stopping is enabled. + early_stop_iteration_patience: + affects_outcome_of: TRAINING + default_value: 0 + description: + Training will stop if the model does not improve within the number of iterations of patience. + This ensures the model is trained enough with the number of iterations of patience before early stopping. + editable: true + header: Iteration patience for early stopping + max_value: 1000 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 0 + visible_in_ui: true + warning: This is applied exclusively when early stopping is enabled. type: PARAMETER_GROUP visible_in_ui: true postprocessing: @@ -260,6 +325,21 @@ nncf_optimization: value: false visible_in_ui: true warning: null + pruning_supported: + affects_outcome_of: TRAINING + default_value: false + description: Whether filter pruning is supported + editable: false + header: Whether filter pruning is supported + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: false + visible_in_ui: false + warning: null maximal_accuracy_degradation: affects_outcome_of: NONE default_value: 1.0 diff --git a/external/model-preparation-algorithm/configs/rotated-detection/efficientnetb2b_maskrcnn/data_pipeline.py b/external/model-preparation-algorithm/configs/rotated-detection/efficientnetb2b_maskrcnn/data_pipeline.py index 7896df7fd54..5d9a93307a5 100644 --- a/external/model-preparation-algorithm/configs/rotated-detection/efficientnetb2b_maskrcnn/data_pipeline.py +++ b/external/model-preparation-algorithm/configs/rotated-detection/efficientnetb2b_maskrcnn/data_pipeline.py @@ -1,39 +1,38 @@ -dataset_type = 'CocoDataset' +dataset_type = "CocoDataset" img_size = (1024, 1024) -img_norm_cfg = dict( - mean=(103.53, 116.28, 123.675), std=(1.0, 1.0, 1.0), to_rgb=False) +img_norm_cfg = dict(mean=(103.53, 116.28, 123.675), std=(1.0, 1.0, 1.0), to_rgb=False) train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, - with_mask=True, poly2mask=False), - dict(type='Resize', img_scale=img_size, keep_ratio=False), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) + dict(type="LoadImageFromFile"), + dict(type="LoadAnnotations", with_bbox=True, with_mask=True, poly2mask=False), + dict(type="Resize", img_scale=img_size, keep_ratio=False), + dict(type="RandomFlip", flip_ratio=0.5), + 
dict(type="Normalize", **img_norm_cfg), + dict(type="Pad", size_divisor=32), + dict(type="DefaultFormatBundle"), + dict(type="Collect", keys=["img", "gt_bboxes", "gt_labels", "gt_masks"]), ] test_pipeline = [ - dict(type='LoadImageFromFile'), + dict(type="LoadImageFromFile"), dict( - type='MultiScaleFlipAug', + type="MultiScaleFlipAug", img_scale=img_size, flip=False, transforms=[ - dict(type='Resize', keep_ratio=False), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) + dict(type="Resize", keep_ratio=False), + dict(type="RandomFlip"), + dict(type="Normalize", **img_norm_cfg), + dict(type="Pad", size_divisor=32), + dict(type="ImageToTensor", keys=["img"]), + dict(type="Collect", keys=["img"]), + ], + ), ] -__dataset_type = 'CocoDataset' -__data_root = 'data/coco/' +__dataset_type = "CocoDataset" +__data_root = "data/coco/" __samples_per_gpu = 4 @@ -42,19 +41,22 @@ workers_per_gpu=2, train=dict( type=__dataset_type, - ann_file=__data_root + 'annotations/instances_train2017.json', - img_prefix=__data_root + 'train2017/', - pipeline=train_pipeline), + ann_file=__data_root + "annotations/instances_train2017.json", + img_prefix=__data_root + "train2017/", + pipeline=train_pipeline, + ), val=dict( type=__dataset_type, - ann_file=__data_root + 'annotations/instances_val2017.json', - img_prefix=__data_root + 'val2017/', + ann_file=__data_root + "annotations/instances_val2017.json", + img_prefix=__data_root + "val2017/", test_mode=True, - pipeline=test_pipeline), + pipeline=test_pipeline, + ), test=dict( type=__dataset_type, - ann_file=__data_root + 'annotations/instances_val2017.json', - img_prefix=__data_root + 'val2017/', + ann_file=__data_root + "annotations/instances_val2017.json", + img_prefix=__data_root + "val2017/", test_mode=True, - pipeline=test_pipeline) + pipeline=test_pipeline, + ), ) diff --git a/external/model-preparation-algorithm/configs/rotated-detection/efficientnetb2b_maskrcnn/model.py b/external/model-preparation-algorithm/configs/rotated-detection/efficientnetb2b_maskrcnn/model.py index 7a196dc6320..1c214ce03c0 100644 --- a/external/model-preparation-algorithm/configs/rotated-detection/efficientnetb2b_maskrcnn/model.py +++ b/external/model-preparation-algorithm/configs/rotated-detection/efficientnetb2b_maskrcnn/model.py @@ -1,4 +1,4 @@ _base_ = [ - '../../../submodule/samples/cfgs/models/backbones/efficientnet_b2b.yaml', - '../../../submodule/recipes/stages/_base_/models/detectors/efficientnetb2b_maskrcnn.custom.py' + "../../../submodule/samples/cfgs/models/backbones/efficientnet_b2b.yaml", + "../../../submodule/recipes/stages/_base_/models/detectors/efficientnetb2b_maskrcnn.custom.py", ] diff --git a/external/model-preparation-algorithm/configs/rotated-detection/efficientnetb2b_maskrcnn/template.yaml b/external/model-preparation-algorithm/configs/rotated-detection/efficientnetb2b_maskrcnn/template.yaml index d17ecf6b490..098e7c0b600 100644 --- a/external/model-preparation-algorithm/configs/rotated-detection/efficientnetb2b_maskrcnn/template.yaml +++ b/external/model-preparation-algorithm/configs/rotated-detection/efficientnetb2b_maskrcnn/template.yaml @@ -36,6 +36,15 @@ hyper_parameters: default_value: 100 num_iters: default_value: 100 + nncf_optimization: + enable_quantization: + default_value: true + enable_pruning: + default_value: false + pruning_supported: + default_value: false + maximal_accuracy_degradation: + default_value: 
1.0 algo_backend: train_type: default_value: Incremental diff --git a/external/model-preparation-algorithm/configs/rotated-detection/resnet50_maskrcnn/data_pipeline.py b/external/model-preparation-algorithm/configs/rotated-detection/resnet50_maskrcnn/data_pipeline.py index 8b77048690d..7faf95a389f 100644 --- a/external/model-preparation-algorithm/configs/rotated-detection/resnet50_maskrcnn/data_pipeline.py +++ b/external/model-preparation-algorithm/configs/rotated-detection/resnet50_maskrcnn/data_pipeline.py @@ -1,51 +1,55 @@ -dataset_type = 'CocoDataset' +dataset_type = "CocoDataset" img_size = (1344, 800) -img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True) +img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, - with_mask=True, poly2mask=False), - dict(type='Resize', img_scale=img_size, keep_ratio=False), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) + dict(type="LoadImageFromFile"), + dict(type="LoadAnnotations", with_bbox=True, with_mask=True, poly2mask=False), + dict(type="Resize", img_scale=img_size, keep_ratio=False), + dict(type="RandomFlip", flip_ratio=0.5), + dict(type="Normalize", **img_norm_cfg), + dict(type="DefaultFormatBundle"), + dict(type="Collect", keys=["img", "gt_bboxes", "gt_labels", "gt_masks"]), ] test_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='MultiScaleFlipAug', - img_scale=img_size, - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=False), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) + dict(type="LoadImageFromFile"), + dict( + type="MultiScaleFlipAug", + img_scale=img_size, + flip=False, + transforms=[ + dict(type="Resize", keep_ratio=False), + dict(type="RandomFlip"), + dict(type="Normalize", **img_norm_cfg), + dict(type="ImageToTensor", keys=["img"]), + dict(type="Collect", keys=["img"]), + ], + ), ] -data = dict(samples_per_gpu=4, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file='data/coco/annotations/instances_train2017.json', - img_prefix='data/coco/train2017', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - test_mode=True, - ann_file='data/coco/annotations/instances_val2017.json', - img_prefix='data/coco/val2017', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - test_mode=True, - ann_file='data/coco/annotations/instances_val2017.json', - img_prefix='data/coco/val2017', - pipeline=test_pipeline)) +data = dict( + samples_per_gpu=4, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file="data/coco/annotations/instances_train2017.json", + img_prefix="data/coco/train2017", + pipeline=train_pipeline, + ), + val=dict( + type=dataset_type, + test_mode=True, + ann_file="data/coco/annotations/instances_val2017.json", + img_prefix="data/coco/val2017", + pipeline=test_pipeline, + ), + test=dict( + type=dataset_type, + test_mode=True, + ann_file="data/coco/annotations/instances_val2017.json", + img_prefix="data/coco/val2017", + pipeline=test_pipeline, + ), +) diff --git a/external/model-preparation-algorithm/configs/rotated-detection/resnet50_maskrcnn/model.py 
b/external/model-preparation-algorithm/configs/rotated-detection/resnet50_maskrcnn/model.py index 156b917dbe7..0838604070b 100644 --- a/external/model-preparation-algorithm/configs/rotated-detection/resnet50_maskrcnn/model.py +++ b/external/model-preparation-algorithm/configs/rotated-detection/resnet50_maskrcnn/model.py @@ -1,4 +1,4 @@ _base_ = [ - '../../../submodule/samples/cfgs/models/backbones/resnet50.yaml', - '../../../submodule/recipes/stages/_base_/models/detectors/resnet50_maskrcnn.custom.py' + "../../../submodule/samples/cfgs/models/backbones/resnet50.yaml", + "../../../submodule/recipes/stages/_base_/models/detectors/resnet50_maskrcnn.custom.py", ] diff --git a/external/model-preparation-algorithm/configs/rotated-detection/resnet50_maskrcnn/template.yaml b/external/model-preparation-algorithm/configs/rotated-detection/resnet50_maskrcnn/template.yaml index 9235510af24..038f99d7c5a 100644 --- a/external/model-preparation-algorithm/configs/rotated-detection/resnet50_maskrcnn/template.yaml +++ b/external/model-preparation-algorithm/configs/rotated-detection/resnet50_maskrcnn/template.yaml @@ -36,6 +36,15 @@ hyper_parameters: default_value: 100 num_iters: default_value: 100 + nncf_optimization: + enable_quantization: + default_value: true + enable_pruning: + default_value: false + pruning_supported: + default_value: false + maximal_accuracy_degradation: + default_value: 1.0 algo_backend: train_type: default_value: Incremental diff --git a/external/model-preparation-algorithm/configs/segmentation/configuration.yaml b/external/model-preparation-algorithm/configs/segmentation/configuration.yaml index 4c691840ebb..76ff2f03415 100644 --- a/external/model-preparation-algorithm/configs/segmentation/configuration.yaml +++ b/external/model-preparation-algorithm/configs/segmentation/configuration.yaml @@ -147,6 +147,71 @@ learning_parameters: value: 0 visible_in_ui: true warning: null + enable_early_stopping: + affects_outcome_of: TRAINING + default_value: true + description: Early exit from training when validation accuracy stops improving for several epochs. + editable: true + header: Enable early stopping of the training + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + visible_in_ui: true + warning: null + early_stop_start: + affects_outcome_of: TRAINING + default_value: 3 + editable: true + header: Start epoch for early stopping + max_value: 1000 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 3 + visible_in_ui: false + early_stop_patience: + affects_outcome_of: TRAINING + default_value: 8 + description: Training will stop if the model does not improve within the number of epochs of patience. + editable: true + header: Patience for early stopping + max_value: 50 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 8 + visible_in_ui: true + warning: This is applied exclusively when early stopping is enabled. + early_stop_iteration_patience: + affects_outcome_of: TRAINING + default_value: 0 + description: + Training will stop if the model does not improve within the number of iterations of patience. + This ensures the model is trained enough with the number of iterations of patience before early stopping.
+ editable: true + header: Iteration patience for early stopping + max_value: 1000 + min_value: 0 + type: INTEGER + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: 0 + visible_in_ui: true + warning: This is applied exclusively when early stopping is enabled. type: PARAMETER_GROUP visible_in_ui: true postprocessing: @@ -287,6 +352,21 @@ nncf_optimization: value: false visible_in_ui: true warning: null + pruning_supported: + affects_outcome_of: TRAINING + default_value: false + description: Whether filter pruning is supported + editable: false + header: Whether filter pruning is supported + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: false + visible_in_ui: false + warning: null maximal_accuracy_degradation: affects_outcome_of: NONE default_value: 1.0 diff --git a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18-mod2/hpo_config.yaml b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18-mod2/hpo_config.yaml index abec751c13c..819a3ac0368 100644 --- a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18-mod2/hpo_config.yaml +++ b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18-mod2/hpo_config.yaml @@ -2,14 +2,14 @@ metric: mDice search_algorithm: asha hp_space: learning_parameters.learning_rate: - param_type: quniform + param_type: qloguniform range: - - 0.001 - - 0.1 - - 0.001 + - 0.0002 + - 0.005 + - 0.0001 learning_parameters.batch_size: param_type: qloguniform range: - - 4 - - 16 + - 6 + - 12 - 2 diff --git a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18-mod2/model.py b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18-mod2/model.py index 9c3717c1f5f..16b246fe372 100644 --- a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18-mod2/model.py +++ b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18-mod2/model.py @@ -1,6 +1,7 @@ _base_ = [ - '../../../submodule/models/segmentation/ocr_litehrnet18_mod2.yaml', + "../../../submodule/models/segmentation/ocr_litehrnet18_mod2.yaml", ] -load_from = 'https://storage.openvinotoolkit.org/repositories/openvino_training_extensions/models/custom_semantic_segmentation/litehrnet18_imagenet1k_rsc.pth' -fp16 = dict(loss_scale=512.) 
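+# The backslash-newline inside the string below is a line continuation: Python joins the
+# two fragments into a single URL at parse time, so only the source line length changes.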
+load_from = "https://storage.openvinotoolkit.org/repositories/openvino_training_extensions\ +/models/custom_semantic_segmentation/litehrnet18_imagenet1k_rsc.pth" +fp16 = dict(loss_scale=512.0) diff --git a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18-mod2/template.yaml b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18-mod2/template.yaml index e10e2b18141..9398ff74866 100644 --- a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18-mod2/template.yaml +++ b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18-mod2/template.yaml @@ -37,6 +37,15 @@ hyper_parameters: default_value: 100 num_iters: default_value: 300 + nncf_optimization: + enable_quantization: + default_value: true + enable_pruning: + default_value: false + pruning_supported: + default_value: false + maximal_accuracy_degradation: + default_value: 1.0 algo_backend: train_type: default_value: Incremental diff --git a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18/hpo_config.yaml b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18/hpo_config.yaml new file mode 100644 index 00000000000..819a3ac0368 --- /dev/null +++ b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18/hpo_config.yaml @@ -0,0 +1,15 @@ +metric: mDice +search_algorithm: asha +hp_space: + learning_parameters.learning_rate: + param_type: qloguniform + range: + - 0.0002 + - 0.005 + - 0.0001 + learning_parameters.batch_size: + param_type: qloguniform + range: + - 6 + - 12 + - 2 diff --git a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18/model.py b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18/model.py new file mode 100644 index 00000000000..96e25b78f01 --- /dev/null +++ b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18/model.py @@ -0,0 +1,7 @@ +_base_ = [ + "../../../submodule/models/segmentation/ocr_litehrnet18_mod2.yaml", +] + +load_from = "https://storage.openvinotoolkit.org/repositories/openvino_training_extensions\ +/models/custom_semantic_segmentation/litehrnet18_imagenet1k_rsc.pth" +fp16 = dict(loss_scale=512.0) diff --git a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18/template.yaml b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18/template.yaml new file mode 100644 index 00000000000..4763d1d219d --- /dev/null +++ b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-18/template.yaml @@ -0,0 +1,52 @@ +# Description. +model_template_id: Custom_Semantic_Segmentation_Lite-HRNet-18_OCR +name: Lite-HRNet-18 +task_type: SEGMENTATION +task_family: VISION +instantiation: "CLASS" +summary: Class-Incremental Semantic Segmentation with middle-sized architecture which based on the Lite-HRNet backbone for the balance between the fast inference and long training. (deprecated in next version) +application: ~ + +# Algo backend. +framework: OTESegmentation v0.14.0 + +# Task implementations. +entrypoints: + base: mpa_tasks.apis.segmentation.SegmentationTrainTask + openvino: segmentation_tasks.apis.segmentation.OpenVINOSegmentationTask + nncf: mpa_tasks.apis.segmentation.SegmentationNNCFTask +base_model_path: ../../../../mmsegmentation/configs/custom-sematic-segmentation/ocr-lite-hrnet-18/template_experimental.yaml + +# Capabilities. +capabilities: + - compute_representations + +# Hyperparameters. 
+hyper_parameters: + base_path: ../configuration.yaml + parameter_overrides: + learning_parameters: + batch_size: + default_value: 8 + learning_rate: + default_value: 0.001 + auto_hpo_state: POSSIBLE + learning_rate_fixed_iters: + default_value: 0 + learning_rate_warmup_iters: + default_value: 100 + num_iters: + default_value: 300 + algo_backend: + train_type: + default_value: Incremental + +# Training resources. +max_nodes: 1 +training_targets: + - GPU + - CPU + +# Stats. +gigaflops: 3.45 +size: 4.5 diff --git a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-s-mod2/hpo_config.yaml b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-s-mod2/hpo_config.yaml index 01d052bd2f1..819a3ac0368 100644 --- a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-s-mod2/hpo_config.yaml +++ b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-s-mod2/hpo_config.yaml @@ -1,16 +1,15 @@ metric: mDice search_algorithm: asha -# max_iterations: 30 hp_space: learning_parameters.learning_rate: - param_type: quniform + param_type: qloguniform range: - - 0.001 - - 0.1 - - 0.001 + - 0.0002 + - 0.005 + - 0.0001 learning_parameters.batch_size: param_type: qloguniform range: - - 4 - - 16 + - 6 + - 12 - 2 diff --git a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-s-mod2/model.py b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-s-mod2/model.py index 0d44ff65067..92c2fd86ff5 100644 --- a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-s-mod2/model.py +++ b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-s-mod2/model.py @@ -1,6 +1,7 @@ _base_ = [ - '../../../submodule/models/segmentation/ocr_litehrnet_s_mod2.yaml', + "../../../submodule/models/segmentation/ocr_litehrnet_s_mod2.yaml", ] -load_from = 'https://storage.openvinotoolkit.org/repositories/openvino_training_extensions/models/custom_semantic_segmentation/litehrnetsv2_imagenet1k_rsc.pth' -fp16 = dict(loss_scale=512.) 
+load_from = "https://storage.openvinotoolkit.org/repositories/openvino_training_extensions\ +/models/custom_semantic_segmentation/litehrnetsv2_imagenet1k_rsc.pth" +fp16 = dict(loss_scale=512.0) diff --git a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-s-mod2/template.yaml b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-s-mod2/template.yaml index 049e3934abf..cdee5517a83 100644 --- a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-s-mod2/template.yaml +++ b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-s-mod2/template.yaml @@ -38,6 +38,15 @@ hyper_parameters: default_value: 100 num_iters: default_value: 300 + nncf_optimization: + enable_quantization: + default_value: true + enable_pruning: + default_value: false + pruning_supported: + default_value: false + maximal_accuracy_degradation: + default_value: 1.0 algo_backend: train_type: default_value: Incremental diff --git a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-x-mod3/hpo_config.yaml b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-x-mod3/hpo_config.yaml index 01d052bd2f1..3b8a4838ecc 100644 --- a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-x-mod3/hpo_config.yaml +++ b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-x-mod3/hpo_config.yaml @@ -1,13 +1,12 @@ metric: mDice search_algorithm: asha -# max_iterations: 30 hp_space: learning_parameters.learning_rate: - param_type: quniform + param_type: qloguniform range: - - 0.001 - - 0.1 - - 0.001 + - 0.0001 + - 0.01 + - 0.0001 learning_parameters.batch_size: param_type: qloguniform range: diff --git a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-x-mod3/model.py b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-x-mod3/model.py index 1c5df9fdb60..80043bd77fa 100644 --- a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-x-mod3/model.py +++ b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-x-mod3/model.py @@ -1,6 +1,7 @@ _base_ = [ - '../../../submodule/models/segmentation/ocr_litehrnet_x_mod3.yaml', + "../../../submodule/models/segmentation/ocr_litehrnet_x_mod3.yaml", ] -load_from = 'https://storage.openvinotoolkit.org/repositories/openvino_training_extensions/models/custom_semantic_segmentation/litehrnetxv3_imagenet1k_rsc.pth' -fp16 = dict(loss_scale=512.) 
+load_from = "https://storage.openvinotoolkit.org/repositories/openvino_training_extensions\ +/models/custom_semantic_segmentation/litehrnetxv3_imagenet1k_rsc.pth" +fp16 = dict(loss_scale=512.0) diff --git a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-x-mod3/template.yaml b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-x-mod3/template.yaml index 0bbbde4bf95..7c1b6971a96 100644 --- a/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-x-mod3/template.yaml +++ b/external/model-preparation-algorithm/configs/segmentation/ocr-lite-hrnet-x-mod3/template.yaml @@ -38,6 +38,15 @@ hyper_parameters: default_value: 100 num_iters: default_value: 300 + nncf_optimization: + enable_quantization: + default_value: true + enable_pruning: + default_value: false + pruning_supported: + default_value: false + maximal_accuracy_degradation: + default_value: 1.0 algo_backend: train_type: default_value: Incremental diff --git a/external/model-preparation-algorithm/constraints.txt b/external/model-preparation-algorithm/constraints.txt index e69de29bb2d..3bc76e52a30 100644 --- a/external/model-preparation-algorithm/constraints.txt +++ b/external/model-preparation-algorithm/constraints.txt @@ -0,0 +1 @@ +optuna==2.10.1 # remedy for fixed optuna version incompatible in OTE CI diff --git a/external/model-preparation-algorithm/init_venv.sh b/external/model-preparation-algorithm/init_venv.sh index 34c0a720dda..5397d2f372f 100755 --- a/external/model-preparation-algorithm/init_venv.sh +++ b/external/model-preparation-algorithm/init_venv.sh @@ -129,12 +129,6 @@ pip install -e ../mmdetection/submodule || exit 1 pip install -e ../mmsegmentation/submodule || exit 1 pip install -e submodule || exit 1 -MPA_DIR=$(realpath submodule) -echo "export MPA_DIR=${MPA_DIR}" >> "${venv_dir}"/bin/activate - -# Install OTE CLI -pip install -e ../../ote_cli || exit 1 - # Build NNCF extensions echo "Build NNCF extensions ..." 
python -c "import nncf" diff --git a/external/model-preparation-algorithm/mpa_tasks/apis/__init__.py b/external/model-preparation-algorithm/mpa_tasks/apis/__init__.py index 46a0ca657b4..ec807f35999 100644 --- a/external/model-preparation-algorithm/mpa_tasks/apis/__init__.py +++ b/external/model-preparation-algorithm/mpa_tasks/apis/__init__.py @@ -2,5 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # -from .config import BaseConfig, TrainType, LearningRateSchedule +from .config import BaseConfig, LearningRateSchedule, TrainType from .task import BaseTask + +__all__ = [BaseConfig, TrainType, LearningRateSchedule, BaseTask] diff --git a/external/model-preparation-algorithm/mpa_tasks/apis/classification/__init__.py b/external/model-preparation-algorithm/mpa_tasks/apis/classification/__init__.py index 8be685ef5a6..039650fa7c4 100644 --- a/external/model-preparation-algorithm/mpa_tasks/apis/classification/__init__.py +++ b/external/model-preparation-algorithm/mpa_tasks/apis/classification/__init__.py @@ -2,11 +2,25 @@ # SPDX-License-Identifier: Apache-2.0 # -from .config import ClassificationConfig -from .task import ClassificationInferenceTask, ClassificationTrainTask, ClassificationNNCFTask +import mpa.cls # Load relevant extensions to registry import mpa_tasks.extensions.datasets.mpa_cls_dataset import mpa_tasks.extensions.datasets.pipelines.mpa_cls_pipeline -import mpa.cls +from .config import ClassificationConfig +from .task import ( + ClassificationInferenceTask, + ClassificationNNCFTask, + ClassificationTrainTask, +) + +__all__ = [ + ClassificationConfig, + ClassificationInferenceTask, + ClassificationTrainTask, + ClassificationNNCFTask, + mpa_tasks.extensions.datasets.mpa_cls_dataset, + mpa_tasks.extensions.datasets.pipelines.mpa_cls_pipeline, + mpa.cls, +] diff --git a/external/model-preparation-algorithm/mpa_tasks/apis/classification/config.py b/external/model-preparation-algorithm/mpa_tasks/apis/classification/config.py index 2d9702ce534..f556c438da2 100644 --- a/external/model-preparation-algorithm/mpa_tasks/apis/classification/config.py +++ b/external/model-preparation-algorithm/mpa_tasks/apis/classification/config.py @@ -3,28 +3,20 @@ # from attr import attrs - -from ote_sdk.configuration.elements import (add_parameter_group, - # ParameterGroup, - # configurable_boolean, - # configurable_float, - # configurable_integer, - # selectable, - string_attribute) - from mpa_tasks.apis import BaseConfig +from ote_sdk.configuration.elements import add_parameter_group, string_attribute @attrs class ClassificationConfig(BaseConfig): @attrs class __LearningParameters(BaseConfig.BaseLearningParameters): - header = string_attribute('Learning Parameters') + header = string_attribute("Learning Parameters") description = header @attrs class __AlgoBackend(BaseConfig.BaseAlgoBackendParameters): - header = string_attribute('Parameters for the MPA algo-backend') + header = string_attribute("Parameters for the MPA algo-backend") description = header learning_parameters = add_parameter_group(__LearningParameters) diff --git a/external/model-preparation-algorithm/mpa_tasks/apis/classification/task.py b/external/model-preparation-algorithm/mpa_tasks/apis/classification/task.py index 8bb1399d2bd..70fafc5703a 100644 --- a/external/model-preparation-algorithm/mpa_tasks/apis/classification/task.py +++ b/external/model-preparation-algorithm/mpa_tasks/apis/classification/task.py @@ -17,7 +17,6 @@ from mpa.utils.logger import get_logger from mpa_tasks.apis import BaseTask, TrainType from 
mpa_tasks.apis.classification import ClassificationConfig -from mpa_tasks.utils.data_utils import get_actmap from ote_sdk.configuration import cfg_helper from ote_sdk.configuration.helper.utils import ids_to_strings from ote_sdk.entities.datasets import DatasetEntity @@ -60,6 +59,7 @@ from ote_sdk.usecases.tasks.interfaces.unload_interface import IUnload from ote_sdk.utils.argument_checks import check_input_parameters_type from ote_sdk.utils.labels_utils import get_empty_label +from ote_sdk.utils.vis_utils import get_actmap from torchreid_tasks.nncf_task import OTEClassificationNNCFTask # from torchreid_tasks.utils import TrainingProgressCallback @@ -83,14 +83,14 @@ def on_epoch_end(self, epoch, logs=None): self.past_epoch_duration.append(time.time() - self.start_epoch_time) self._calculate_average_epoch() score = None - if hasattr(self.update_progress_callback, 'metric') and isinstance(logs, dict): + if hasattr(self.update_progress_callback, "metric") and isinstance(logs, dict): score = logs.get(self.update_progress_callback.metric, None) logger.info(f"logged score for metric {self.update_progress_callback.metric} = {score}") score = 0.01 * float(score) if score is not None else None if score is not None: - iter_num = logs.get('current_iters', None) + iter_num = logs.get("current_iters", None) if iter_num is not None: - logger.info(f'score = {score} at epoch {epoch} / {int(iter_num)}') + logger.info(f"score = {score} at epoch {epoch} / {int(iter_num)}") # as a trick, score (at least if it's accuracy not the loss) and iteration number # could be assembled just using summation and then disassembled. if 1.0 > score: @@ -114,32 +114,43 @@ def __init__(self, task_environment: TaskEnvironment): self._multilabel = False self._hierarchical = False - self._multilabel = len(task_environment.label_schema.get_groups(False)) > 1 and \ - len(task_environment.label_schema.get_groups(False)) == \ - len(task_environment.get_labels(include_empty=False)) # noqa:E127 + self._multilabel = len(task_environment.label_schema.get_groups(False)) > 1 and len( + task_environment.label_schema.get_groups(False) + ) == len( + task_environment.get_labels(include_empty=False) + ) # noqa:E127 self._hierarchical_info = None if not self._multilabel and len(task_environment.label_schema.get_groups(False)) > 1: self._hierarchical = True self._hierarchical_info = get_hierarchical_info(task_environment.label_schema) - def infer(self, - dataset: DatasetEntity, - inference_parameters: Optional[InferenceParameters] = None - ) -> DatasetEntity: - logger.info('called infer()') - stage_module = 'ClsInferrer' + def infer( + self, + dataset: DatasetEntity, + inference_parameters: Optional[InferenceParameters] = None, + ) -> DatasetEntity: + logger.info("called infer()") + stage_module = "ClsInferrer" self._data_cfg = self._init_test_data_cfg(dataset) dataset = dataset.with_empty_annotations() dump_features = True dump_saliency_map = not inference_parameters.is_evaluation if inference_parameters else True - results = self._run_task(stage_module, mode='train', dataset=dataset, dump_features=dump_features, - dump_saliency_map=dump_saliency_map) - logger.debug(f'result of run_task {stage_module} module = {results}') - predictions = results['outputs'] - prediction_results = zip(predictions['eval_predictions'], predictions['feature_vectors'], - predictions['saliency_maps']) + results = self._run_task( + stage_module, + mode="train", + dataset=dataset, + dump_features=dump_features, + dump_saliency_map=dump_saliency_map, + ) +
logger.debug(f"result of run_task {stage_module} module = {results}") + predictions = results["outputs"] + prediction_results = zip( + predictions["eval_predictions"], + predictions["feature_vectors"], + predictions["saliency_maps"], + ) update_progress_callback = default_progress_callback if inference_parameters is not None: @@ -148,63 +159,65 @@ def infer(self, self._add_predictions_to_dataset(prediction_results, dataset, update_progress_callback) return dataset - def evaluate(self, - output_result_set: ResultSetEntity, - evaluation_metric: Optional[str] = None): - logger.info('called evaluate()') + def evaluate( + self, + output_result_set: ResultSetEntity, + evaluation_metric: Optional[str] = None, + ): + logger.info("called evaluate()") metric = MetricsHelper.compute_accuracy(output_result_set) logger.info(f"Accuracy after evaluation: {metric.accuracy.value}") output_result_set.performance = metric.get_performance() - logger.info('Evaluation completed') + logger.info("Evaluation completed") def unload(self): - logger.info('called unload()') + logger.info("called unload()") self.finalize() - def export(self, - export_type: ExportType, - output_model: ModelEntity): - logger.info('Exporting the model') + def export(self, export_type: ExportType, output_model: ModelEntity): + logger.info("Exporting the model") if export_type != ExportType.OPENVINO: - raise RuntimeError(f'not supported export type {export_type}') + raise RuntimeError(f"not supported export type {export_type}") output_model.model_format = ModelFormat.OPENVINO output_model.optimization_type = ModelOptimizationType.MO - stage_module = 'ClsExporter' - self._initialize() - results = self._run_task(stage_module, mode='train', precision=self._precision[0].name) - logger.debug(f'results of run_task = {results}') - results = results.get('outputs') - logger.debug(f'results of run_task = {results}') + stage_module = "ClsExporter" + results = self._run_task(stage_module, mode="train", precision="FP32", export=True) + logger.debug(f"results of run_task = {results}") + results = results.get("outputs") + logger.debug(f"results of run_task = {results}") if results is None: logger.error(f"error while exporting model {results.get('msg')}") else: - bin_file = results.get('bin') - xml_file = results.get('xml') + bin_file = results.get("bin") + xml_file = results.get("xml") if xml_file is None or bin_file is None: - raise RuntimeError('invalid status of exporting. bin and xml should not be None') + raise RuntimeError("invalid status of exporting. bin and xml should not be None") with open(bin_file, "rb") as f: - output_model.set_data('openvino.bin', f.read()) + output_model.set_data("openvino.bin", f.read()) with open(xml_file, "rb") as f: - output_model.set_data('openvino.xml', f.read()) - output_model.precision = self._precision - output_model.set_data("label_schema.json", label_schema_to_bytes(self._task_environment.label_schema)) - logger.info('Exporting completed') + output_model.set_data("openvino.xml", f.read()) + output_model.precision = [ModelPrecision.FP32] + output_model.set_data( + "label_schema.json", + label_schema_to_bytes(self._task_environment.label_schema), + ) + logger.info("Exporting completed") def _add_predictions_to_dataset(self, prediction_results, dataset, update_progress_callback): - """ Loop over dataset again to assign predictions. Convert from MMClassification format to OTE format. """ + """Loop over dataset again to assign predictions. 
Convert from MMClassification format to OTE format.""" dataset_size = len(dataset) for i, (dataset_item, prediction_items) in enumerate(zip(dataset, prediction_results)): item_labels = [] pos_thr = 0.5 prediction_item, feature_vector, saliency_map = prediction_items if any(np.isnan(prediction_item)): - logger.info('Nan in prediction_item.') + logger.info("Nan in prediction_item.") if self._multilabel: if max(prediction_item) < pos_thr: - logger.info('Confidence is smaller than pos_thr, empty_label will be appended to item_labels.') - item_labels.append(ScoredLabel(self._empty_label, probability=1.)) + logger.info("Confidence is smaller than pos_thr, empty_label will be appended to item_labels.") + item_labels.append(ScoredLabel(self._empty_label, probability=1.0)) else: for cls_idx, pred_item in enumerate(prediction_item): if pred_item > pos_thr: @@ -212,31 +225,37 @@ def _add_predictions_to_dataset(self, prediction_results, dataset, update_progre item_labels.append(cls_label) elif self._hierarchical: - for head_idx in range(self._hierarchical_info['num_multiclass_heads']): - logits_begin, logits_end = self._hierarchical_info['head_idx_to_logits_range'][head_idx] - head_logits = prediction_item[logits_begin : logits_end] + for head_idx in range(self._hierarchical_info["num_multiclass_heads"]): + logits_begin, logits_end = self._hierarchical_info["head_idx_to_logits_range"][head_idx] + head_logits = prediction_item[logits_begin:logits_end] head_pred = np.argmax(head_logits) # Assume logits already passed softmax - label_str = self._hierarchical_info['all_groups'][head_idx][head_pred] + label_str = self._hierarchical_info["all_groups"][head_idx][head_pred] ote_label = next(x for x in self._labels if x.name == label_str) item_labels.append(ScoredLabel(label=ote_label, probability=float(head_logits[head_pred]))) - if self._hierarchical_info['num_multilabel_classes']: - logits_begin, logits_end = self._hierarchical_info['num_single_label_classes'], -1 - head_logits = prediction_item[logits_begin : logits_end] + if self._hierarchical_info["num_multilabel_classes"]: + logits_begin, logits_end = ( + self._hierarchical_info["num_single_label_classes"], + -1, + ) + head_logits = prediction_item[logits_begin:logits_end] for logit_idx, logit in enumerate(head_logits): if logit > pos_thr: # Assume logits already passed sigmoid - label_str_idx = self._hierarchical_info['num_multiclass_heads'] + logit_idx - label_str = self._hierarchical_info['all_groups'][label_str_idx][0] + label_str_idx = self._hierarchical_info["num_multiclass_heads"] + logit_idx + label_str = self._hierarchical_info["all_groups"][label_str_idx][0] ote_label = next(x for x in self._labels if x.name == label_str) item_labels.append(ScoredLabel(label=ote_label, probability=float(logit))) item_labels = self._task_environment.label_schema.resolve_labels_probabilistic(item_labels) if not item_labels: - logger.info('item_labels is empty.') - item_labels.append(ScoredLabel(self._empty_label, probability=1.)) + logger.info("item_labels is empty.") + item_labels.append(ScoredLabel(self._empty_label, probability=1.0)) else: label_idx = prediction_item.argmax() - cls_label = ScoredLabel(self._labels[label_idx], probability=float(prediction_item[label_idx])) + cls_label = ScoredLabel( + self._labels[label_idx], + probability=float(prediction_item[label_idx]), + ) item_labels.append(cls_label) dataset_item.append_labels(item_labels) @@ -247,21 +266,39 @@ def _add_predictions_to_dataset(self, prediction_results, dataset, update_progre if 
saliency_map is not None: saliency_map = get_actmap(saliency_map, (dataset_item.width, dataset_item.height)) - saliency_map_media = ResultMediaEntity(name="saliency_map", type="Saliency map", - annotation_scene=dataset_item.annotation_scene, - numpy=saliency_map, roi=dataset_item.roi, - label=item_labels[0].label) + saliency_map_media = ResultMediaEntity( + name="Saliency Map", + type="saliency_map", + annotation_scene=dataset_item.annotation_scene, + numpy=saliency_map, + roi=dataset_item.roi, + label=item_labels[0].label, + ) dataset_item.append_metadata_item(saliency_map_media, model=self._task_environment.model) update_progress_callback(int(i / dataset_size * 100)) def _init_recipe_hparam(self) -> dict: warmup_iters = int(self._hyperparams.learning_parameters.learning_rate_warmup_iters) - lr_config = ConfigDict(warmup_iters=warmup_iters) if warmup_iters > 0 \ + lr_config = ( + ConfigDict(warmup_iters=warmup_iters) + if warmup_iters > 0 else ConfigDict(warmup_iters=warmup_iters, warmup=None) + ) + + if self._hyperparams.learning_parameters.enable_early_stopping: + early_stop = ConfigDict( + start=int(self._hyperparams.learning_parameters.early_stop_start), + patience=int(self._hyperparams.learning_parameters.early_stop_patience), + iteration_patience=int(self._hyperparams.learning_parameters.early_stop_iteration_patience), + ) + else: + early_stop = False + return ConfigDict( optimizer=ConfigDict(lr=self._hyperparams.learning_parameters.learning_rate), lr_config=lr_config, + early_stop=early_stop, data=ConfigDict( samples_per_gpu=int(self._hyperparams.learning_parameters.batch_size), workers_per_gpu=int(self._hyperparams.learning_parameters.num_workers), @@ -270,36 +307,39 @@ def _init_recipe_hparam(self) -> dict: ) def _init_recipe(self): - logger.info('called _init_recipe()') + logger.info("called _init_recipe()") - recipe_root = os.path.join(MPAConstants.RECIPES_PATH, 'stages/classification') + recipe_root = os.path.join(MPAConstants.RECIPES_PATH, "stages/classification") train_type = self._hyperparams.algo_backend.train_type - logger.info(f'train type = {train_type}') + logger.info(f"train type = {train_type}") - recipe = os.path.join(recipe_root, 'class_incr.yaml') + recipe = os.path.join(recipe_root, "class_incr.yaml") if train_type == TrainType.SemiSupervised: - raise NotImplementedError(f'train type {train_type} is not implemented yet.') + raise NotImplementedError(f"train type {train_type} is not implemented yet.") elif train_type == TrainType.SelfSupervised: - raise NotImplementedError(f'train type {train_type} is not implemented yet.') + raise NotImplementedError(f"train type {train_type} is not implemented yet.") elif train_type == TrainType.Incremental: - recipe = os.path.join(recipe_root, 'class_incr.yaml') + recipe = os.path.join(recipe_root, "class_incr.yaml") else: - raise NotImplementedError(f'train type {train_type} is not implemented yet.') + # raise NotImplementedError(f'train type {train_type} is not implemented yet.') + # FIXME: Temporary remedy for CVS-88098 + logger.warning(f"train type {train_type} is not implemented yet.") self._recipe_cfg = MPAConfig.fromfile(recipe) self._patch_datasets(self._recipe_cfg) # for OTE compatibility self._patch_evaluation(self._recipe_cfg) # for OTE compatibility - logger.info(f'initialized recipe = {recipe}') + logger.info(f"initialized recipe = {recipe}") def _init_model_cfg(self): base_dir = os.path.abspath(os.path.dirname(self.template_file_path)) if self._multilabel: - cfg_path = os.path.join(base_dir, 'model_multilabel.py') 
+ cfg_path = os.path.join(base_dir, "model_multilabel.py") elif self._hierarchical: - cfg_path = os.path.join(base_dir, 'model_hierarchical.py') + cfg_path = os.path.join(base_dir, "model_hierarchical.py") else: - cfg_path = os.path.join(base_dir, 'model.py') + cfg_path = os.path.join(base_dir, "model.py") cfg = MPAConfig.fromfile(cfg_path) + cfg.model.multilabel = self._multilabel cfg.model.hierarchical = self._hierarchical if self._hierarchical: @@ -316,7 +356,7 @@ def _init_test_data_cfg(self, dataset: DatasetEntity): test=ConfigDict( ote_dataset=dataset, labels=self._labels, - ) + ), ) ) return data_cfg @@ -325,37 +365,37 @@ def _patch_datasets(self, config: MPAConfig, domain=Domain.CLASSIFICATION): def patch_color_conversion(pipeline): # Default data format for OTE is RGB, while mmdet uses BGR, so negate the color conversion flag. for pipeline_step in pipeline: - if pipeline_step.type == 'Normalize': + if pipeline_step.type == "Normalize": to_rgb = False - if 'to_rgb' in pipeline_step: + if "to_rgb" in pipeline_step: to_rgb = pipeline_step.to_rgb to_rgb = not bool(to_rgb) pipeline_step.to_rgb = to_rgb - elif pipeline_step.type == 'MultiScaleFlipAug': + elif pipeline_step.type == "MultiScaleFlipAug": patch_color_conversion(pipeline_step.transforms) - assert 'data' in config - for subset in ('train', 'val', 'test'): + assert "data" in config + for subset in ("train", "val", "test"): cfg = config.data.get(subset, None) if not cfg: continue - if cfg.type == 'RepeatDataset': + if cfg.type == "RepeatDataset": cfg = cfg.dataset if self._multilabel: - cfg.type = 'MPAMultilabelClsDataset' + cfg.type = "MPAMultilabelClsDataset" elif self._hierarchical: - cfg.type = 'MPAHierarchicalClsDataset' + cfg.type = "MPAHierarchicalClsDataset" cfg.hierarchical_info = self._hierarchical_info - if subset == 'train': + if subset == "train": cfg.drop_last = True # For stable hierarchical information indexing else: - cfg.type = 'MPAClsDataset' + cfg.type = "MPAClsDataset" # In train dataset, when sample size is smaller than batch size - if subset == 'train' and self._data_cfg: + if subset == "train" and self._data_cfg: train_data_cfg = Stage.get_train_data_cfg(self._data_cfg) - if (len(train_data_cfg.get('ote_dataset', [])) < self._recipe_cfg.data.get('samples_per_gpu', 2)): + if len(train_data_cfg.get("ote_dataset", [])) < self._recipe_cfg.data.get("samples_per_gpu", 2): cfg.drop_last = False cfg.domain = domain @@ -363,32 +403,43 @@ def patch_color_conversion(pipeline): cfg.labels = None cfg.empty_label = self._empty_label for pipeline_step in cfg.pipeline: - if subset == 'train' and pipeline_step.type == 'Collect': + if subset == "train" and pipeline_step.type == "Collect": pipeline_step = BaseTask._get_meta_keys(pipeline_step) patch_color_conversion(cfg.pipeline) def _patch_evaluation(self, config: MPAConfig): cfg = config.evaluation if self._multilabel: - cfg.metric = ['accuracy-mlc', 'mAP', 'CP', 'OP', 'CR', 'OR', 'CF1', 'OF1'] + cfg.metric = ["accuracy-mlc", "mAP", "CP", "OP", "CR", "OR", "CF1", "OF1"] + config.early_stop_metric = "mAP" elif self._hierarchical: - cfg.metric = ['MHAcc', 'avgClsAcc', 'mAP'] + cfg.metric = ["MHAcc", "avgClsAcc", "mAP"] + config.early_stop_metric = "MHAcc" else: - cfg.metric = ['accuracy', 'class_accuracy'] + cfg.metric = ["accuracy", "class_accuracy"] + config.early_stop_metric = "accuracy" class ClassificationTrainTask(ClassificationInferenceTask): def save_model(self, output_model: ModelEntity): - logger.info('called save_model') + logger.info("called save_model") buffer 
= io.BytesIO() hyperparams_str = ids_to_strings(cfg_helper.convert(self._hyperparams, dict, enum_to_str=True)) labels = {label.name: label.color.rgb_tuple for label in self._labels} model_ckpt = torch.load(self._model_ckpt) - modelinfo = {'model': model_ckpt['state_dict'], 'config': hyperparams_str, 'labels': labels, 'VERSION': 1} + modelinfo = { + "model": model_ckpt["state_dict"], + "config": hyperparams_str, + "labels": labels, + "VERSION": 1, + } torch.save(modelinfo, buffer) output_model.set_data("weights.pth", buffer.getvalue()) - output_model.set_data("label_schema.json", label_schema_to_bytes(self._task_environment.label_schema)) + output_model.set_data( + "label_schema.json", + label_schema_to_bytes(self._task_environment.label_schema), + ) output_model.precision = self._precision def cancel_training(self): @@ -403,18 +454,20 @@ def cancel_training(self): if self.cancel_interface is not None: self.cancel_interface.cancel() else: - logger.info('but training was not started yet. reserved it to cancel') + logger.info("but training was not started yet. reserved it to cancel") self.reserved_cancel = True - def train(self, - dataset: DatasetEntity, - output_model: ModelEntity, - train_parameters: Optional[TrainParameters] = None): - logger.info('train()') + def train( + self, + dataset: DatasetEntity, + output_model: ModelEntity, + train_parameters: Optional[TrainParameters] = None, + ): + logger.info("train()") # Check for stop signal between pre-eval and training. # If training is cancelled at this point, if self._should_stop: - logger.info('Training cancelled.') + logger.info("Training cancelled.") self._should_stop = False self._is_training = False return @@ -426,23 +479,23 @@ def train(self, self._time_monitor = TrainingProgressCallback(update_progress_callback) self._learning_curves = defaultdict(OTELoggerHook.Curve) - stage_module = 'ClsTrainer' + stage_module = "ClsTrainer" self._data_cfg = self._init_train_data_cfg(dataset) self._is_training = True - results = self._run_task(stage_module, mode='train', dataset=dataset, parameters=train_parameters) + results = self._run_task(stage_module, mode="train", dataset=dataset, parameters=train_parameters) # Check for stop signal between pre-eval and training. 
# If training is cancelled at this point, if self._should_stop: - logger.info('Training cancelled.') + logger.info("Training cancelled.") self._should_stop = False self._is_training = False return # get output model - model_ckpt = results.get('final_ckpt') + model_ckpt = results.get("final_ckpt") if model_ckpt is None: - logger.error('cannot find final checkpoint from the results.') + logger.error("cannot find final checkpoint from the results.") return else: # update checkpoint to the newly trained model @@ -452,15 +505,17 @@ def train(self, training_metrics, final_acc = self._generate_training_metrics_group(self._learning_curves) # save resulting model self.save_model(output_model) - performance = Performance(score=ScoreMetric(value=final_acc, name="accuracy"), - dashboard_metrics=training_metrics) - logger.info(f'Final model performance: {str(performance)}') + performance = Performance( + score=ScoreMetric(value=final_acc, name="accuracy"), + dashboard_metrics=training_metrics, + ) + logger.info(f"Final model performance: {str(performance)}") output_model.performance = performance self._is_training = False - logger.info('train done.') + logger.info("train done.") def _init_train_data_cfg(self, dataset: DatasetEntity): - logger.info('init data cfg.') + logger.info("init data cfg.") data_cfg = ConfigDict( data=ConfigDict( train=ConfigDict( @@ -476,7 +531,7 @@ def _init_train_data_cfg(self, dataset: DatasetEntity): ) for label in self._labels: - label.hotkey = 'a' + label.hotkey = "a" return data_cfg def _generate_training_metrics_group(self, learning_curves) -> Optional[List[MetricsGroup]]: @@ -487,11 +542,11 @@ def _generate_training_metrics_group(self, learning_curves) -> Optional[List[Met output: List[MetricsGroup] = [] if self._multilabel: - metric_key = 'val/accuracy-mlc' + metric_key = "val/accuracy-mlc" elif self._hierarchical: - metric_key = 'val/MHAcc' + metric_key = "val/MHAcc" else: - metric_key = 'val/accuracy_top-1' + metric_key = "val/accuracy_top-1" # Learning curves best_acc = -1 @@ -499,8 +554,7 @@ def _generate_training_metrics_group(self, learning_curves) -> Optional[List[Met return output for key, curve in learning_curves.items(): - metric_curve = CurveMetric(xs=curve.x, - ys=curve.y, name=key) + metric_curve = CurveMetric(xs=curve.x, ys=curve.y, name=key) if key == metric_key: best_acc = max(curve.y) visualization_info = LineChartInfo(name=key, x_axis_label="Timestamp", y_axis_label=key) @@ -510,19 +564,18 @@ class ClassificationNNCFTask(OTEClassificationNNCFTask): - @check_input_parameters_type() def __init__(self, task_environment: TaskEnvironment): - """" + """ Task for compressing classification models using NNCF.
""" curr_model_path = task_environment.model_template.model_template_path base_model_path = os.path.join( os.path.dirname(os.path.abspath(curr_model_path)), - task_environment.model_template.base_model_path + task_environment.model_template.base_model_path, ) if os.path.isfile(base_model_path): - logger.info(f'Base model for NNCF: {base_model_path}') + logger.info(f"Base model for NNCF: {base_model_path}") # Redirect to base model task_environment.model_template = parse_model_template(base_model_path) super().__init__(task_environment) diff --git a/external/model-preparation-algorithm/mpa_tasks/apis/config.py b/external/model-preparation-algorithm/mpa_tasks/apis/config.py index f91f683e8e6..12358c5eb37 100644 --- a/external/model-preparation-algorithm/mpa_tasks/apis/config.py +++ b/external/model-preparation-algorithm/mpa_tasks/apis/config.py @@ -4,34 +4,38 @@ from sys import maxsize -from ote_sdk.configuration.elements import (ParameterGroup, - selectable, - configurable_boolean, - configurable_float, - configurable_integer) -from ote_sdk.configuration import ConfigurableParameters -from ote_sdk.configuration import ConfigurableEnum +from attr import attrs +from ote_sdk.configuration import ConfigurableEnum, ConfigurableParameters +from ote_sdk.configuration.elements import ( + ParameterGroup, + configurable_boolean, + configurable_float, + configurable_integer, + selectable, +) from ote_sdk.configuration.model_lifecycle import ModelLifecycle class TrainType(ConfigurableEnum): - FineTune = 'FineTune' - SemiSupervised = 'SemiSupervised' - SelfSupervised = 'SelfSupervised' - Incremental = 'Incremental' - FutureWork = 'FutureWork' + FineTune = "FineTune" + SemiSupervised = "SemiSupervised" + SelfSupervised = "SelfSupervised" + Incremental = "Incremental" + FutureWork = "FutureWork" class LearningRateSchedule(ConfigurableEnum): - FIXED = 'fixed' - EXPONENTIAL = 'exponential' - COSINE = 'cosine' - STEP_WISE = 'step_wise' - CYCLIC = 'cyclic' - CUSTOM = 'custom' + FIXED = "fixed" + EXPONENTIAL = "exponential" + COSINE = "cosine" + STEP_WISE = "step_wise" + CYCLIC = "cyclic" + CUSTOM = "custom" +@attrs class BaseConfig(ConfigurableParameters): + @attrs class BaseLearningParameters(ParameterGroup): batch_size = configurable_integer( default_value=5, @@ -43,7 +47,7 @@ class BaseLearningParameters(ParameterGroup): "memory requirements.", warning="Increasing this value may cause the system to use more memory than available, " "potentially causing out of memory errors, please update with caution.", - affects_outcome_of=ModelLifecycle.TRAINING + affects_outcome_of=ModelLifecycle.TRAINING, ) num_iters = configurable_integer( @@ -52,7 +56,7 @@ class BaseLearningParameters(ParameterGroup): max_value=100000, header="Number of training iterations", description="Increasing this value causes the results to be more robust but training time will be longer.", - affects_outcome_of=ModelLifecycle.TRAINING + affects_outcome_of=ModelLifecycle.TRAINING, ) learning_rate = configurable_float( @@ -61,7 +65,7 @@ class BaseLearningParameters(ParameterGroup): max_value=1e-01, header="Learning rate", description="Increasing this value will speed up training convergence but might make it unstable.", - affects_outcome_of=ModelLifecycle.TRAINING + affects_outcome_of=ModelLifecycle.TRAINING, ) learning_rate_warmup_iters = configurable_integer( @@ -70,7 +74,53 @@ class BaseLearningParameters(ParameterGroup): max_value=10000, header="Number of iterations for learning rate warmup", description="", - 
affects_outcome_of=ModelLifecycle.TRAINING + affects_outcome_of=ModelLifecycle.TRAINING, + ) + + use_adaptive_interval = configurable_boolean( + default_value=False, + header="Use adaptive validation interval", + description="Depending on the size of iteration per epoch, \ + adaptively update the validation interval and related values.", + warning="This will automatically control the patience and interval when early stopping is enabled.", + affects_outcome_of=ModelLifecycle.TRAINING, + ) + + enable_early_stopping = configurable_boolean( + default_value=True, + header="Enable early stopping of the training", + description="Early exit from training when validation accuracy stops \ + improving for several epochs.", + affects_outcome_of=ModelLifecycle.TRAINING, + ) + + early_stop_start = configurable_integer( + default_value=3, + min_value=0, + max_value=1000, + header="Start epoch for early stopping", + affects_outcome_of=ModelLifecycle.TRAINING, + ) + + early_stop_patience = configurable_integer( + default_value=5, + min_value=0, + max_value=50, + header="Patience for early stopping", + description="Training will stop if the model does not improve within the number of epochs of patience.", + warning="This is applied exclusively when early stopping is enabled.", + affects_outcome_of=ModelLifecycle.TRAINING, + ) + + early_stop_iteration_patience = configurable_integer( + default_value=0, + min_value=0, + max_value=1000, + header="Iteration patience for early stopping", + description="Training will stop if the model does not improve within the number of iterations of patience. \ + This ensures the model is trained enough with the number of iterations of patience before early stopping.", + warning="This is applied exclusively when early stopping is enabled.", + affects_outcome_of=ModelLifecycle.TRAINING, ) num_workers = configurable_integer( @@ -79,9 +129,9 @@ class BaseLearningParameters(ParameterGroup): max_value=8, header="Number of cpu threads to use during batch generation", description="Increasing this value might improve training speed however it might cause out of memory " - "errors.
If the number of workers is set to zero, data loading will happen in the main " + "training thread.", + affects_outcome_of=ModelLifecycle.NONE, ) num_checkpoints = configurable_integer( @@ -90,15 +140,16 @@ class BaseLearningParameters(ParameterGroup): max_value=100, header="Number of checkpoints that is done during the single training round", description="", - affects_outcome_of=ModelLifecycle.NONE + affects_outcome_of=ModelLifecycle.NONE, ) + @attrs class BasePostprocessing(ParameterGroup): result_based_confidence_threshold = configurable_boolean( default_value=True, header="Result based confidence threshold", description="Confidence threshold is derived from the results", - affects_outcome_of=ModelLifecycle.INFERENCE + affects_outcome_of=ModelLifecycle.INFERENCE, ) confidence_threshold = configurable_float( @@ -107,22 +158,30 @@ class BasePostprocessing(ParameterGroup): max_value=1, header="Confidence threshold", description="This threshold only takes effect if the threshold is not set based on the result.", - affects_outcome_of=ModelLifecycle.INFERENCE + affects_outcome_of=ModelLifecycle.INFERENCE, ) + @attrs class BaseNNCFOptimization(ParameterGroup): enable_quantization = configurable_boolean( default_value=True, header="Enable quantization algorithm", description="Enable quantization algorithm", - affects_outcome_of=ModelLifecycle.TRAINING + affects_outcome_of=ModelLifecycle.TRAINING, ) enable_pruning = configurable_boolean( default_value=False, header="Enable filter pruning algorithm", description="Enable filter pruning algorithm", - affects_outcome_of=ModelLifecycle.TRAINING + affects_outcome_of=ModelLifecycle.TRAINING, + ) + + pruning_supported = configurable_boolean( + default_value=False, + header="Whether filter pruning is supported", + description="Whether filter pruning is supported", + affects_outcome_of=ModelLifecycle.TRAINING, ) maximal_accuracy_degradation = configurable_float( @@ -131,21 +190,25 @@ class BaseNNCFOptimization(ParameterGroup): max_value=100.0, header="Maximum accuracy degradation", description="The maximal allowed accuracy metric drop", - affects_outcome_of=ModelLifecycle.TRAINING + affects_outcome_of=ModelLifecycle.TRAINING, ) + @attrs class BasePOTParameter(ParameterGroup): stat_subset_size = configurable_integer( header="Number of data samples", description="Number of data samples used for post-training optimization", default_value=300, min_value=1, - max_value=maxsize + max_value=maxsize, ) + @attrs class BaseAlgoBackendParameters(ParameterGroup): - train_type = selectable(default_value=TrainType.Incremental, - header='train type', - description='training schema for the MPA task', - editable=False, - visible_in_ui=True) + train_type = selectable( + default_value=TrainType.Incremental, + header="train type", + description="training schema for the MPA task", + editable=False, + visible_in_ui=True, + ) diff --git a/external/model-preparation-algorithm/mpa_tasks/apis/detection/__init__.py b/external/model-preparation-algorithm/mpa_tasks/apis/detection/__init__.py index ba204e88bee..6978897fa27 100644 --- a/external/model-preparation-algorithm/mpa_tasks/apis/detection/__init__.py +++ b/external/model-preparation-algorithm/mpa_tasks/apis/detection/__init__.py @@ -2,10 +2,19 @@ # SPDX-License-Identifier: Apache-2.0 # -from .config import DetectionConfig -from .task import DetectionInferenceTask, DetectionTrainTask, DetectionNNCFTask +import mpa.det # Load relevant extensions to registry import mpa_tasks.extensions.datasets.mpa_det_dataset -import mpa.det 
+from .config import DetectionConfig +from .task import DetectionInferenceTask, DetectionNNCFTask, DetectionTrainTask + +__all__ = [ + "DetectionConfig", + "DetectionInferenceTask", + "DetectionTrainTask", + "DetectionNNCFTask", + "mpa_tasks", # imported for its dataset-registration side effect + "mpa", # imported for its stage-registration side effect +] diff --git a/external/model-preparation-algorithm/mpa_tasks/apis/detection/config.py b/external/model-preparation-algorithm/mpa_tasks/apis/detection/config.py index b0af3d00faa..3af4c90ce6d 100644 --- a/external/model-preparation-algorithm/mpa_tasks/apis/detection/config.py +++ b/external/model-preparation-algorithm/mpa_tasks/apis/detection/config.py @@ -3,16 +3,12 @@ # from attr import attrs - -from ote_sdk.configuration.elements import (add_parameter_group, - # ParameterGroup, - # configurable_boolean, - # configurable_float, - # configurable_integer, - selectable, - string_attribute) - from mpa_tasks.apis import BaseConfig, LearningRateSchedule +from ote_sdk.configuration.elements import ( + add_parameter_group, + selectable, + string_attribute, +) @@ -22,19 +18,21 @@ class DetectionConfig(BaseConfig): @attrs class __LearningParameters(BaseConfig.BaseLearningParameters): - header = string_attribute('Learning Parameters') + header = string_attribute("Learning Parameters") description = header learning_rate_schedule = selectable( default_value=LearningRateSchedule.COSINE, - header='Learning rate schedule', - description='Specify learning rate scheduling for the MMDetection task. ' - 'When training for a small number of epochs (N < 10), the fixed ' - 'schedule is recommended. For training for 10 < N < 25 epochs, ' - 'step-wise or exponential annealing might give better results. 
" + "Finally, for training on large datasets for at least 20 " + "epochs, cyclic annealing could result in the best model.", + editable=True, + visible_in_ui=True, + ) @attrs class __Postprocessing(BaseConfig.BasePostprocessing): @@ -53,7 +51,7 @@ class __POTParameter(BaseConfig.BasePOTParameter): @attrs class __AlgoBackend(BaseConfig.BaseAlgoBackendParameters): - header = string_attribute('Parameters for the MPA algo-backend') + header = string_attribute("Parameters for the MPA algo-backend") description = header learning_parameters = add_parameter_group(__LearningParameters) diff --git a/external/model-preparation-algorithm/mpa_tasks/apis/detection/task.py b/external/model-preparation-algorithm/mpa_tasks/apis/detection/task.py index 15a577ec9f8..c77de594ddf 100644 --- a/external/model-preparation-algorithm/mpa_tasks/apis/detection/task.py +++ b/external/model-preparation-algorithm/mpa_tasks/apis/detection/task.py @@ -5,20 +5,24 @@ import io import os from collections import defaultdict -from typing import List, Optional, Tuple, Iterable +from typing import Iterable, List, Optional, Tuple import cv2 import numpy as np import torch -from mmcv.utils import ConfigDict +from detection_tasks.apis.detection import OTEDetectionNNCFTask from detection_tasks.apis.detection.config_utils import remove_from_config -from detection_tasks.apis.detection.ote_utils import TrainingProgressCallback, InferenceProgressCallback +from detection_tasks.apis.detection.ote_utils import ( + InferenceProgressCallback, + TrainingProgressCallback, +) from detection_tasks.extension.utils.hooks import OTELoggerHook -from mpa_tasks.apis import BaseTask, TrainType -from mpa_tasks.apis.detection import DetectionConfig +from mmcv.utils import ConfigDict from mpa import MPAConstants from mpa.utils.config_utils import MPAConfig from mpa.utils.logger import get_logger +from mpa_tasks.apis import BaseTask, TrainType +from mpa_tasks.apis.detection import DetectionConfig from ote_sdk.configuration import cfg_helper from ote_sdk.configuration.helper.utils import ids_to_strings from ote_sdk.entities.annotation import Annotation @@ -26,15 +30,25 @@ from ote_sdk.entities.id import ID from ote_sdk.entities.inference_parameters import InferenceParameters from ote_sdk.entities.label import Domain -from ote_sdk.entities.metrics import (BarChartInfo, BarMetricsGroup, - CurveMetric, LineChartInfo, - LineMetricsGroup, MetricsGroup, - ScoreMetric, VisualizationType) -from ote_sdk.entities.model import (ModelEntity, ModelFormat, - ModelOptimizationType) -from ote_sdk.entities.model_template import TaskType -from ote_sdk.entities.resultset import ResultSetEntity +from ote_sdk.entities.metrics import ( + BarChartInfo, + BarMetricsGroup, + CurveMetric, + LineChartInfo, + LineMetricsGroup, + MetricsGroup, + ScoreMetric, + VisualizationType, +) +from ote_sdk.entities.model import ( + ModelEntity, + ModelFormat, + ModelOptimizationType, + ModelPrecision, +) +from ote_sdk.entities.model_template import TaskType, parse_model_template from ote_sdk.entities.result_media import ResultMediaEntity +from ote_sdk.entities.resultset import ResultSetEntity from ote_sdk.entities.scored_label import ScoredLabel from ote_sdk.entities.shapes.polygon import Point, Polygon from ote_sdk.entities.shapes.rectangle import Rectangle @@ -44,19 +58,13 @@ from ote_sdk.entities.train_parameters import TrainParameters, default_progress_callback from ote_sdk.serialization.label_mapper import label_schema_to_bytes from ote_sdk.usecases.evaluation.metrics_helper import 
MetricsHelper -from ote_sdk.usecases.tasks.interfaces.evaluate_interface import \ - IEvaluationTask -from ote_sdk.usecases.tasks.interfaces.export_interface import (ExportType, - IExportTask) -from ote_sdk.usecases.tasks.interfaces.inference_interface import \ - IInferenceTask +from ote_sdk.usecases.tasks.interfaces.evaluate_interface import IEvaluationTask +from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType, IExportTask +from ote_sdk.usecases.tasks.interfaces.inference_interface import IInferenceTask from ote_sdk.usecases.tasks.interfaces.training_interface import ITrainingTask from ote_sdk.usecases.tasks.interfaces.unload_interface import IUnload - -from detection_tasks.apis.detection import OTEDetectionNNCFTask from ote_sdk.utils.argument_checks import check_input_parameters_type -from ote_sdk.entities.model_template import parse_model_template - +from ote_sdk.utils.vis_utils import get_actmap logger = get_logger() @@ -68,11 +76,12 @@ def __init__(self, task_environment: TaskEnvironment): # self._should_stop = False super().__init__(TASK_CONFIG, task_environment) - def infer(self, - dataset: DatasetEntity, - inference_parameters: Optional[InferenceParameters] = None - ) -> DatasetEntity: - logger.info('infer()') + def infer( + self, + dataset: DatasetEntity, + inference_parameters: Optional[InferenceParameters] = None, + ) -> DatasetEntity: + logger.info("infer()") update_progress_callback = default_progress_callback if inference_parameters is not None: @@ -83,16 +92,19 @@ def infer(self, # and should not be changed during inference. Otherwise user-specified value should be taken. if not self._hyperparams.postprocessing.result_based_confidence_threshold: self.confidence_threshold = self._hyperparams.postprocessing.confidence_threshold - logger.info(f'Confidence threshold {self.confidence_threshold}') + logger.info(f"Confidence threshold {self.confidence_threshold}") prediction_results, _ = self._infer_detector(dataset, inference_parameters) self._add_predictions_to_dataset(prediction_results, dataset, self.confidence_threshold) - logger.info('Inference completed') + logger.info("Inference completed") return dataset - def _infer_detector(self, dataset: DatasetEntity, - inference_parameters: Optional[InferenceParameters] = None) -> Tuple[Iterable, float]: - """ Inference wrapper + def _infer_detector( + self, + dataset: DatasetEntity, + inference_parameters: Optional[InferenceParameters] = None, + ) -> Tuple[Iterable, float]: + """Inference wrapper This method triggers the inference and returns `prediction_results` zipped with prediction results, feature vectors, and saliency maps. 
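For example, the returned iterable could be consumed as follows (a sketch; the exact element types depend on the model):

            for detections, feature_vector, saliency_map in prediction_results:
                ...  # one (detections, feature_vector, saliency_map) triple per dataset item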
`metric` is returned as a float value if InferenceParameters.is_evaluation @@ -107,38 +119,44 @@ def _infer_detector(self, dataset: DatasetEntity, Returns: Tuple[Iterable, float]: Iterable prediction results for each sample and metric for on the given dataset """ - stage_module = 'DetectionInferrer' + stage_module = "DetectionInferrer" self._data_cfg = self._init_test_data_cfg(dataset) dump_features = True dump_saliency_map = not inference_parameters.is_evaluation if inference_parameters else True - results = self._run_task(stage_module, - mode='train', - dataset=dataset, - eval=inference_parameters.is_evaluation if inference_parameters else False, - dump_features=dump_features, - dump_saliency_map=dump_saliency_map) + results = self._run_task( + stage_module, + mode="train", + dataset=dataset, + eval=inference_parameters.is_evaluation if inference_parameters else False, + dump_features=dump_features, + dump_saliency_map=dump_saliency_map, + ) # TODO: InferenceProgressCallback register - logger.debug(f'result of run_task {stage_module} module = {results}') - output = results['outputs'] - metric = output['metric'] - predictions = output['detections'] - assert len(output['detections']) == len(output['feature_vectors']) == len(output['saliency_maps']), \ - 'Number of elements should be the same, however, number of outputs are ' \ - f"{len(output['detections'])}, {len(output['feature_vectors'])}, and {len(output['saliency_maps'])}" - prediction_results = zip(predictions, output['feature_vectors'], output['saliency_maps']) + logger.debug(f"result of run_task {stage_module} module = {results}") + output = results["outputs"] + metric = output["metric"] + predictions = output["detections"] + assert len(output["detections"]) == len(output["feature_vectors"]) == len(output["saliency_maps"]), ( + "Number of elements should be the same, however, number of outputs are " + f"{len(output['detections'])}, {len(output['feature_vectors'])}, and {len(output['saliency_maps'])}" + ) + prediction_results = zip(predictions, output["feature_vectors"], output["saliency_maps"]) return prediction_results, metric - def evaluate(self, - output_result_set: ResultSetEntity, - evaluation_metric: Optional[str] = None): - logger.info('called evaluate()') + def evaluate( + self, + output_result_set: ResultSetEntity, + evaluation_metric: Optional[str] = None, + ): + logger.info("called evaluate()") if evaluation_metric is not None: - logger.warning(f'Requested to use {evaluation_metric} metric, ' - 'but parameter is ignored. Use F-measure instead.') + logger.warning( + f"Requested to use {evaluation_metric} metric, " "but parameter is ignored. Use F-measure instead." 
+ ) metric = MetricsHelper.compute_f_measure(output_result_set) logger.info(f"F-measure after evaluation: {metric.f_measure.value}") output_result_set.performance = metric.get_performance() - logger.info('Evaluation completed') + logger.info("Evaluation completed") def unload(self): """ @@ -146,47 +164,63 @@ def unload(self): """ self.finalize() - def export(self, - export_type: ExportType, - output_model: ModelEntity): + def export(self, export_type: ExportType, output_model: ModelEntity): # copied from OTE inference_task.py - logger.info('Exporting the model') + logger.info("Exporting the model") if export_type != ExportType.OPENVINO: - raise RuntimeError(f'not supported export type {export_type}') + raise RuntimeError(f"not supported export type {export_type}") output_model.model_format = ModelFormat.OPENVINO output_model.optimization_type = ModelOptimizationType.MO - stage_module = 'DetectionExporter' - self._model_cfg = self._initialize() - results = self._run_task(stage_module, mode='train', precision=self._precision[0].name) - results = results.get('outputs') - logger.debug(f'results of run_task = {results}') + stage_module = "DetectionExporter" + results = self._run_task(stage_module, mode="train", precision="FP32", export=True) + results = results.get("outputs") + logger.debug(f"results of run_task = {results}") if results is None: logger.error(f"error while exporting model {results.get('msg')}") else: - bin_file = results.get('bin') - xml_file = results.get('xml') + bin_file = results.get("bin") + xml_file = results.get("xml") if xml_file is None or bin_file is None: - raise RuntimeError('invalid status of exporting. bin and xml should not be None') + raise RuntimeError("invalid status of exporting. bin and xml should not be None") with open(bin_file, "rb") as f: - output_model.set_data('openvino.bin', f.read()) + output_model.set_data("openvino.bin", f.read()) with open(xml_file, "rb") as f: - output_model.set_data('openvino.xml', f.read()) + output_model.set_data("openvino.xml", f.read()) output_model.set_data( - 'confidence_threshold', - np.array([self.confidence_threshold], dtype=np.float32).tobytes()) - output_model.precision = self._precision + "confidence_threshold", + np.array([self.confidence_threshold], dtype=np.float32).tobytes(), + ) + output_model.precision = [ModelPrecision.FP32] output_model.optimization_methods = self._optimization_methods - output_model.set_data("label_schema.json", label_schema_to_bytes(self._task_environment.label_schema)) - logger.info('Exporting completed') + output_model.set_data( + "label_schema.json", + label_schema_to_bytes(self._task_environment.label_schema), + ) + logger.info("Exporting completed") def _init_recipe_hparam(self) -> dict: warmup_iters = int(self._hyperparams.learning_parameters.learning_rate_warmup_iters) - lr_config = ConfigDict(warmup_iters=warmup_iters) if warmup_iters > 0 \ + lr_config = ( + ConfigDict(warmup_iters=warmup_iters) + if warmup_iters > 0 else ConfigDict(warmup_iters=warmup_iters, warmup=None) + ) + + if self._hyperparams.learning_parameters.enable_early_stopping: + early_stop = ConfigDict( + start=int(self._hyperparams.learning_parameters.early_stop_start), + patience=int(self._hyperparams.learning_parameters.early_stop_patience), + iteration_patience=int(self._hyperparams.learning_parameters.early_stop_iteration_patience), + ) + else: + early_stop = False + return ConfigDict( optimizer=ConfigDict(lr=self._hyperparams.learning_parameters.learning_rate), lr_config=lr_config, + early_stop=early_stop, + 
use_adaptive_interval=self._hyperparams.learning_parameters.use_adaptive_interval, data=ConfigDict( samples_per_gpu=int(self._hyperparams.learning_parameters.batch_size), workers_per_gpu=int(self._hyperparams.learning_parameters.num_workers), @@ -195,35 +229,40 @@ def _init_recipe_hparam(self) -> dict: ) def _init_recipe(self): - logger.info('called _init_recipe()') + logger.info("called _init_recipe()") - recipe_root = os.path.join(MPAConstants.RECIPES_PATH, 'stages/detection') - if self._task_type.domain in {Domain.INSTANCE_SEGMENTATION, Domain.ROTATED_DETECTION}: - recipe_root = os.path.join(MPAConstants.RECIPES_PATH, 'stages/instance-segmentation') + recipe_root = os.path.join(MPAConstants.RECIPES_PATH, "stages/detection") + if self._task_type.domain in { + Domain.INSTANCE_SEGMENTATION, + Domain.ROTATED_DETECTION, + }: + recipe_root = os.path.join(MPAConstants.RECIPES_PATH, "stages/instance-segmentation") train_type = self._hyperparams.algo_backend.train_type - logger.info(f'train type = {train_type}') + logger.info(f"train type = {train_type}") - recipe = os.path.join(recipe_root, 'unbiased_teacher.py') + recipe = os.path.join(recipe_root, "imbalance.py") if train_type == TrainType.SemiSupervised: - recipe = os.path.join(recipe_root, 'unbiased_teacher.py') + recipe = os.path.join(recipe_root, "unbiased_teacher.py") elif train_type == TrainType.SelfSupervised: # recipe = os.path.join(recipe_root, 'pretrain.yaml') - raise NotImplementedError(f'train type {train_type} is not implemented yet.') + raise NotImplementedError(f"train type {train_type} is not implemented yet.") elif train_type == TrainType.Incremental: - recipe = os.path.join(recipe_root, 'imbalance.py') + recipe = os.path.join(recipe_root, "imbalance.py") else: - raise NotImplementedError(f'train type {train_type} is not implemented yet.') + # raise NotImplementedError(f'train type {train_type} is not implemented yet.') + # FIXME: Temporary remedy for CVS-88098 + logger.warning(f"train type {train_type} is not implemented yet.") self._recipe_cfg = MPAConfig.fromfile(recipe) self._patch_data_pipeline() self._patch_datasets(self._recipe_cfg, self._task_type.domain) # for OTE compatibility self._patch_evaluation(self._recipe_cfg) # for OTE compatibility - logger.info(f'initialized recipe = {recipe}') + logger.info(f"initialized recipe = {recipe}") def _init_model_cfg(self): base_dir = os.path.abspath(os.path.dirname(self.template_file_path)) - model_cfg = MPAConfig.fromfile(os.path.join(base_dir, 'model.py')) + model_cfg = MPAConfig.fromfile(os.path.join(base_dir, "model.py")) if len(self._anchors) != 0: self._update_anchors(model_cfg.model.bbox_head.anchor_generator, self._anchors) return model_cfg @@ -238,13 +277,13 @@ def _init_test_data_cfg(self, dataset: DatasetEntity): test=ConfigDict( ote_dataset=dataset, labels=self._labels, - ) + ), ) ) return data_cfg def _add_predictions_to_dataset(self, prediction_results, dataset, confidence_threshold=0.0): - """ Loop over dataset again to assign predictions. Convert from MMDetection format to OTE format. """ + """Loop over dataset again to assign predictions. 
Convert from MMDetection format to OTE format.""" for dataset_item, (all_results, feature_vector, saliency_map) in zip(dataset, prediction_results): width = dataset_item.width height = dataset_item.height @@ -252,11 +291,13 @@ def _add_predictions_to_dataset(self, prediction_results, dataset, confidence_th shapes = [] if self._task_type == TaskType.DETECTION: shapes = self._det_add_predictions_to_dataset(all_results, width, height, confidence_threshold) - elif self._task_type in {TaskType.INSTANCE_SEGMENTATION, TaskType.ROTATED_DETECTION}: + elif self._task_type in { + TaskType.INSTANCE_SEGMENTATION, + TaskType.ROTATED_DETECTION, + }: shapes = self._ins_seg_add_predictions_to_dataset(all_results, width, height, confidence_threshold) else: - raise RuntimeError( - f"MPA results assignment not implemented for task: {self._task_type}") + raise RuntimeError(f"MPA results assignment not implemented for task: {self._task_type}") dataset_item.append_annotations(shapes) @@ -265,16 +306,19 @@ def _add_predictions_to_dataset(self, prediction_results, dataset, confidence_th dataset_item.append_metadata_item(active_score, model=self._task_environment.model) if saliency_map is not None: - width, height = dataset_item.width, dataset_item.height - saliency_map = cv2.resize(saliency_map, (width, height), interpolation=cv2.INTER_NEAREST) - saliency_map_media = ResultMediaEntity(name="saliency_map", type="Saliency map", - annotation_scene=dataset_item.annotation_scene, - numpy=saliency_map, roi=dataset_item.roi) + saliency_map = get_actmap(saliency_map, (dataset_item.width, dataset_item.height)) + saliency_map_media = ResultMediaEntity( + name="Saliency Map", + type="saliency_map", + annotation_scene=dataset_item.annotation_scene, + numpy=saliency_map, + roi=dataset_item.roi, + ) dataset_item.append_metadata_item(saliency_map_media, model=self._task_environment.model) def _patch_data_pipeline(self): base_dir = os.path.abspath(os.path.dirname(self.template_file_path)) - data_pipeline_path = os.path.join(base_dir, 'data_pipeline.py') + data_pipeline_path = os.path.join(base_dir, "data_pipeline.py") if os.path.exists(data_pipeline_path): data_pipeline_cfg = MPAConfig.fromfile(data_pipeline_path) self._recipe_cfg.merge_from_dict(data_pipeline_cfg) @@ -287,37 +331,37 @@ def _patch_datasets(config: MPAConfig, domain=Domain.DETECTION): def patch_color_conversion(pipeline): # Default data format for OTE is RGB, while mmdet uses BGR, so negate the color conversion flag. 
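# Worked example (hypothetical values): a step defined for mmdet's BGR input,
#     ConfigDict(type="Normalize", mean=[0, 0, 0], std=[255, 255, 255], to_rgb=True),
# is patched here to to_rgb=False: OTE already supplies RGB frames, so the
# channel swap the flag requests for BGR input must not run again.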
for pipeline_step in pipeline: - if pipeline_step.type == 'Normalize': + if pipeline_step.type == "Normalize": to_rgb = False - if 'to_rgb' in pipeline_step: + if "to_rgb" in pipeline_step: to_rgb = pipeline_step.to_rgb to_rgb = not bool(to_rgb) pipeline_step.to_rgb = to_rgb - elif pipeline_step.type == 'MultiScaleFlipAug': + elif pipeline_step.type == "MultiScaleFlipAug": patch_color_conversion(pipeline_step.transforms) - assert 'data' in config - for subset in ('train', 'val', 'test', 'unlabeled'): + assert "data" in config + for subset in ("train", "val", "test", "unlabeled"): cfg = config.data.get(subset, None) if not cfg: continue - if cfg.type == 'RepeatDataset' or cfg.type == 'MultiImageMixDataset': + if cfg.type == "RepeatDataset" or cfg.type == "MultiImageMixDataset": cfg = cfg.dataset - cfg.type = 'MPADetDataset' + cfg.type = "MPADetDataset" cfg.domain = domain cfg.ote_dataset = None cfg.labels = None - remove_from_config(cfg, 'ann_file') - remove_from_config(cfg, 'img_prefix') - remove_from_config(cfg, 'classes') # Get from DatasetEntity + remove_from_config(cfg, "ann_file") + remove_from_config(cfg, "img_prefix") + remove_from_config(cfg, "classes") # Get from DatasetEntity for pipeline_step in cfg.pipeline: - if pipeline_step.type == 'LoadImageFromFile': - pipeline_step.type = 'LoadImageFromOTEDataset' - if pipeline_step.type == 'LoadAnnotations': - pipeline_step.type = 'LoadAnnotationFromOTEDataset' + if pipeline_step.type == "LoadImageFromFile": + pipeline_step.type = "LoadImageFromOTEDataset" + if pipeline_step.type == "LoadAnnotations": + pipeline_step.type = "LoadAnnotationFromOTEDataset" pipeline_step.domain = domain - pipeline_step.min_size = cfg.pop('min_size', -1) - if subset == 'train' and pipeline_step.type == 'Collect': + pipeline_step.min_size = cfg.pop("min_size", -1) + if subset == "train" and pipeline_step.type == "Collect": pipeline_step = BaseTask._get_meta_keys(pipeline_step) patch_color_conversion(cfg.pipeline) @@ -325,13 +369,11 @@ def patch_color_conversion(pipeline): def _patch_evaluation(config: MPAConfig): cfg = config.evaluation # CocoDataset.evaluate -> CustomDataset.evaluate - cfg.pop('classwise', None) - cfg.metric = 'mAP' - cfg.save_best = 'mAP' + cfg.pop("classwise", None) + cfg.metric = "mAP" + cfg.save_best = "mAP" # EarlyStoppingHook - for cfg in config.get('custom_hooks', []): - if 'EarlyStoppingHook' in cfg.type: - cfg.metric = 'mAP' + config.early_stop_metric = "mAP" def _det_add_predictions_to_dataset(self, all_results, width, height, confidence_threshold): shapes = [] @@ -345,14 +387,16 @@ def _det_add_predictions_to_dataset(self, all_results, width, height, confidence if probability < confidence_threshold: continue - assigned_label = [ScoredLabel(self._labels[label_idx], - probability=probability)] + assigned_label = [ScoredLabel(self._labels[label_idx], probability=probability)] if coords[3] - coords[1] <= 0 or coords[2] - coords[0] <= 0: continue - shapes.append(Annotation( - Rectangle(x1=coords[0], y1=coords[1], x2=coords[2], y2=coords[3]), - labels=assigned_label)) + shapes.append( + Annotation( + Rectangle(x1=coords[0], y1=coords[1], x2=coords[2], y2=coords[3]), + labels=assigned_label, + ) + ) return shapes def _ins_seg_add_predictions_to_dataset(self, all_results, width, height, confidence_threshold): @@ -383,29 +427,42 @@ def _ins_seg_add_predictions_to_dataset(self, all_results, width, height, confid @staticmethod def _update_anchors(origin, new): logger.info("Updating anchors") - origin['heights'] = new['heights'] - 
origin['widths'] = new['widths'] + origin["heights"] = new["heights"] + origin["widths"] = new["widths"] class DetectionTrainTask(DetectionInferenceTask, ITrainingTask): def save_model(self, output_model: ModelEntity): - logger.info('called save_model') + logger.info("called save_model") buffer = io.BytesIO() hyperparams_str = ids_to_strings(cfg_helper.convert(self._hyperparams, dict, enum_to_str=True)) labels = {label.name: label.color.rgb_tuple for label in self._labels} model_ckpt = torch.load(self._model_ckpt) modelinfo = { - 'model': model_ckpt['state_dict'], 'config': hyperparams_str, 'labels': labels, - 'confidence_threshold': self.confidence_threshold, 'VERSION': 1 + "model": model_ckpt["state_dict"], + "config": hyperparams_str, + "labels": labels, + "confidence_threshold": self.confidence_threshold, + "VERSION": 1, } - if hasattr(self._model_cfg.model, 'bbox_head') and hasattr(self._model_cfg.model.bbox_head, 'anchor_generator'): - if getattr(self._model_cfg.model.bbox_head.anchor_generator, 'reclustering_anchors', False): - modelinfo['anchors'] = {} - self._update_anchors(modelinfo['anchors'], self._model_cfg.model.bbox_head.anchor_generator) + if hasattr(self._model_cfg.model, "bbox_head") and hasattr(self._model_cfg.model.bbox_head, "anchor_generator"): + if getattr( + self._model_cfg.model.bbox_head.anchor_generator, + "reclustering_anchors", + False, + ): + modelinfo["anchors"] = {} + self._update_anchors( + modelinfo["anchors"], + self._model_cfg.model.bbox_head.anchor_generator, + ) torch.save(modelinfo, buffer) output_model.set_data("weights.pth", buffer.getvalue()) - output_model.set_data("label_schema.json", label_schema_to_bytes(self._task_environment.label_schema)) + output_model.set_data( + "label_schema.json", + label_schema_to_bytes(self._task_environment.label_schema), + ) output_model.precision = self._precision def cancel_training(self): @@ -420,18 +477,20 @@ def cancel_training(self): if self.cancel_interface is not None: self.cancel_interface.cancel() else: - logger.info('but training was not started yet. reserved it to cancel') + logger.info("but training was not started yet. reserved it to cancel") self.reserved_cancel = True - def train(self, - dataset: DatasetEntity, - output_model: ModelEntity, - train_parameters: Optional[TrainParameters] = None): - logger.info('train()') + def train( + self, + dataset: DatasetEntity, + output_model: ModelEntity, + train_parameters: Optional[TrainParameters] = None, + ): + logger.info("train()") # Check for stop signal when training has stopped. # If should_stop is true, training was cancelled and no new if self._should_stop: - logger.info('Training cancelled.') + logger.info("Training cancelled.") self._should_stop = False self._is_training = False return @@ -443,22 +502,22 @@ def train(self, self._time_monitor = TrainingProgressCallback(update_progress_callback) self._learning_curves = defaultdict(OTELoggerHook.Curve) - stage_module = 'DetectionTrainer' + stage_module = "DetectionTrainer" self._data_cfg = self._init_train_data_cfg(dataset) self._is_training = True - results = self._run_task(stage_module, mode='train', dataset=dataset, parameters=train_parameters) + results = self._run_task(stage_module, mode="train", dataset=dataset, parameters=train_parameters) # Check for stop signal when training has stopped. 
If should_stop is true, training was cancelled and no new if self._should_stop: - logger.info('Training cancelled.') + logger.info("Training cancelled.") self._should_stop = False self._is_training = False return # get output model - model_ckpt = results.get('final_ckpt') + model_ckpt = results.get("final_ckpt") if model_ckpt is None: - logger.error('cannot find final checkpoint from the results.') + logger.error("cannot find final checkpoint from the results.") # output_model.model_status = ModelStatus.FAILED return else: @@ -466,8 +525,12 @@ def train(self, self._model_ckpt = model_ckpt # Update anchors - if hasattr(self._model_cfg.model, 'bbox_head') and hasattr(self._model_cfg.model.bbox_head, 'anchor_generator'): - if getattr(self._model_cfg.model.bbox_head.anchor_generator, 'reclustering_anchors', False): + if hasattr(self._model_cfg.model, "bbox_head") and hasattr(self._model_cfg.model.bbox_head, "anchor_generator"): + if getattr( + self._model_cfg.model.bbox_head.anchor_generator, + "reclustering_anchors", + False, + ): self._update_anchors(self._anchors, self._model_cfg.model.bbox_head.anchor_generator) # get prediction on validation set @@ -480,12 +543,12 @@ def train(self, result_set = ResultSetEntity( model=output_model, ground_truth_dataset=val_dataset, - prediction_dataset=preds_val_dataset + prediction_dataset=preds_val_dataset, ) # adjust confidence threshold if self._hyperparams.postprocessing.result_based_confidence_threshold: - logger.info('Adjusting the confidence threshold') + logger.info("Adjusting the confidence threshold") metric = MetricsHelper.compute_f_measure(result_set, vary_confidence_threshold=True) best_confidence_threshold = metric.best_confidence_threshold.value if best_confidence_threshold is None: @@ -498,16 +561,16 @@ def train(self, # compose performance statistics performance = metric.get_performance() performance.dashboard_metrics.extend(self._generate_training_metrics(self._learning_curves, val_map)) - logger.info(f'Final model performance: {str(performance)}') + logger.info(f"Final model performance: {str(performance)}") # save resulting model self.save_model(output_model) output_model.performance = performance # output_model.model_status = ModelStatus.SUCCESS self._is_training = False - logger.info('train done.') + logger.info("train done.") def _init_train_data_cfg(self, dataset: DatasetEntity): - logger.info('init data cfg.') + logger.info("init data cfg.") data_cfg = ConfigDict( data=ConfigDict( train=ConfigDict( @@ -526,7 +589,7 @@ def _init_train_data_cfg(self, dataset: DatasetEntity): ) # Temparory remedy for cfg.pretty_text error for label in self._labels: - label.hotkey = 'a' + label.hotkey = "a" return data_cfg def _generate_training_metrics(self, learning_curves, map) -> Optional[List[MetricsGroup]]: @@ -547,7 +610,8 @@ def _generate_training_metrics(self, learning_curves, map) -> Optional[List[Metr metric_curve = CurveMetric( xs=np.nan_to_num(curve.x).tolist(), ys=np.nan_to_num(curve.y).tolist(), - name=key) + name=key, + ) visualization_info = LineChartInfo(name=key, x_axis_label="Epoch", y_axis_label=key) output.append(LineMetricsGroup(metrics=[metric_curve], visualization_info=visualization_info)) @@ -555,7 +619,7 @@ def _generate_training_metrics(self, learning_curves, map) -> Optional[List[Metr output.append( BarMetricsGroup( metrics=[ScoreMetric(value=map, name="mAP")], - visualization_info=BarChartInfo("Validation score", visualization_type=VisualizationType.RADIAL_BAR) + visualization_info=BarChartInfo("Validation score", 
visualization_type=VisualizationType.RADIAL_BAR), ) ) @@ -563,19 +627,18 @@ class DetectionNNCFTask(OTEDetectionNNCFTask): - @check_input_parameters_type() def __init__(self, task_environment: TaskEnvironment): - """" + """ Task for compressing detection models using NNCF. """ curr_model_path = task_environment.model_template.model_template_path base_model_path = os.path.join( os.path.dirname(os.path.abspath(curr_model_path)), - task_environment.model_template.base_model_path + task_environment.model_template.base_model_path, ) if os.path.isfile(base_model_path): - logger.info(f'Base model for NNCF: {base_model_path}') + logger.info(f"Base model for NNCF: {base_model_path}") # Redirect to base model task_environment.model_template = parse_model_template(base_model_path) super().__init__(task_environment) diff --git a/external/model-preparation-algorithm/mpa_tasks/apis/segmentation/__init__.py b/external/model-preparation-algorithm/mpa_tasks/apis/segmentation/__init__.py index 7fc33b70b5b..45b615b04e5 100644 --- a/external/model-preparation-algorithm/mpa_tasks/apis/segmentation/__init__.py +++ b/external/model-preparation-algorithm/mpa_tasks/apis/segmentation/__init__.py @@ -2,10 +2,19 @@ # SPDX-License-Identifier: Apache-2.0 # -from .config import SegmentationConfig -from .task import SegmentationInferenceTask, SegmentationTrainTask, SegmentationNNCFTask +import mpa.seg # Load relevant extensions to registry import mpa_tasks.extensions.datasets.mpa_seg_dataset -import mpa.seg +from .config import SegmentationConfig +from .task import SegmentationInferenceTask, SegmentationNNCFTask, SegmentationTrainTask + +__all__ = [ + "SegmentationConfig", + "SegmentationInferenceTask", + "SegmentationTrainTask", + "SegmentationNNCFTask", + "mpa_tasks", # imported for its dataset-registration side effect + "mpa", # imported for its stage-registration side effect +] diff --git a/external/model-preparation-algorithm/mpa_tasks/apis/segmentation/config.py b/external/model-preparation-algorithm/mpa_tasks/apis/segmentation/config.py index be4fab86660..2599a9a7fe3 100644 --- a/external/model-preparation-algorithm/mpa_tasks/apis/segmentation/config.py +++ b/external/model-preparation-algorithm/mpa_tasks/apis/segmentation/config.py @@ -3,16 +3,15 @@ # from attr import attrs - -from ote_sdk.configuration.elements import (add_parameter_group, - ParameterGroup, - # configurable_boolean, - configurable_float, - configurable_integer, - selectable, - string_attribute) - from mpa_tasks.apis import BaseConfig, LearningRateSchedule +from ote_sdk.configuration.elements import ( # configurable_boolean, + ParameterGroup, + add_parameter_group, + configurable_float, + configurable_integer, + selectable, + string_attribute, +) from ote_sdk.configuration.model_lifecycle import ModelLifecycle from segmentation_tasks.apis.segmentation.configuration_enums import Models @@ -24,51 +23,55 @@ class SegmentationConfig(BaseConfig): @attrs class __LearningParameters(BaseConfig.BaseLearningParameters): - header = string_attribute('Learning Parameters') + header = string_attribute("Learning Parameters") description = header learning_rate_schedule = selectable( default_value=LearningRateSchedule.COSINE, - header='Learning rate schedule', - description='Specify learning rate scheduling for the MMDetection task. ' - 'When training for a small number of epochs (N < 10), the fixed ' - 'schedule is recommended. For training for 10 < N < 25 epochs, ' - 'step-wise or exponential annealing might give better results.
' - 'Finally, for training on large datasets for at least 20 ' - 'epochs, cyclic annealing could result in the best model.', - editable=True, visible_in_ui=True) + header="Learning rate schedule", + description="Specify learning rate scheduling for the MMSegmentation task. " + "When training for a small number of epochs (N < 10), the fixed " + "schedule is recommended. For training for 10 < N < 25 epochs, " + "step-wise or exponential annealing might give better results. " + "Finally, for training on large datasets for at least 20 " + "epochs, cyclic annealing could result in the best model.", + editable=True, + visible_in_ui=True, + ) @attrs class __Postprocessing(ParameterGroup): header = string_attribute("Postprocessing") description = header - class_name = selectable(default_value=Models.BlurSegmentation, - header="Model class for inference", - description="Model classes with defined pre- and postprocessing", - editable=False, - visible_in_ui=True) + class_name = selectable( + default_value=Models.BlurSegmentation, + header="Model class for inference", + description="Model classes with defined pre- and postprocessing", + editable=False, + visible_in_ui=True, + ) blur_strength = configurable_integer( header="Blur strength", description="With a higher value, the segmentation output will be smoother, but less accurate.", default_value=1, min_value=1, max_value=25, - affects_outcome_of=ModelLifecycle.INFERENCE + affects_outcome_of=ModelLifecycle.INFERENCE, ) soft_threshold = configurable_float( default_value=0.5, header="Soft threshold", description="The threshold to apply to the probability output of the model, for each pixel. A higher value " - "means a stricter segmentation prediction.", + "means a stricter segmentation prediction.", min_value=0.0, max_value=1.0, - affects_outcome_of=ModelLifecycle.INFERENCE + affects_outcome_of=ModelLifecycle.INFERENCE, ) @attrs class __AlgoBackend(BaseConfig.BaseAlgoBackendParameters): - header = string_attribute('Parameters for the MPA algo-backend') + header = string_attribute("Parameters for the MPA algo-backend") description = header learning_parameters = add_parameter_group(__LearningParameters) diff --git a/external/model-preparation-algorithm/mpa_tasks/apis/segmentation/task.py b/external/model-preparation-algorithm/mpa_tasks/apis/segmentation/task.py index 1e9222a8917..fb801bde36b 100644 --- a/external/model-preparation-algorithm/mpa_tasks/apis/segmentation/task.py +++ b/external/model-preparation-algorithm/mpa_tasks/apis/segmentation/task.py @@ -10,26 +10,36 @@ import numpy as np import torch from mmcv.utils import ConfigDict -from segmentation_tasks.apis.segmentation.config_utils import remove_from_config -from segmentation_tasks.apis.segmentation.ote_utils import TrainingProgressCallback, InferenceProgressCallback -from segmentation_tasks.extension.utils.hooks import OTELoggerHook from mpa import MPAConstants -from mpa_tasks.apis import BaseTask, TrainType -from mpa_tasks.apis.segmentation import SegmentationConfig -from mpa_tasks.utils.data_utils import get_actmap from mpa.utils.config_utils import MPAConfig from mpa.utils.logger import get_logger +from mpa_tasks.apis import BaseTask, TrainType +from mpa_tasks.apis.segmentation import SegmentationConfig from ote_sdk.configuration import cfg_helper from ote_sdk.configuration.helper.utils import ids_to_strings from ote_sdk.entities.datasets import DatasetEntity from ote_sdk.entities.inference_parameters import InferenceParameters -from ote_sdk.entities.inference_parameters import
default_progress_callback as default_infer_progress_callback +from ote_sdk.entities.inference_parameters import ( + default_progress_callback as default_infer_progress_callback, +) from ote_sdk.entities.label import Domain -from ote_sdk.entities.metrics import (CurveMetric, InfoMetric, LineChartInfo, - MetricsGroup, Performance, ScoreMetric, - VisualizationInfo, VisualizationType) -from ote_sdk.entities.model import (ModelEntity, ModelFormat, - ModelOptimizationType, ModelPrecision) +from ote_sdk.entities.metrics import ( + CurveMetric, + InfoMetric, + LineChartInfo, + MetricsGroup, + Performance, + ScoreMetric, + VisualizationInfo, + VisualizationType, +) +from ote_sdk.entities.model import ( + ModelEntity, + ModelFormat, + ModelOptimizationType, + ModelPrecision, +) +from ote_sdk.entities.model_template import parse_model_template from ote_sdk.entities.result_media import ResultMediaEntity from ote_sdk.entities.resultset import ResultSetEntity from ote_sdk.entities.subset import Subset @@ -38,22 +48,24 @@ from ote_sdk.entities.train_parameters import TrainParameters, default_progress_callback from ote_sdk.serialization.label_mapper import label_schema_to_bytes from ote_sdk.usecases.evaluation.metrics_helper import MetricsHelper -from ote_sdk.usecases.tasks.interfaces.evaluate_interface import \ - IEvaluationTask -from ote_sdk.usecases.tasks.interfaces.export_interface import (ExportType, - IExportTask) -from ote_sdk.usecases.tasks.interfaces.inference_interface import \ - IInferenceTask +from ote_sdk.usecases.tasks.interfaces.evaluate_interface import IEvaluationTask +from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType, IExportTask +from ote_sdk.usecases.tasks.interfaces.inference_interface import IInferenceTask from ote_sdk.usecases.tasks.interfaces.training_interface import ITrainingTask from ote_sdk.usecases.tasks.interfaces.unload_interface import IUnload +from ote_sdk.utils.argument_checks import check_input_parameters_type from ote_sdk.utils.segmentation_utils import ( create_annotation_from_segmentation_map, - create_hard_prediction_from_soft_prediction) - + create_hard_prediction_from_soft_prediction, +) from segmentation_tasks.apis.segmentation import OTESegmentationNNCFTask -from ote_sdk.utils.argument_checks import check_input_parameters_type -from ote_sdk.entities.model_template import parse_model_template - +from segmentation_tasks.apis.segmentation.config_utils import remove_from_config +from segmentation_tasks.apis.segmentation.ote_utils import ( + InferenceProgressCallback, + TrainingProgressCallback, + get_activation_map, +) +from segmentation_tasks.extension.utils.hooks import OTELoggerHook logger = get_logger() @@ -64,16 +76,14 @@ class SegmentationInferenceTask(BaseTask, IInferenceTask, IExportTask, IEvaluati def __init__(self, task_environment: TaskEnvironment): # self._should_stop = False self.freeze = True - self.metric = 'mDice' + self.metric = "mDice" super().__init__(TASK_CONFIG, task_environment) - def infer(self, - dataset: DatasetEntity, - inference_parameters: Optional[InferenceParameters] = None - ) -> DatasetEntity: - logger.info('infer()') + def infer( + self, dataset: DatasetEntity, inference_parameters: Optional[InferenceParameters] = None + ) -> DatasetEntity: + logger.info("infer()") dump_features = True - dump_saliency_map = not inference_parameters.is_evaluation if inference_parameters else True if inference_parameters is not None: update_progress_callback = inference_parameters.update_progress @@ -84,30 +94,25 @@ def 
infer(self, self._time_monitor = InferenceProgressCallback(len(dataset), update_progress_callback) - stage_module = 'SegInferrer' + stage_module = "SegInferrer" self._data_cfg = self._init_test_data_cfg(dataset) self._label_dictionary = dict(enumerate(self._labels, 1)) - results = self._run_task(stage_module, mode='train', dataset=dataset, dump_features=dump_features, - dump_saliency_map=dump_saliency_map) - logger.debug(f'result of run_task {stage_module} module = {results}') - predictions = results['outputs'] - prediction_results = zip(predictions['eval_predictions'], predictions['feature_vectors'], - predictions['saliency_maps']) - self._add_predictions_to_dataset(prediction_results, dataset, dump_saliency_map=not is_evaluation) + results = self._run_task(stage_module, mode="train", dataset=dataset, dump_features=dump_features) + logger.debug(f"result of run_task {stage_module} module = {results}") + predictions = results["outputs"] + prediction_results = zip(predictions["eval_predictions"], predictions["feature_vectors"]) + self._add_predictions_to_dataset(prediction_results, dataset, dump_soft_prediction=not is_evaluation) return dataset - def evaluate(self, - output_result_set: ResultSetEntity, - evaluation_metric: Optional[str] = None): - logger.info('called evaluate()') + def evaluate(self, output_result_set: ResultSetEntity, evaluation_metric: Optional[str] = None): + logger.info("called evaluate()") if evaluation_metric is not None: - logger.warning(f'Requested to use {evaluation_metric} metric, ' - 'but parameter is ignored. Use mDice instead.') - logger.info('Computing mDice') - metrics = MetricsHelper.compute_dice_averaged_over_pixels( - output_result_set - ) + logger.warning( + f"Requested to use {evaluation_metric} metric, " "but parameter is ignored. Use mDice instead." + ) + logger.info("Computing mDice") + metrics = MetricsHelper.compute_dice_averaged_over_pixels(output_result_set) logger.info(f"mDice after evaluation: {metrics.overall_dice.value}") output_result_set.performance = metrics.get_performance() @@ -117,45 +122,56 @@ def unload(self): """ self.finalize() - def export(self, - export_type: ExportType, - output_model: ModelEntity): + def export(self, export_type: ExportType, output_model: ModelEntity): # copied from OTE inference_task.py - logger.info('Exporting the model') + logger.info("Exporting the model") if export_type != ExportType.OPENVINO: - raise RuntimeError(f'not supported export type {export_type}') + raise RuntimeError(f"not supported export type {export_type}") output_model.model_format = ModelFormat.OPENVINO output_model.optimization_type = ModelOptimizationType.MO - stage_module = 'SegExporter' - self._initialize() - results = self._run_task(stage_module, mode='train', precision=self._precision[0].name) - results = results.get('outputs') - logger.debug(f'results of run_task = {results}') + stage_module = "SegExporter" + results = self._run_task(stage_module, mode="train", precision="FP32", export=True) + results = results.get("outputs") + logger.debug(f"results of run_task = {results}") if results is None: logger.error(f"error while exporting model {results.get('msg')}") # output_model.model_status = ModelStatus.FAILED else: - bin_file = results.get('bin') - xml_file = results.get('xml') + bin_file = results.get("bin") + xml_file = results.get("xml") if xml_file is None or bin_file is None: - raise RuntimeError('invalid status of exporting. bin and xml should not be None') + raise RuntimeError("invalid status of exporting. 
bin and xml should not be None") with open(bin_file, "rb") as f: - output_model.set_data('openvino.bin', f.read()) + output_model.set_data("openvino.bin", f.read()) with open(xml_file, "rb") as f: - output_model.set_data('openvino.xml', f.read()) - output_model.precision = self._precision + output_model.set_data("openvino.xml", f.read()) + output_model.precision = [ModelPrecision.FP32] output_model.optimization_methods = self._optimization_methods output_model.set_data("label_schema.json", label_schema_to_bytes(self._task_environment.label_schema)) - logger.info('Exporting completed') + logger.info("Exporting completed") def _init_recipe_hparam(self) -> dict: warmup_iters = int(self._hyperparams.learning_parameters.learning_rate_warmup_iters) - lr_config = ConfigDict(warmup_iters=warmup_iters) if warmup_iters > 0 \ + lr_config = ( + ConfigDict(warmup_iters=warmup_iters) + if warmup_iters > 0 else ConfigDict(warmup_iters=warmup_iters, warmup=None) + ) + + if self._hyperparams.learning_parameters.enable_early_stopping: + early_stop = ConfigDict( + start=int(self._hyperparams.learning_parameters.early_stop_start), + patience=int(self._hyperparams.learning_parameters.early_stop_patience), + iteration_patience=int(self._hyperparams.learning_parameters.early_stop_iteration_patience), + ) + else: + early_stop = False + return ConfigDict( optimizer=ConfigDict(lr=self._hyperparams.learning_parameters.learning_rate), lr_config=lr_config, + early_stop=early_stop, data=ConfigDict( samples_per_gpu=int(self._hyperparams.learning_parameters.batch_size), workers_per_gpu=int(self._hyperparams.learning_parameters.num_workers), @@ -164,34 +180,36 @@ def _init_recipe_hparam(self) -> dict: ) def _init_recipe(self): - logger.info('called _init_recipe()') + logger.info("called _init_recipe()") - recipe_root = os.path.join(MPAConstants.RECIPES_PATH, 'stages/segmentation') + recipe_root = os.path.join(MPAConstants.RECIPES_PATH, "stages/segmentation") train_type = self._hyperparams.algo_backend.train_type - logger.info(f'train type = {train_type}') + logger.info(f"train type = {train_type}") - recipe = os.path.join(recipe_root, 'class_incr.py') + recipe = os.path.join(recipe_root, "class_incr.py") if train_type == TrainType.SemiSupervised: - recipe = os.path.join(recipe_root, 'cutmix_seg.py') + recipe = os.path.join(recipe_root, "cutmix_seg.py") elif train_type == TrainType.SelfSupervised: # recipe = os.path.join(recipe_root, 'pretrain.yaml') - raise NotImplementedError(f'train type {train_type} is not implemented yet.') + raise NotImplementedError(f"train type {train_type} is not implemented yet.") elif train_type == TrainType.Incremental: - recipe = os.path.join(recipe_root, 'class_incr.py') + recipe = os.path.join(recipe_root, "class_incr.py") else: - raise NotImplementedError(f'train type {train_type} is not implemented yet.') + # raise NotImplementedError(f'train type {train_type} is not implemented yet.') + # FIXME: Temporary remedy for CVS-88098 + logger.warning(f"train type {train_type} is not implemented yet.") self._recipe_cfg = MPAConfig.fromfile(recipe) self._patch_datasets(self._recipe_cfg) # for OTE compatibility self._patch_evaluation(self._recipe_cfg) # for OTE compatibility self.metric = self._recipe_cfg.evaluation.metric if not self.freeze: - remove_from_config(self._recipe_cfg, 'params_config') - logger.info(f'initialized recipe = {recipe}') + remove_from_config(self._recipe_cfg, "params_config") + logger.info(f"initialized recipe = {recipe}") def _init_model_cfg(self): base_dir = 
os.path.abspath(os.path.dirname(self.template_file_path)) - return MPAConfig.fromfile(os.path.join(base_dir, 'model.py')) + return MPAConfig.fromfile(os.path.join(base_dir, "model.py")) def _init_test_data_cfg(self, dataset: DatasetEntity): data_cfg = ConfigDict( @@ -205,15 +223,15 @@ def _init_test_data_cfg(self, dataset: DatasetEntity): test=ConfigDict( ote_dataset=dataset, labels=self._labels, - ) + ), ) ) return data_cfg - def _add_predictions_to_dataset(self, prediction_results, dataset, dump_saliency_map): - """ Loop over dataset again to assign predictions. Convert from MMSegmentation format to OTE format. """ + def _add_predictions_to_dataset(self, prediction_results, dataset, dump_soft_prediction): + """Loop over dataset again to assign predictions. Convert from MMSegmentation format to OTE format.""" - for dataset_item, (prediction, feature_vector, saliency_map) in zip(dataset, prediction_results): + for dataset_item, (prediction, feature_vector) in zip(dataset, prediction_results): soft_prediction = np.transpose(prediction[0], axes=(1, 2, 0)) hard_prediction = create_hard_prediction_from_soft_prediction( soft_prediction=soft_prediction, @@ -231,74 +249,82 @@ def _add_predictions_to_dataset(self, prediction_results, dataset, dump_saliency active_score = TensorEntity(name="representation_vector", numpy=feature_vector.reshape(-1)) dataset_item.append_metadata_item(active_score, model=self._task_environment.model) - if dump_saliency_map and saliency_map is not None: - saliency_map = get_actmap(saliency_map, (dataset_item.width, dataset_item.height) ) - saliency_map_media = ResultMediaEntity(name="saliency_map", type="Saliency map", - annotation_scene=dataset_item.annotation_scene, - numpy=saliency_map, roi=dataset_item.roi) - dataset_item.append_metadata_item(saliency_map_media, model=self._task_environment.model) + if dump_soft_prediction: + for label_index, label in self._label_dictionary.items(): + if label_index == 0: + continue + current_label_soft_prediction = soft_prediction[:, :, label_index] + class_act_map = get_activation_map(current_label_soft_prediction) + result_media = ResultMediaEntity( + name="Soft Prediction", + type="soft_prediction", + label=label, + annotation_scene=dataset_item.annotation_scene, + roi=dataset_item.roi, + numpy=class_act_map, + ) + dataset_item.append_metadata_item(result_media, model=self._task_environment.model) @staticmethod def _patch_datasets(config: MPAConfig, domain=Domain.SEGMENTATION): def patch_color_conversion(pipeline): # Default data format for OTE is RGB, while mmdet uses BGR, so negate the color conversion flag. 
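# Note that the loop below also recurses into test-time augmentation wrappers:
# a Normalize step may sit inside MultiScaleFlipAug rather than at the top
# level, e.g. (hypothetical)
#     ConfigDict(type="MultiScaleFlipAug", transforms=[ConfigDict(type="Normalize", to_rgb=True)]),
# in which case patch_color_conversion descends into .transforms and negates
# the nested to_rgb flag in the same way.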
for pipeline_step in pipeline: - if pipeline_step.type == 'Normalize': + if pipeline_step.type == "Normalize": to_rgb = False - if 'to_rgb' in pipeline_step: + if "to_rgb" in pipeline_step: to_rgb = pipeline_step.to_rgb to_rgb = not bool(to_rgb) pipeline_step.to_rgb = to_rgb - elif pipeline_step.type == 'MultiScaleFlipAug': + elif pipeline_step.type == "MultiScaleFlipAug": patch_color_conversion(pipeline_step.transforms) - assert 'data' in config - for subset in ('train', 'val', 'test'): + + assert "data" in config + for subset in ("train", "val", "test"): cfg = config.data.get(subset, None) if not cfg: continue - if cfg.type == 'RepeatDataset': + if cfg.type == "RepeatDataset": cfg = cfg.dataset - cfg.type = 'MPASegIncrDataset' + cfg.type = "MPASegIncrDataset" cfg.domain = domain cfg.ote_dataset = None cfg.labels = None - remove_from_config(cfg, 'ann_dir') - remove_from_config(cfg, 'img_dir') - remove_from_config(cfg, 'data_root') - remove_from_config(cfg, 'split') - remove_from_config(cfg, 'classes') + remove_from_config(cfg, "ann_dir") + remove_from_config(cfg, "img_dir") + remove_from_config(cfg, "data_root") + remove_from_config(cfg, "split") + remove_from_config(cfg, "classes") for pipeline_step in cfg.pipeline: - if pipeline_step.type == 'LoadImageFromFile': - pipeline_step.type = 'LoadImageFromOTEDataset' - elif pipeline_step.type == 'LoadAnnotations': - pipeline_step.type = 'LoadAnnotationFromOTEDataset' + if pipeline_step.type == "LoadImageFromFile": + pipeline_step.type = "LoadImageFromOTEDataset" + elif pipeline_step.type == "LoadAnnotations": + pipeline_step.type = "LoadAnnotationFromOTEDataset" pipeline_step.domain = domain - if subset == 'train' and pipeline_step.type == 'Collect': + if subset == "train" and pipeline_step.type == "Collect": pipeline_step = BaseTask._get_meta_keys(pipeline_step) patch_color_conversion(cfg.pipeline) @staticmethod def _patch_evaluation(config: MPAConfig): cfg = config.evaluation - cfg.pop('classwise', None) - cfg.metric = 'mDice' - cfg.save_best = 'mDice' - cfg.rule = 'greater' + cfg.pop("classwise", None) + cfg.metric = "mDice" + cfg.save_best = "mDice" + cfg.rule = "greater" # EarlyStoppingHook - for cfg in config.get('custom_hooks', []): - if 'EarlyStoppingHook' in cfg.type: - cfg.metric = 'mDice' + config.early_stop_metric = "mDice" class SegmentationTrainTask(SegmentationInferenceTask, ITrainingTask): def save_model(self, output_model: ModelEntity): - logger.info(f'called save_model: {self._model_ckpt}') + logger.info(f"called save_model: {self._model_ckpt}") buffer = io.BytesIO() hyperparams_str = ids_to_strings(cfg_helper.convert(self._hyperparams, dict, enum_to_str=True)) labels = {label.name: label.color.rgb_tuple for label in self._labels} model_ckpt = torch.load(self._model_ckpt) - modelinfo = {'model': model_ckpt['state_dict'], 'config': hyperparams_str, 'labels': labels, 'VERSION': 1} + modelinfo = {"model": model_ckpt["state_dict"], "config": hyperparams_str, "labels": labels, "VERSION": 1} torch.save(modelinfo, buffer) output_model.set_data("weights.pth", buffer.getvalue()) @@ -319,18 +345,17 @@ def cancel_training(self): if self.cancel_interface is not None: self.cancel_interface.cancel() else: - logger.info('but training was not started yet. reserved it to cancel') + logger.info("but training was not started yet. 
reserved it to cancel") self.reserved_cancel = True - def train(self, - dataset: DatasetEntity, - output_model: ModelEntity, - train_parameters: Optional[TrainParameters] = None): - logger.info('train()') + def train( + self, dataset: DatasetEntity, output_model: ModelEntity, train_parameters: Optional[TrainParameters] = None + ): + logger.info("train()") # Check for stop signal between pre-eval and training. # If training is cancelled at this point, if self._should_stop: - logger.info('Training cancelled.') + logger.info("Training cancelled.") self._should_stop = False self._is_training = False return @@ -344,23 +369,23 @@ def train(self, self._learning_curves = defaultdict(OTELoggerHook.Curve) # learning_curves = defaultdict(OTELoggerHook.Curve) - stage_module = 'SegTrainer' + stage_module = "SegTrainer" self._data_cfg = self._init_train_data_cfg(dataset) self._is_training = True - results = self._run_task(stage_module, mode='train', dataset=dataset, parameters=train_parameters) + results = self._run_task(stage_module, mode="train", dataset=dataset, parameters=train_parameters) # Check for stop signal when training has stopped. # If should_stop is true, training was cancelled and no new if self._should_stop: - logger.info('Training cancelled.') + logger.info("Training cancelled.") self._should_stop = False self._is_training = False return # get output model - model_ckpt = results.get('final_ckpt') + model_ckpt = results.get("final_ckpt") if model_ckpt is None: - logger.error('cannot find final checkpoint from the results.') + logger.error("cannot find final checkpoint from the results.") # output_model.model_status = ModelStatus.FAILED return else: @@ -369,19 +394,20 @@ def train(self, # Get training metrics group from learning curves training_metrics, best_score = self._generate_training_metrics_group(self._learning_curves) - performance = Performance(score=ScoreMetric(value=best_score, name=self.metric), - dashboard_metrics=training_metrics) + performance = Performance( + score=ScoreMetric(value=best_score, name=self.metric), dashboard_metrics=training_metrics + ) - logger.info(f'Final model performance: {str(performance)}') + logger.info(f"Final model performance: {str(performance)}") # save resulting model self.save_model(output_model) output_model.performance = performance # output_model.model_status = ModelStatus.SUCCESS self._is_training = False - logger.info('train done.') + logger.info("train done.") def _init_train_data_cfg(self, dataset: DatasetEntity): - logger.info('init data cfg.') + logger.info("init data cfg.") data_cfg = ConfigDict( data=ConfigDict( train=ConfigDict( @@ -399,7 +425,7 @@ def _init_train_data_cfg(self, dataset: DatasetEntity): # Temparory remedy for cfg.pretty_text error for label in self._labels: - label.hotkey = 'a' + label.hotkey = "a" return data_cfg def _generate_training_metrics_group(self, learning_curves) -> Optional[List[MetricsGroup]]: @@ -409,16 +435,16 @@ def _generate_training_metrics_group(self, learning_curves) -> Optional[List[Met """ output: List[MetricsGroup] = [] # Model architecture - architecture = InfoMetric(name='Model architecture', value=self._model_name) - visualization_info_architecture = VisualizationInfo(name="Model architecture", - visualisation_type=VisualizationType.TEXT) - output.append(MetricsGroup(metrics=[architecture], - visualization_info=visualization_info_architecture)) + architecture = InfoMetric(name="Model architecture", value=self._model_name) + visualization_info_architecture = VisualizationInfo( + name="Model 
architecture", visualisation_type=VisualizationType.TEXT + ) + output.append(MetricsGroup(metrics=[architecture], visualization_info=visualization_info_architecture)) # Learning curves best_score = -1 for key, curve in learning_curves.items(): metric_curve = CurveMetric(xs=curve.x, ys=curve.y, name=key) - if key == f'val/{self.metric}': + if key == f"val/{self.metric}": best_score = max(curve.y) visualization_info = LineChartInfo(name=key, x_axis_label="Epoch", y_axis_label=key) output.append(MetricsGroup(metrics=[metric_curve], visualization_info=visualization_info)) @@ -426,19 +452,17 @@ def _generate_training_metrics_group(self, learning_curves) -> Optional[List[Met class SegmentationNNCFTask(OTESegmentationNNCFTask): - @check_input_parameters_type() def __init__(self, task_environment: TaskEnvironment): - """" + """ " Task for compressing segmentation models using NNCF. """ curr_model_path = task_environment.model_template.model_template_path base_model_path = os.path.join( - os.path.dirname(os.path.abspath(curr_model_path)), - task_environment.model_template.base_model_path + os.path.dirname(os.path.abspath(curr_model_path)), task_environment.model_template.base_model_path ) if os.path.isfile(base_model_path): - logger.info(f'Base model for NNCF: {base_model_path}') + logger.info(f"Base model for NNCF: {base_model_path}") # Redirect to base model task_environment.model_template = parse_model_template(base_model_path) super().__init__(task_environment) diff --git a/external/model-preparation-algorithm/mpa_tasks/apis/task.py b/external/model-preparation-algorithm/mpa_tasks/apis/task.py index 37547275baa..62a3b28e3be 100644 --- a/external/model-preparation-algorithm/mpa_tasks/apis/task.py +++ b/external/model-preparation-algorithm/mpa_tasks/apis/task.py @@ -8,25 +8,31 @@ import shutil import tempfile from typing import Union + import numpy as np import torch from mmcv.utils.config import Config, ConfigDict from mpa.builder import build from mpa.modules.hooks.cancel_interface_hook import CancelInterfaceHook from mpa.stage import Stage -from mpa.utils.config_utils import update_or_add_custom_hook +from mpa.utils.config_utils import remove_custom_hook, update_or_add_custom_hook from mpa.utils.logger import get_logger from ote_sdk.entities.datasets import DatasetEntity from ote_sdk.entities.model import ModelEntity, ModelPrecision from ote_sdk.entities.task_environment import TaskEnvironment from ote_sdk.serialization.label_mapper import LabelSchemaMapper - logger = get_logger() DEFAULT_META_KEYS = ( - 'filename', 'ori_filename', 'ori_shape', - 'img_shape', 'pad_shape', 'scale_factor', - 'flip', 'flip_direction', 'img_norm_cfg' + "filename", + "ori_filename", + "ori_shape", + "img_shape", + "pad_shape", + "scale_factor", + "flip", + "flip_direction", + "img_norm_cfg", ) @@ -38,8 +44,8 @@ def __init__(self, task_config, task_environment: TaskEnvironment): self._model_name = task_environment.model_template.name self._task_type = task_environment.model_template.task_type self._labels = task_environment.get_labels(include_empty=False) - self._output_path = tempfile.mkdtemp(prefix='MPA-task-') - logger.info(f'created output path at {self._output_path}') + self._output_path = tempfile.mkdtemp(prefix="MPA-task-") + logger.info(f"created output path at {self._output_path}") self.confidence_threshold = self._get_confidence_threshold(self._hyperparams) # Set default model attributes. 
self._model_label_schema = [] @@ -47,10 +53,10 @@ def __init__(self, task_config, task_environment: TaskEnvironment): self._model_ckpt = None self._anchors = {} if task_environment.model is not None: - logger.info('loading the model from the task env.') + logger.info("loading the model from the task env.") state_dict = self._load_model_state_dict(self._task_environment.model) if state_dict: - self._model_ckpt = os.path.join(self._output_path, 'env_model_ckpt.pth') + self._model_ckpt = os.path.join(self._output_path, "env_model_ckpt.pth") if os.path.exists(self._model_ckpt): os.remove(self._model_ckpt) torch.save(state_dict, self._model_ckpt) @@ -75,22 +81,24 @@ def __init__(self, task_config, task_environment: TaskEnvironment): self.override_configs = {} def _run_task(self, stage_module, mode=None, dataset=None, parameters=None, **kwargs): - self._initialize(dataset) + # FIXME: Temporary remedy for CVS-88098 + export = kwargs.get("export", False) + self._initialize(dataset, export=export) # update model config -> model label schema data_classes = [label.name for label in self._labels] model_classes = [label.name for label in self._model_label_schema] - self._model_cfg['model_classes'] = model_classes + self._model_cfg["model_classes"] = model_classes if dataset is not None: train_data_cfg = Stage.get_train_data_cfg(self._data_cfg) - train_data_cfg['data_classes'] = data_classes + train_data_cfg["data_classes"] = data_classes new_classes = np.setdiff1d(data_classes, model_classes).tolist() - train_data_cfg['new_classes'] = new_classes + train_data_cfg["new_classes"] = new_classes - logger.info(f'running task... kwargs = {kwargs}') + logger.info(f"running task... kwargs = {kwargs}") if self._recipe_cfg is None: raise RuntimeError( - "'recipe_cfg' is not initialized yet." - "call prepare() method before calling this method") + "'recipe_cfg' is not initialized yet." 
"call prepare() method before calling this method" + ) if mode is not None: self._mode = mode @@ -107,14 +115,14 @@ def _run_task(self, stage_module, mode=None, dataset=None, parameters=None, **kw ir_path=None, model_ckpt=self._model_ckpt, mode=self._mode, - **kwargs + **kwargs, ) - logger.info('run task done.') + logger.info("run task done.") return output def finalize(self): if self._recipe_cfg is not None: - if self._recipe_cfg.get('cleanup_outputs', False): + if self._recipe_cfg.get("cleanup_outputs", False): if os.path.exists(self._output_path): shutil.rmtree(self._output_path, ignore_errors=False) @@ -148,14 +156,16 @@ def template_file_path(self): def hyperparams(self): return self._hyperparams - def _initialize(self, dataset=None, output_model=None): - """ prepare configurations to run a task through MPA's stage - """ - logger.info('initializing....') + def _initialize(self, dataset=None, output_model=None, export=False): + """prepare configurations to run a task through MPA's stage""" + logger.info("initializing....") self._init_recipe() - recipe_hparams = self._init_recipe_hparam() - if len(recipe_hparams) > 0: - self._recipe_cfg.merge_from_dict(recipe_hparams) + + if not export: + recipe_hparams = self._init_recipe_hparam() + if len(recipe_hparams) > 0: + self._recipe_cfg.merge_from_dict(recipe_hparams) + if "custom_hooks" in self.override_configs: override_custom_hooks = self.override_configs.pop("custom_hooks") for override_custom_hook in override_custom_hooks: @@ -170,33 +180,66 @@ def _initialize(self, dataset=None, output_model=None): # Remove FP16 config if running on CPU device and revert to FP32 # https://github.com/pytorch/pytorch/issues/23377 - if not torch.cuda.is_available() and 'fp16' in self._model_cfg: - logger.info('Revert FP16 to FP32 on CPU device') + if not torch.cuda.is_available() and "fp16" in self._model_cfg: + logger.info("Revert FP16 to FP32 on CPU device") if isinstance(self._model_cfg, Config): - del self._model_cfg._cfg_dict['fp16'] + del self._model_cfg._cfg_dict["fp16"] elif isinstance(self._model_cfg, ConfigDict): - del self._model_cfg['fp16'] + del self._model_cfg["fp16"] self._precision = [ModelPrecision.FP32] + # Add/remove adaptive interval hook + if self._recipe_cfg.get("use_adaptive_interval", False): + self._recipe_cfg.adaptive_validation_interval = self._recipe_cfg.get( + "adaptive_validation_interval", dict(max_interval=5) + ) + else: + self._recipe_cfg.pop("adaptive_validation_interval", None) + + # Add/remove early stop hook + if "early_stop" in self._recipe_cfg: + remove_custom_hook(self._recipe_cfg, "EarlyStoppingHook") + early_stop = self._recipe_cfg.get("early_stop", False) + if early_stop: + early_stop_hook = ConfigDict( + type="LazyEarlyStoppingHook", + start=early_stop.start, + patience=early_stop.patience, + iteration_patience=early_stop.iteration_patience, + interval=1, + metric=self._recipe_cfg.early_stop_metric, + priority=75, + ) + update_or_add_custom_hook(self._recipe_cfg, early_stop_hook) + else: + remove_custom_hook(self._recipe_cfg, "LazyEarlyStoppingHook") + # add Cancel tranining hook - update_or_add_custom_hook(self._recipe_cfg, ConfigDict( - type='CancelInterfaceHook', init_callback=self.on_hook_initialized)) + update_or_add_custom_hook( + self._recipe_cfg, + ConfigDict(type="CancelInterfaceHook", init_callback=self.on_hook_initialized), + ) if self._time_monitor is not None: - update_or_add_custom_hook(self._recipe_cfg, ConfigDict( - type='OTEProgressHook', time_monitor=self._time_monitor, verbose=True, 
priority=71)) - if self._learning_curves is not None: - self._recipe_cfg.log_config.hooks.append( - {'type': 'OTELoggerHook', 'curves': self._learning_curves} + update_or_add_custom_hook( + self._recipe_cfg, + ConfigDict( + type="OTEProgressHook", + time_monitor=self._time_monitor, + verbose=True, + priority=71, + ), ) + if self._learning_curves is not None: + self._recipe_cfg.log_config.hooks.append({"type": "OTELoggerHook", "curves": self._learning_curves}) - logger.info('initialized.') + logger.info("initialized.") @abc.abstractmethod def _init_recipe(self): """ initialize the MPA's target recipe. (inclusive of stage type) """ - raise NotImplementedError('this method should be implemented') + raise NotImplementedError("this method should be implemented") def _init_model_cfg(self) -> Union[Config, None]: """ @@ -226,17 +269,17 @@ def _init_recipe_hparam(self) -> dict: return dict() def _load_model_state_dict(self, model: ModelEntity): - if 'weights.pth' in model.model_adapters: + if "weights.pth" in model.model_adapters: # If a model has been trained and saved for the task already, create empty model and load weights here buffer = io.BytesIO(model.get_data("weights.pth")) - model_data = torch.load(buffer, map_location=torch.device('cpu')) + model_data = torch.load(buffer, map_location=torch.device("cpu")) # set confidence_threshold as well - self.confidence_threshold = model_data.get('confidence_threshold', self.confidence_threshold) - if model_data.get('anchors'): - self._anchors = model_data['anchors'] + self.confidence_threshold = model_data.get("confidence_threshold", self.confidence_threshold) + if model_data.get("anchors"): + self._anchors = model_data["anchors"] - return model_data.get('model', model_data.get('state_dict', None)) + return model_data.get("model", model_data.get("state_dict", None)) else: return None @@ -244,7 +287,8 @@ def _load_model_label_schema(self, model: ModelEntity): # If a model has been trained and saved for the task already, create empty model and load weights here if "label_schema.json" in model.model_adapters: import json - buffer = json.loads(model.get_data("label_schema.json").decode('utf-8')) + + buffer = json.loads(model.get_data("label_schema.json").decode("utf-8")) model_label_schema = LabelSchemaMapper().backward(buffer) return model_label_schema.get_labels(include_empty=False) else: @@ -252,20 +296,20 @@ def _load_model_label_schema(self, model: ModelEntity): @staticmethod def _get_meta_keys(pipeline_step): - meta_keys = list(pipeline_step.get('meta_keys', DEFAULT_META_KEYS)) - meta_keys.append('ignored_labels') - pipeline_step['meta_keys'] = set(meta_keys) + meta_keys = list(pipeline_step.get("meta_keys", DEFAULT_META_KEYS)) + meta_keys.append("ignored_labels") + pipeline_step["meta_keys"] = set(meta_keys) return pipeline_step @staticmethod def _get_confidence_threshold(hyperparams): confidence_threshold = 0.3 - if hasattr(hyperparams, 'postprocessing') and hasattr(hyperparams.postprocessing, 'confidence_threshold'): + if hasattr(hyperparams, "postprocessing") and hasattr(hyperparams.postprocessing, "confidence_threshold"): confidence_threshold = hyperparams.postprocessing.confidence_threshold return confidence_threshold def cancel_hook_initialized(self, cancel_interface: CancelInterfaceHook): - logger.info('cancel hook is initialized') + logger.info("cancel hook is initialized") self.cancel_interface = cancel_interface if self.reserved_cancel: self.cancel_interface.cancel() diff --git 
a/external/model-preparation-algorithm/mpa_tasks/extensions/datasets/mpa_cls_dataset.py b/external/model-preparation-algorithm/mpa_tasks/extensions/datasets/mpa_cls_dataset.py index 411eff85ffb..6cb11e17058 100644 --- a/external/model-preparation-algorithm/mpa_tasks/extensions/datasets/mpa_cls_dataset.py +++ b/external/model-preparation-algorithm/mpa_tasks/extensions/datasets/mpa_cls_dataset.py @@ -3,21 +3,20 @@ # import numpy as np -from sklearn.metrics import confusion_matrix as sklearn_confusion_matrix -from mmcv.utils.registry import build_from_cfg from mmcls.core import average_performance, mAP +from mmcls.datasets.base_dataset import BaseDataset from mmcls.datasets.builder import DATASETS, PIPELINES from mmcls.datasets.pipelines import Compose -from mmcls.datasets.base_dataset import BaseDataset -from mpa_tasks.utils.data_utils import get_cls_img_indices, get_old_new_img_indices +from mmcv.utils.registry import build_from_cfg from mpa.utils.logger import get_logger +from mpa_tasks.utils.data_utils import get_cls_img_indices, get_old_new_img_indices +from sklearn.metrics import confusion_matrix as sklearn_confusion_matrix logger = get_logger() @DATASETS.register_module() class MPAClsDataset(BaseDataset): - def __init__(self, ote_dataset=None, labels=None, empty_label=None, **kwargs): self.ote_dataset = ote_dataset self.labels = labels @@ -27,24 +26,24 @@ def __init__(self, ote_dataset=None, labels=None, empty_label=None, **kwargs): self.CLASSES = list(label.name for label in labels) self.gt_labels = [] - pipeline = kwargs['pipeline'] + pipeline = kwargs["pipeline"] self.num_classes = len(self.CLASSES) - test_mode = kwargs.get('test_mode', False) + test_mode = kwargs.get("test_mode", False) if test_mode is False: - new_classes = kwargs.pop('new_classes', []) + new_classes = kwargs.pop("new_classes", []) self.img_indices = self.get_indices(new_classes) if isinstance(pipeline, dict): self.pipeline = {} for k, v in pipeline.items(): - _pipeline = [dict(type='LoadImageFromOTEDataset'), *v] + _pipeline = [dict(type="LoadImageFromOTEDataset"), *v] _pipeline = [build_from_cfg(p, PIPELINES) for p in _pipeline] self.pipeline[k] = Compose(_pipeline) self.num_pipes = len(pipeline) elif isinstance(pipeline, list): self.num_pipes = 1 - _pipeline = [dict(type='LoadImageFromOTEDataset'), *pipeline] + _pipeline = [dict(type="LoadImageFromOTEDataset"), *pipeline] self.pipeline = Compose([build_from_cfg(p, PIPELINES) for p in _pipeline]) self.load_annotations() @@ -75,8 +74,14 @@ def __getitem__(self, index): height, width = item.height, item.width - data_info = dict(dataset_item=item, width=width, height=height, index=index, - gt_label=self.gt_labels[index], ignored_labels=ignored_labels) + data_info = dict( + dataset_item=item, + width=width, + height=height, + index=index, + gt_label=self.gt_labels[index], + ignored_labels=ignored_labels, + ) if self.pipeline is None: return data_info @@ -95,11 +100,7 @@ def get_gt_labels(self): def __len__(self): return len(self.ote_dataset) - def evaluate(self, - results, - metric='accuracy', - metric_options=None, - logger=None): + def evaluate(self, results, metric="accuracy", metric_options=None, logger=None): """Evaluate the dataset with new metric 'class_accuracy' Args: @@ -117,15 +118,15 @@ def evaluate(self, """ if metric_options is None: - metric_options = {'topk': (1, 5) if self.num_classes >= 5 else (1, )} + metric_options = {"topk": (1, 5) if self.num_classes >= 5 else (1,)} if isinstance(metric, str): metrics = [metric] else: metrics = metric - if 
'class_accuracy' in metrics: - metrics.remove('class_accuracy') + if "class_accuracy" in metrics: + metrics.remove("class_accuracy") self.class_acc = True eval_results = super().evaluate(results, metrics, metric_options, logger) @@ -135,10 +136,14 @@ def evaluate(self, results = np.vstack(results) gt_labels = self.get_gt_labels() accuracies = self.class_accuracy(results, gt_labels) - eval_results.update({f'{c} accuracy': a for c, a in zip(self.CLASSES, accuracies)}) - eval_results.update({'mean accuracy': np.mean(accuracies)}) - eval_results['accuracy'] = eval_results['accuracy_top-1'] + if any(np.isnan(accuracies)): + accuracies = np.nan_to_num(accuracies) + + eval_results.update({f"{c} accuracy": a for c, a in zip(self.CLASSES, accuracies)}) + eval_results.update({"mean accuracy": np.mean(accuracies)}) + + eval_results["accuracy"] = eval_results["accuracy_top-1"] return eval_results def class_accuracy(self, results, gt_labels): @@ -178,12 +183,7 @@ def load_annotations(self): self.gt_labels.append(onehot_indices) self.gt_labels = np.array(self.gt_labels) - def evaluate(self, - results, - metric='mAP', - metric_options=None, - indices=None, - logger=None): + def evaluate(self, results, metric="mAP", metric_options=None, indices=None, logger=None): """Evaluate the dataset. Args: results (list): Testing results of the dataset. @@ -198,34 +198,33 @@ def evaluate(self, dict: evaluation results """ if metric_options is None or metric_options == {}: - metric_options = {'thr': 0.5} + metric_options = {"thr": 0.5} if isinstance(metric, str): metrics = [metric] else: metrics = metric - allowed_metrics = ['accuracy-mlc', 'mAP', 'CP', 'CR', 'CF1', 'OP', 'OR', 'OF1'] + allowed_metrics = ["accuracy-mlc", "mAP", "CP", "CR", "CF1", "OP", "OR", "OF1"] eval_results = {} results = np.vstack(results) gt_labels = self.get_gt_labels() if indices is not None: gt_labels = gt_labels[indices] num_imgs = len(results) - assert len(gt_labels) == num_imgs, 'dataset testing results should '\ - 'be of the same length as gt_labels.' + assert len(gt_labels) == num_imgs, "dataset testing results should " "be of the same length as gt_labels." 
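[Aside, not part of the patch] A small illustration of the "thr"-based binarisation used just below: scores above metric_options["thr"] become predicted labels, and the 1-based cls_index lets 0 stand for "no label" so it can be discarded from the set. Values here are toy numbers.

import numpy as np

scores = np.array([[0.9, 0.2, 0.7]])            # one image, three classes (toy values)
pred_label = scores > 0.5                       # metric_options["thr"] default
cls_index = [1, 2, 3]                           # 1-based, as in [i + 1 for i in range(...)] below
pred_idx = set(pred_label[0] * cls_index) - {0}
assert pred_idx == {1, 3}                       # classes 1 and 3 predicted present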
invalid_metrics = set(metrics) - set(allowed_metrics) if len(invalid_metrics) != 0: - raise ValueError(f'metric {invalid_metrics} is not supported.') + raise ValueError(f"metric {invalid_metrics} is not supported.") - if 'accuracy-mlc' in metrics: + if "accuracy-mlc" in metrics: true_label_idx = [] pred_label_idx = [] - pos_thr = metric_options.get('thr', 0.5) + pos_thr = metric_options.get("thr", 0.5) - true_label = (gt_labels == 1) - pred_label = (results > pos_thr) - cls_index = [i+1 for i in range(len(self.labels))] + true_label = gt_labels == 1 + pred_label = results > pos_thr + cls_index = [i + 1 for i in range(len(self.labels))] for true_lbl, pred_lbl in zip(true_label, pred_label): true_lbl_idx = set(true_lbl * cls_index) - set([0]) # except empty pred_lbl_idx = set(pred_lbl * cls_index) - set([0]) @@ -234,42 +233,35 @@ def evaluate(self, confusion_matrices = [] for cls_idx in cls_index: - group_labels_idx = set([cls_idx-1]) - y_true = [int(not group_labels_idx.issubset(true_labels)) - for true_labels in true_label_idx] - y_pred = [int(not group_labels_idx.issubset(pred_labels)) - for pred_labels in pred_label_idx] + group_labels_idx = set([cls_idx - 1]) + y_true = [int(not group_labels_idx.issubset(true_labels)) for true_labels in true_label_idx] + y_pred = [int(not group_labels_idx.issubset(pred_labels)) for pred_labels in pred_label_idx] matrix_data = sklearn_confusion_matrix(y_true, y_pred, labels=list(range(len([0, 1])))) confusion_matrices.append(matrix_data) - correct_per_label_group = [ - np.trace(mat) for mat in confusion_matrices - ] - total_per_label_group = [ - np.sum(mat) for mat in confusion_matrices - ] + correct_per_label_group = [np.trace(mat) for mat in confusion_matrices] + total_per_label_group = [np.sum(mat) for mat in confusion_matrices] acc = np.sum(correct_per_label_group) / np.sum(total_per_label_group) # MICRO average - eval_results['accuracy-mlc'] = acc + eval_results["accuracy-mlc"] = acc - if 'mAP' in metrics: + if "mAP" in metrics: mAP_value = mAP(results, gt_labels) - eval_results['mAP'] = mAP_value - if len(set(metrics) - {'mAP'}) != 0: - performance_keys = ['CP', 'CR', 'CF1', 'OP', 'OR', 'OF1'] - performance_values = average_performance(results, gt_labels, - **metric_options) + eval_results["mAP"] = mAP_value + if len(set(metrics) - {"mAP"}) != 0: + performance_keys = ["CP", "CR", "CF1", "OP", "OR", "OF1"] + performance_values = average_performance(results, gt_labels, **metric_options) for k, v in zip(performance_keys, performance_values): if k in metrics: eval_results[k] = v - eval_results['accuracy'] = mAP_value + eval_results["accuracy"] = mAP_value return eval_results @DATASETS.register_module() class MPAHierarchicalClsDataset(MPAMultilabelClsDataset): def __init__(self, **kwargs): - self.hierarchical_info = kwargs.pop('hierarchical_info', None) + self.hierarchical_info = kwargs.pop("hierarchical_info", None) super().__init__(**kwargs) def load_annotations(self): @@ -279,14 +271,15 @@ def load_annotations(self): item_labels = self.ote_dataset[i].get_roi_labels(self.labels, include_empty=include_empty) ignored_labels = self.ote_dataset[i].ignored_labels if item_labels: - num_cls_heads = self.hierarchical_info['num_multiclass_heads'] + num_cls_heads = self.hierarchical_info["num_multiclass_heads"] - class_indices = [0]*(self.hierarchical_info['num_multiclass_heads'] + - self.hierarchical_info['num_multilabel_classes']) + class_indices = [0] * ( + self.hierarchical_info["num_multiclass_heads"] + self.hierarchical_info["num_multilabel_classes"] + ) 
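[Aside, not part of the patch] A hedged sketch of the class_indices layout being built here, with illustrative sizes (2 multiclass heads, 3 multilabel classes, not taken from the patch): the first num_multiclass_heads slots hold the chosen in-group index for each exclusive head, the remaining slots are per-class 0/1 flags, and -1 marks unanswered or ignored entries, matching the loop that follows.

num_multiclass_heads = 2                        # illustrative sizes only
num_multilabel_classes = 3
class_indices = [0] * (num_multiclass_heads + num_multilabel_classes)
for j in range(num_multiclass_heads):
    class_indices[j] = -1                       # heads start "unanswered" until a label fills them
# e.g. head 0 resolves to in-group index 2, and multilabel class 1 is present:
class_indices[0] = 2
class_indices[num_multiclass_heads + 1] = 1
assert class_indices == [2, -1, 0, 1, 0]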
for j in range(num_cls_heads): class_indices[j] = -1 for ote_lbl in item_labels: - group_idx, in_group_idx = self.hierarchical_info['class_to_group_idx'][ote_lbl.name] + group_idx, in_group_idx = self.hierarchical_info["class_to_group_idx"][ote_lbl.name] if group_idx < num_cls_heads: class_indices[group_idx] = in_group_idx elif ote_lbl not in ignored_labels: @@ -294,8 +287,9 @@ def load_annotations(self): else: class_indices[num_cls_heads + in_group_idx] = -1 else: # this supposed to happen only on inference stage or if we have a negative in multilabel data - class_indices = [-1]*(self.hierarchical_info['num_multiclass_heads'] + - self.hierarchical_info['num_multilabel_classes']) + class_indices = [-1] * ( + self.hierarchical_info["num_multiclass_heads"] + self.hierarchical_info["num_multilabel_classes"] + ) self.gt_labels.append(class_indices) self.gt_labels = np.array(self.gt_labels) @@ -318,12 +312,7 @@ def mean_top_k_accuracy(scores, labels, k=1): return np.mean(accuracy_values) * 100 if len(accuracy_values) > 0 else 1.0 - def evaluate(self, - results, - metric='MHAcc', - metric_options=None, - indices=None, - logger=None): + def evaluate(self, results, metric="MHAcc", metric_options=None, indices=None, logger=None): """Evaluate the dataset. Args: results (list): Testing results of the dataset. @@ -338,50 +327,54 @@ def evaluate(self, dict: evaluation results """ if metric_options is None or metric_options == {}: - metric_options = {'thr': 0.5} + metric_options = {"thr": 0.5} if isinstance(metric, str): metrics = [metric] else: metrics = metric - allowed_metrics = ['MHAcc', 'avgClsAcc', 'mAP'] + allowed_metrics = ["MHAcc", "avgClsAcc", "mAP"] eval_results = {} results = np.vstack(results) gt_labels = self.get_gt_labels() if indices is not None: gt_labels = gt_labels[indices] num_imgs = len(results) - assert len(gt_labels) == num_imgs, 'dataset testing results should '\ - 'be of the same length as gt_labels.' + assert len(gt_labels) == num_imgs, "dataset testing results should " "be of the same length as gt_labels." invalid_metrics = set(metrics) - set(allowed_metrics) if len(invalid_metrics) != 0: - raise ValueError(f'metric {invalid_metrics} is not supported.') - - total_acc = 0. - total_acc_sl = 0. - for i in range(self.hierarchical_info['num_multiclass_heads']): - multiclass_logit = results[:, self.hierarchical_info['head_idx_to_logits_range'][i][0]: - self.hierarchical_info['head_idx_to_logits_range'][i][1]] # noqa: E127 + raise ValueError(f"metric {invalid_metrics} is not supported.") + + total_acc = 0.0 + total_acc_sl = 0.0 + for i in range(self.hierarchical_info["num_multiclass_heads"]): + multiclass_logit = results[ + :, + self.hierarchical_info["head_idx_to_logits_range"][i][0] : self.hierarchical_info[ + "head_idx_to_logits_range" + ][i][1], + ] # noqa: E127 multiclass_gt = gt_labels[:, i] cls_acc = self.mean_top_k_accuracy(multiclass_logit, multiclass_gt, k=1) total_acc += cls_acc total_acc_sl += cls_acc - mAP_value = 0. 
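[Aside, not part of the patch] A worked toy example of the MHAcc averaging performed just below: with two multiclass heads scoring top-1 accuracies of 0.8 and 0.6 and a multilabel group with mAP 0.7, the combined score divides by the head count plus one for the multilabel group.

total_acc = 0.8 + 0.6 + 0.7                     # per-head top-1 accuracies + multilabel mAP (toy values)
total_acc /= 2 + 1                              # num_multiclass_heads + int(num_multilabel_classes > 0)
assert abs(total_acc - 0.7) < 1e-9              # MHAcc = 0.7; avgClsAcc = (0.8 + 0.6) / 2 = 0.7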
- if self.hierarchical_info['num_multilabel_classes'] and 'mAP' in metrics: - multilabel_logits = results[:, self.hierarchical_info['num_single_label_classes']:] - multilabel_gt = gt_labels[:, self.hierarchical_info['num_multiclass_heads']:] + mAP_value = 0.0 + if self.hierarchical_info["num_multilabel_classes"] and "mAP" in metrics: + multilabel_logits = results[:, self.hierarchical_info["num_single_label_classes"] :] + multilabel_gt = gt_labels[:, self.hierarchical_info["num_multiclass_heads"] :] mAP_value = mAP(multilabel_logits, multilabel_gt) total_acc += mAP_value - total_acc /= (self.hierarchical_info['num_multiclass_heads'] + - int(self.hierarchical_info['num_multilabel_classes'] > 0)) - - eval_results['MHAcc'] = total_acc - eval_results['avgClsAcc'] = total_acc_sl / self.hierarchical_info['num_multiclass_heads'] - eval_results['mAP'] = mAP_value - eval_results['accuracy'] = total_acc + total_acc /= self.hierarchical_info["num_multiclass_heads"] + int( + self.hierarchical_info["num_multilabel_classes"] > 0 + ) + + eval_results["MHAcc"] = total_acc + eval_results["avgClsAcc"] = total_acc_sl / self.hierarchical_info["num_multiclass_heads"] + eval_results["mAP"] = mAP_value + eval_results["accuracy"] = total_acc return eval_results diff --git a/external/model-preparation-algorithm/mpa_tasks/extensions/datasets/mpa_det_dataset.py b/external/model-preparation-algorithm/mpa_tasks/extensions/datasets/mpa_det_dataset.py index 9935918b7fa..75614b6010b 100644 --- a/external/model-preparation-algorithm/mpa_tasks/extensions/datasets/mpa_det_dataset.py +++ b/external/model-preparation-algorithm/mpa_tasks/extensions/datasets/mpa_det_dataset.py @@ -2,8 +2,8 @@ # SPDX-License-Identifier: Apache-2.0 # -from mmdet.datasets.builder import DATASETS from detection_tasks.extension.datasets import OTEDataset +from mmdet.datasets.builder import DATASETS from mpa.utils.logger import get_logger from mpa_tasks.utils.data_utils import get_old_new_img_indices @@ -14,11 +14,11 @@ class MPADetDataset(OTEDataset): def __init__(self, **kwargs): dataset_cfg = kwargs.copy() - _ = dataset_cfg.pop('org_type', None) - new_classes = dataset_cfg.pop('new_classes', []) + _ = dataset_cfg.pop("org_type", None) + new_classes = dataset_cfg.pop("new_classes", []) super().__init__(**dataset_cfg) - test_mode = kwargs.get('test_mode', False) + test_mode = kwargs.get("test_mode", False) if test_mode is False: self.img_indices = get_old_new_img_indices(self.labels, new_classes, self.ote_dataset) @@ -32,4 +32,4 @@ def get_cat_ids(self, idx): list[int]: All categories in the image of specified index. 
""" - return self.get_ann_info(idx)['labels'].astype(int).tolist() + return self.get_ann_info(idx)["labels"].astype(int).tolist() diff --git a/external/model-preparation-algorithm/mpa_tasks/extensions/datasets/mpa_seg_dataset.py b/external/model-preparation-algorithm/mpa_tasks/extensions/datasets/mpa_seg_dataset.py index 6433f41a24c..4efad03eec1 100644 --- a/external/model-preparation-algorithm/mpa_tasks/extensions/datasets/mpa_seg_dataset.py +++ b/external/model-preparation-algorithm/mpa_tasks/extensions/datasets/mpa_seg_dataset.py @@ -3,9 +3,9 @@ # from mmseg.datasets.builder import DATASETS -from segmentation_tasks.extension.datasets import OTEDataset from mpa.utils.logger import get_logger from mpa_tasks.utils.data_utils import get_old_new_img_indices +from segmentation_tasks.extension.datasets import OTEDataset logger = get_logger() @@ -14,10 +14,10 @@ class MPASegIncrDataset(OTEDataset): def __init__(self, **kwargs): pipeline = [] - test_mode = kwargs.get('test_mode', False) - logger.info(f'test_mode : {test_mode}') - if 'dataset' in kwargs: - dataset = kwargs['dataset'] + test_mode = kwargs.get("test_mode", False) + logger.info(f"test_mode : {test_mode}") + if "dataset" in kwargs: + dataset = kwargs["dataset"] ote_dataset = dataset.ote_dataset pipeline = dataset.pipeline classes = dataset.labels @@ -25,15 +25,15 @@ def __init__(self, **kwargs): new_classes = dataset.new_classes self.img_indices = get_old_new_img_indices(classes, new_classes, ote_dataset) else: - ote_dataset = kwargs['ote_dataset'] - pipeline = kwargs['pipeline'] - classes = kwargs['labels'] + ote_dataset = kwargs["ote_dataset"] + pipeline = kwargs["pipeline"] + classes = kwargs["labels"] for action in pipeline: - if 'domain' in action: - action.pop('domain') + if "domain" in action: + action.pop("domain") classes = [c.name for c in classes] - classes = ['background'] + classes + classes = ["background"] + classes super().__init__(ote_dataset=ote_dataset, pipeline=pipeline, classes=classes) if self.label_map is None: self.label_map = {} diff --git a/external/model-preparation-algorithm/mpa_tasks/extensions/datasets/pipelines/mpa_cls_pipeline.py b/external/model-preparation-algorithm/mpa_tasks/extensions/datasets/pipelines/mpa_cls_pipeline.py index 71affb9b11d..70dd6613755 100644 --- a/external/model-preparation-algorithm/mpa_tasks/extensions/datasets/pipelines/mpa_cls_pipeline.py +++ b/external/model-preparation-algorithm/mpa_tasks/extensions/datasets/pipelines/mpa_cls_pipeline.py @@ -2,10 +2,10 @@ # SPDX-License-Identifier: Apache-2.0 # -from mmcls.datasets import PIPELINES -import copy +from typing import Any, Dict + import numpy as np -from typing import Dict, Any, Optional +from mmcls.datasets import PIPELINES from ote_sdk.utils.argument_checks import check_input_parameters_type @@ -30,29 +30,30 @@ def __init__(self, to_float32: bool = False): @check_input_parameters_type() def __call__(self, results: Dict[str, Any]): - dataset_item = results['dataset_item'] + dataset_item = results["dataset_item"] img = dataset_item.numpy shape = img.shape - assert img.shape[0] == results['height'], f"{img.shape[0]} != {results['height']}" - assert img.shape[1] == results['width'], f"{img.shape[1]} != {results['width']}" + assert img.shape[0] == results["height"], f"{img.shape[0]} != {results['height']}" + assert img.shape[1] == results["width"], f"{img.shape[1]} != {results['width']}" filename = f"Dataset item index {results['index']}" - results['filename'] = filename - results['ori_filename'] = filename - results['img'] = img 
- results['img_shape'] = shape - results['ori_shape'] = shape + results["filename"] = filename + results["ori_filename"] = filename + results["img"] = img + results["img_shape"] = shape + results["ori_shape"] = shape # Set initial values for default meta_keys - results['pad_shape'] = shape + results["pad_shape"] = shape num_channels = 1 if len(shape) < 3 else shape[2] - results['img_norm_cfg'] = dict( + results["img_norm_cfg"] = dict( mean=np.zeros(num_channels, dtype=np.float32), std=np.ones(num_channels, dtype=np.float32), - to_rgb=False) - results['img_fields'] = ['img'] + to_rgb=False, + ) + results["img_fields"] = ["img"] if self.to_float32: - results['img'] = results['img'].astype(np.float32) + results["img"] = results["img"].astype(np.float32) return results diff --git a/external/model-preparation-algorithm/mpa_tasks/samples/cls_cls_il_sample.py b/external/model-preparation-algorithm/mpa_tasks/samples/cls_cls_il_sample.py index 1542f63dfa8..a8c6c707d00 100644 --- a/external/model-preparation-algorithm/mpa_tasks/samples/cls_cls_il_sample.py +++ b/external/model-preparation-algorithm/mpa_tasks/samples/cls_cls_il_sample.py @@ -3,15 +3,22 @@ # import argparse +import random import sys import numpy as np +import torch from mmcv.utils import get_logger from ote_sdk.configuration.helper import create from ote_sdk.entities.datasets import DatasetEntity from ote_sdk.entities.inference_parameters import InferenceParameters from ote_sdk.entities.label import Domain -from ote_sdk.entities.label_schema import LabelEntity, LabelGroup, LabelGroupType, LabelSchemaEntity +from ote_sdk.entities.label_schema import ( + LabelEntity, + LabelGroup, + LabelGroupType, + LabelSchemaEntity, +) from ote_sdk.entities.model import ModelEntity from ote_sdk.entities.model_template import parse_model_template from ote_sdk.entities.optimization_parameters import OptimizationParameters @@ -21,8 +28,7 @@ from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType from ote_sdk.usecases.tasks.interfaces.optimization_interface import OptimizationType from torchreid_tasks.utils import get_task_class -import random -import torch + seed = 5 random.seed(seed) np.random.seed(seed) @@ -31,75 +37,88 @@ torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False -logger = get_logger(name='sample') +logger = get_logger(name="sample") -parser = argparse.ArgumentParser(description='Sample showcasing the new API') -parser.add_argument('template_file_path', help='path to template file') -parser.add_argument('--export', action='store_true') -parser.add_argument('--multilabel', action='store_true') -parser.add_argument('--hierarchical', action='store_true') +parser = argparse.ArgumentParser(description="Sample showcasing the new API") +parser.add_argument("template_file_path", help="path to template file") +parser.add_argument("--export", action="store_true") +parser.add_argument("--multilabel", action="store_true") +parser.add_argument("--hierarchical", action="store_true") args = parser.parse_args() def load_test_dataset(data_type): - from ote_sdk.entities.annotation import Annotation, AnnotationSceneEntity, AnnotationSceneKind + import PIL + from ote_sdk.entities.annotation import ( + Annotation, + AnnotationSceneEntity, + AnnotationSceneKind, + ) from ote_sdk.entities.dataset_item import DatasetItemEntity from ote_sdk.entities.image import Image from ote_sdk.entities.scored_label import ScoredLabel from ote_sdk.entities.shapes.rectangle import Rectangle from ote_sdk.entities.subset import Subset 
- import PIL def gen_image(resolution, shape=None): - image = PIL.Image.new('RGB', resolution, (255, 255, 255)) + image = PIL.Image.new("RGB", resolution, (255, 255, 255)) draw = PIL.ImageDraw.Draw(image) h, w = image.size - shape = shape.split('+') if '+' in shape else [shape] + shape = shape.split("+") if "+" in shape else [shape] for s in shape: - if s == 'rectangle': - draw.rectangle((h*0.1, w*0.1, h*0.4, w*0.4), fill=(0, 192, 192), outline=(0, 0, 0)) - if s == 'triangle': - draw.polygon(((h*0.5, w*0.25), (h, w*0.25), (h*0.8, w*0.5)), fill=(255, 255, 0), outline=(0, 0, 0)) - if s == 'pieslice': - draw.pieslice(((h*0.1, w*0.5), (h*0.5, w*0.9)), start=50, end=250, fill=(0, 255, 0), outline=(0, 0, 0)) - if s == 'circle': - draw.ellipse((h*0.5, w*0.5, h*0.9, w*0.9), fill='blue', outline='blue') - if s == 'text': - draw.text((0, 0), "Intel", fill='blue', align='center') + if s == "rectangle": + draw.rectangle((h * 0.1, w * 0.1, h * 0.4, w * 0.4), fill=(0, 192, 192), outline=(0, 0, 0)) + if s == "triangle": + draw.polygon( + ((h * 0.5, w * 0.25), (h, w * 0.25), (h * 0.8, w * 0.5)), fill=(255, 255, 0), outline=(0, 0, 0) + ) + if s == "pieslice": + draw.pieslice( + ((h * 0.1, w * 0.5), (h * 0.5, w * 0.9)), start=50, end=250, fill=(0, 255, 0), outline=(0, 0, 0) + ) + if s == "circle": + draw.ellipse((h * 0.5, w * 0.5, h * 0.9, w * 0.9), fill="blue", outline="blue") + if s == "text": + draw.text((0, 0), "Intel", fill="blue", align="center") return np.array(image), shape datas = [ - gen_image((32, 32), shape='rectangle'), - gen_image((32, 32), shape='triangle'), - gen_image((32, 32), shape='rectangle+triangle'), # for multilabel (old) - gen_image((32, 32), shape='pieslice'), - gen_image((32, 32), shape='pieslice+rectangle'), - gen_image((32, 32), shape='pieslice+triangle'), - gen_image((32, 32), shape='pieslice+rectangle+triangle'), # for multilabel (new) - gen_image((32, 32), shape='circle'), - gen_image((32, 32), shape='circle+text') # for hierarchical (new) + gen_image((32, 32), shape="rectangle"), + gen_image((32, 32), shape="triangle"), + gen_image((32, 32), shape="rectangle+triangle"), # for multilabel (old) + gen_image((32, 32), shape="pieslice"), + gen_image((32, 32), shape="pieslice+rectangle"), + gen_image((32, 32), shape="pieslice+triangle"), + gen_image((32, 32), shape="pieslice+rectangle+triangle"), # for multilabel (new) + gen_image((32, 32), shape="circle"), + gen_image((32, 32), shape="circle+text"), # for hierarchical (new) ] - labels = {'rectangle': LabelEntity(name='rectangle', domain=Domain.CLASSIFICATION, id=0), - 'triangle': LabelEntity(name='triangle', domain=Domain.CLASSIFICATION, id=1), - 'pieslice': LabelEntity(name='pieslice', domain=Domain.CLASSIFICATION, id=2), - 'circle' : LabelEntity(name='circle', domain=Domain.CLASSIFICATION, id=3), - 'text' : LabelEntity(name='text', domain=Domain.CLASSIFICATION, id=4)} + labels = { + "rectangle": LabelEntity(name="rectangle", domain=Domain.CLASSIFICATION, id=0), + "triangle": LabelEntity(name="triangle", domain=Domain.CLASSIFICATION, id=1), + "pieslice": LabelEntity(name="pieslice", domain=Domain.CLASSIFICATION, id=2), + "circle": LabelEntity(name="circle", domain=Domain.CLASSIFICATION, id=3), + "text": LabelEntity(name="text", domain=Domain.CLASSIFICATION, id=4), + } def get_image(i, subset, ignored_labels=None): image, shape = datas[i] lbl = [ScoredLabel(label=labels[s], probability=1.0) for s in shape] - return DatasetItemEntity(media=Image(data=image), - annotation_scene=AnnotationSceneEntity( - annotations=[Annotation( - 
Rectangle(x1=0.0, y1=0.0, x2=1.0, y2=1.0), - labels=lbl, - )], - kind=AnnotationSceneKind.ANNOTATION - ), - subset=subset, - ignored_labels=ignored_labels, - ) + return DatasetItemEntity( + media=Image(data=image), + annotation_scene=AnnotationSceneEntity( + annotations=[ + Annotation( + Rectangle(x1=0.0, y1=0.0, x2=1.0, y2=1.0), + labels=lbl, + ) + ], + kind=AnnotationSceneKind.ANNOTATION, + ), + subset=subset, + ignored_labels=ignored_labels, + ) def gen_old_new_dataset(multilabel=False, hierarchical=False): old_train, old_val, new_train, new_val = [], [], [], [] @@ -108,11 +127,11 @@ def gen_old_new_dataset(multilabel=False, hierarchical=False): if multilabel: old_img_idx = [0, 1, 2] new_img_idx = [0, 1, 2, 3, 4, 5, 6] - ignored_labels = [labels['pieslice']] + ignored_labels = [labels["pieslice"]] elif hierarchical: old_img_idx = [0, 1, 2, 3, 8] new_img_idx = [0, 1, 2, 3, 8, -1] - ignored_labels = [labels['text']] + ignored_labels = [labels["text"]] else: old_img_idx = [0, 1] new_img_idx = [0, 1, 3] @@ -131,16 +150,16 @@ def gen_old_new_dataset(multilabel=False, hierarchical=False): new_train.append(get_image(idx, Subset.TRAINING)) new_val.append(get_image(idx, Subset.VALIDATION)) - return old_train+old_val, new_train+new_val + return old_train + old_val, new_train + new_val old, new = gen_old_new_dataset(args.multilabel, args.hierarchical) if not args.hierarchical: - labels = [labels['rectangle'], labels['triangle'], labels['pieslice']] + labels = [labels["rectangle"], labels["triangle"], labels["pieslice"]] else: labels = [i for i in labels.values()] - if data_type == 'old': + if data_type == "old": return DatasetEntity(old), labels[:-1] else: return DatasetEntity(old + new), labels @@ -152,8 +171,8 @@ def get_label_schema(labels, multilabel=False, hierarchical=False): for label in labels: label_schema.add_group(LabelGroup(name=label.name, labels=[label], group_type=LabelGroupType.EXCLUSIVE)) elif hierarchical: - single_label_classes = ['pieslice', 'circle'] - multi_label_classes = ['rectangle', 'triangle', 'text'] + single_label_classes = ["pieslice", "circle"] + multi_label_classes = ["rectangle", "triangle", "text"] single_labels = [label for label in labels if label.name in single_label_classes] single_label_group = LabelGroup(name="labels", labels=single_labels, group_type=LabelGroupType.EXCLUSIVE) @@ -161,7 +180,11 @@ def get_label_schema(labels, multilabel=False, hierarchical=False): for label in labels: if label.name in multi_label_classes: - label_schema.add_group(LabelGroup(name=f'{label.name}____{label.name}_group', labels=[label], group_type=LabelGroupType.EXCLUSIVE)) + label_schema.add_group( + LabelGroup( + name=f"{label.name}____{label.name}_group", labels=[label], group_type=LabelGroupType.EXCLUSIVE + ) + ) else: main_group = LabelGroup(name="labels", labels=labels, group_type=LabelGroupType.EXCLUSIVE) label_schema.add_group(main_group) @@ -170,146 +193,138 @@ def get_label_schema(labels, multilabel=False, hierarchical=False): def main(): - logger.info('Train initial model with OLD dataset') - dataset, labels_list = load_test_dataset('old') + logger.info("Train initial model with OLD dataset") + dataset, labels_list = load_test_dataset("old") labels_schema = get_label_schema(labels_list, multilabel=args.multilabel, hierarchical=args.hierarchical) - logger.info(f'Train dataset: {len(dataset.get_subset(Subset.TRAINING))} items') - logger.info(f'Validation dataset: {len(dataset.get_subset(Subset.VALIDATION))} items') + logger.info(f"Train dataset: 
{len(dataset.get_subset(Subset.TRAINING))} items") + logger.info(f"Validation dataset: {len(dataset.get_subset(Subset.VALIDATION))} items") - logger.info('Load model template') + logger.info("Load model template") model_template = parse_model_template(args.template_file_path) - logger.info('Set hyperparameters') + logger.info("Set hyperparameters") params = create(model_template.hyper_parameters.data) params.learning_parameters.num_iters = 10 params.learning_parameters.learning_rate = 0.03 params.learning_parameters.learning_rate_warmup_iters = 4 params.learning_parameters.batch_size = 16 - logger.info('Setup environment') + logger.info("Setup environment") environment = TaskEnvironment( - model=None, - hyper_parameters=params, - label_schema=labels_schema, - model_template=model_template + model=None, hyper_parameters=params, label_schema=labels_schema, model_template=model_template ) - logger.info('Create base Task') + logger.info("Create base Task") task_impl_path = model_template.entrypoints.base task_cls = get_task_class(task_impl_path) task = task_cls(task_environment=environment) - logger.info('Train model') + logger.info("Train model") initial_model = ModelEntity( dataset, environment.get_model_configuration(), ) task.train(dataset, initial_model) - logger.info('Class-incremental learning with OLD + NEW dataset') - dataset, labels_list = load_test_dataset('new') + logger.info("Class-incremental learning with OLD + NEW dataset") + dataset, labels_list = load_test_dataset("new") labels_schema = get_label_schema(labels_list, multilabel=args.multilabel, hierarchical=args.hierarchical) - logger.info(f'Train dataset: {len(dataset.get_subset(Subset.TRAINING))} items') - logger.info(f'Validation dataset: {len(dataset.get_subset(Subset.VALIDATION))} items') + logger.info(f"Train dataset: {len(dataset.get_subset(Subset.TRAINING))} items") + logger.info(f"Validation dataset: {len(dataset.get_subset(Subset.VALIDATION))} items") - logger.info('Load model template') + logger.info("Load model template") model_template = parse_model_template(args.template_file_path) - logger.info('Set hyperparameters') + logger.info("Set hyperparameters") params = create(model_template.hyper_parameters.data) params.learning_parameters.num_iters = 10 params.learning_parameters.learning_rate = 0.03 params.learning_parameters.learning_rate_warmup_iters = 4 params.learning_parameters.batch_size = 16 - logger.info('Setup environment') + logger.info("Setup environment") environment = TaskEnvironment( - model=initial_model, - hyper_parameters=params, - label_schema=labels_schema, - model_template=model_template + model=initial_model, hyper_parameters=params, label_schema=labels_schema, model_template=model_template ) - logger.info('Create base Task') + logger.info("Create base Task") task_impl_path = model_template.entrypoints.base task_cls = get_task_class(task_impl_path) task = task_cls(task_environment=environment) - logger.info('Train model') + logger.info("Train model") output_model = ModelEntity( dataset, environment.get_model_configuration(), ) task.train(dataset, output_model) - logger.info('Get predictions on the validation set') + logger.info("Get predictions on the validation set") validation_dataset = dataset.get_subset(Subset.VALIDATION) predicted_validation_dataset = task.infer( - validation_dataset.with_empty_annotations(), - InferenceParameters(is_evaluation=True)) + validation_dataset.with_empty_annotations(), InferenceParameters(is_evaluation=True) + ) resultset = ResultSetEntity( model=output_model, 
ground_truth_dataset=validation_dataset, prediction_dataset=predicted_validation_dataset, ) - logger.info('Estimate quality on validation set') + logger.info("Estimate quality on validation set") task.evaluate(resultset) logger.info(str(resultset.performance)) if args.export: - logger.info('Export model') + logger.info("Export model") exported_model = ModelEntity( dataset, environment.get_model_configuration(), ) task.export(ExportType.OPENVINO, exported_model) - logger.info('Create OpenVINO Task') + logger.info("Create OpenVINO Task") environment.model = exported_model openvino_task_impl_path = model_template.entrypoints.openvino openvino_task_cls = get_task_class(openvino_task_impl_path) openvino_task = openvino_task_cls(environment) - logger.info('Get predictions on the validation set') + logger.info("Get predictions on the validation set") predicted_validation_dataset = openvino_task.infer( - validation_dataset.with_empty_annotations(), - InferenceParameters(is_evaluation=True)) + validation_dataset.with_empty_annotations(), InferenceParameters(is_evaluation=True) + ) resultset = ResultSetEntity( model=output_model, ground_truth_dataset=validation_dataset, prediction_dataset=predicted_validation_dataset, ) - logger.info('Estimate quality on validation set') + logger.info("Estimate quality on validation set") openvino_task.evaluate(resultset) logger.info(str(resultset.performance)) - logger.info('Run POT optimization') + logger.info("Run POT optimization") optimized_model = ModelEntity( dataset, environment.get_model_configuration(), ) openvino_task.optimize( - OptimizationType.POT, - dataset.get_subset(Subset.TRAINING), - optimized_model, - OptimizationParameters()) + OptimizationType.POT, dataset.get_subset(Subset.TRAINING), optimized_model, OptimizationParameters() + ) - logger.info('Get predictions on the validation set') + logger.info("Get predictions on the validation set") predicted_validation_dataset = openvino_task.infer( - validation_dataset.with_empty_annotations(), - InferenceParameters(is_evaluation=True)) + validation_dataset.with_empty_annotations(), InferenceParameters(is_evaluation=True) + ) resultset = ResultSetEntity( model=optimized_model, ground_truth_dataset=validation_dataset, prediction_dataset=predicted_validation_dataset, ) - logger.info('Performance of optimized model:') + logger.info("Performance of optimized model:") openvino_task.evaluate(resultset) logger.info(str(resultset.performance)) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main() or 0) diff --git a/external/model-preparation-algorithm/mpa_tasks/samples/det_cls_il_sample.py b/external/model-preparation-algorithm/mpa_tasks/samples/det_cls_il_sample.py index 7c4c08a9276..6f12194a998 100644 --- a/external/model-preparation-algorithm/mpa_tasks/samples/det_cls_il_sample.py +++ b/external/model-preparation-algorithm/mpa_tasks/samples/det_cls_il_sample.py @@ -6,8 +6,8 @@ import sys import numpy as np -from mmcv.utils import get_logger from detection_tasks.apis.detection.ote_utils import get_task_class +from mmcv.utils import get_logger from ote_sdk.configuration.helper import create from ote_sdk.entities.datasets import DatasetEntity from ote_sdk.entities.inference_parameters import InferenceParameters @@ -20,16 +20,15 @@ from ote_sdk.entities.subset import Subset from ote_sdk.entities.task_environment import TaskEnvironment from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType -from ote_sdk.usecases.tasks.interfaces.optimization_interface import \ - 
OptimizationType +from ote_sdk.usecases.tasks.interfaces.optimization_interface import OptimizationType -logger = get_logger(name='sample') +logger = get_logger(name="sample") def parse_args(): - parser = argparse.ArgumentParser(description='Sample showcasing the new API') - parser.add_argument('template_file_path', help='path to template file') - parser.add_argument('--export', action='store_true') + parser = argparse.ArgumentParser(description="Sample showcasing the new API") + parser.add_argument("template_file_path", help="path to template file") + parser.add_argument("--export", action="store_true") return parser.parse_args() @@ -37,8 +36,11 @@ def parse_args(): def load_test_dataset(data_type): - from ote_sdk.entities.annotation import (Annotation, AnnotationSceneEntity, - AnnotationSceneKind) + from ote_sdk.entities.annotation import ( + Annotation, + AnnotationSceneEntity, + AnnotationSceneKind, + ) from ote_sdk.entities.dataset_item import DatasetItemEntity from ote_sdk.entities.image import Image from ote_sdk.entities.label import LabelEntity @@ -49,8 +51,9 @@ def load_test_dataset(data_type): def gen_image(resolution, x1, y1, x2, y2, color): w, h = resolution image = np.full([h, w, 3], fill_value=255, dtype=np.uint8) - image[int(y1 * h):int(y2 * h), int(x1 * w):int(x2 * w), :] = \ - np.full([int(h*(y2-y1)), int(w*(x2-x1)), 3], fill_value=colors[color], dtype=np.uint8) + image[int(y1 * h) : int(y2 * h), int(x1 * w) : int(x2 * w), :] = np.full( + [int(h * (y2 - y1)), int(w * (x2 - x1)), 3], fill_value=colors[color], dtype=np.uint8 + ) return (image, Rectangle(x1=x1, y1=y1, x2=x2, y2=y2)) images = [ @@ -60,8 +63,8 @@ def gen_image(resolution, x1, y1, x2, y2, color): (0.5, 0.5, 1.0, 1.0), ] labels = [ - LabelEntity(name='red', domain=Domain.DETECTION, id=0), # OLD class - LabelEntity(name='green', domain=Domain.DETECTION, id=1), + LabelEntity(name="red", domain=Domain.DETECTION, id=0), # OLD class + LabelEntity(name="green", domain=Domain.DETECTION, id=1), ] def get_image(i, subset, label_id): @@ -70,7 +73,7 @@ def get_image(i, subset, label_id): media=Image(data=image), annotation_scene=AnnotationSceneEntity( annotations=[Annotation(bbox, labels=[ScoredLabel(label=labels[label_id])])], - kind=AnnotationSceneKind.ANNOTATION + kind=AnnotationSceneKind.ANNOTATION, ), subset=subset, ) @@ -127,153 +130,145 @@ def get_image(i, subset, label_id): ] old = old_train + old_val new = new_train + new_val - if data_type == 'old': + if data_type == "old": return DatasetEntity(old), [labels[0]] else: return DatasetEntity(old + new), labels def main(args): - logger.info('Train initial model with OLD dataset') - dataset, labels_list = load_test_dataset('old') + logger.info("Train initial model with OLD dataset") + dataset, labels_list = load_test_dataset("old") labels_schema = LabelSchemaEntity.from_labels(labels_list) - logger.info(f'Train dataset: {len(dataset.get_subset(Subset.TRAINING))} items') - logger.info(f'Validation dataset: {len(dataset.get_subset(Subset.VALIDATION))} items') + logger.info(f"Train dataset: {len(dataset.get_subset(Subset.TRAINING))} items") + logger.info(f"Validation dataset: {len(dataset.get_subset(Subset.VALIDATION))} items") - logger.info('Load model template') + logger.info("Load model template") model_template = parse_model_template(args.template_file_path) - logger.info('Set hyperparameters') + logger.info("Set hyperparameters") params = create(model_template.hyper_parameters.data) params.learning_parameters.num_iters = 5 params.learning_parameters.learning_rate = 
0.01 params.learning_parameters.learning_rate_warmup_iters = 1 params.learning_parameters.batch_size = 4 - logger.info('Setup environment') + logger.info("Setup environment") environment = TaskEnvironment( - model=None, - hyper_parameters=params, - label_schema=labels_schema, - model_template=model_template + model=None, hyper_parameters=params, label_schema=labels_schema, model_template=model_template ) - logger.info('Create base Task') + logger.info("Create base Task") task_impl_path = model_template.entrypoints.base task_cls = get_task_class(task_impl_path) task = task_cls(task_environment=environment) - logger.info('Train model') + logger.info("Train model") initial_model = ModelEntity( dataset, environment.get_model_configuration(), ) task.train(dataset, initial_model) - logger.info('Class-incremental learning with OLD + NEW dataset') - dataset, labels_list = load_test_dataset('new') + logger.info("Class-incremental learning with OLD + NEW dataset") + dataset, labels_list = load_test_dataset("new") labels_schema = LabelSchemaEntity.from_labels(labels_list) - logger.info(f'Train dataset: {len(dataset.get_subset(Subset.TRAINING))} items') - logger.info(f'Validation dataset: {len(dataset.get_subset(Subset.VALIDATION))} items') + logger.info(f"Train dataset: {len(dataset.get_subset(Subset.TRAINING))} items") + logger.info(f"Validation dataset: {len(dataset.get_subset(Subset.VALIDATION))} items") - logger.info('Load model template') + logger.info("Load model template") model_template = parse_model_template(args.template_file_path) - logger.info('Set hyperparameters') + logger.info("Set hyperparameters") params = create(model_template.hyper_parameters.data) params.learning_parameters.num_iters = 5 params.learning_parameters.learning_rate = 0.01 params.learning_parameters.learning_rate_warmup_iters = 1 params.learning_parameters.batch_size = 4 - logger.info('Setup environment') + logger.info("Setup environment") environment = TaskEnvironment( - model=initial_model, - hyper_parameters=params, - label_schema=labels_schema, - model_template=model_template + model=initial_model, hyper_parameters=params, label_schema=labels_schema, model_template=model_template ) - logger.info('Create base Task') + logger.info("Create base Task") task_impl_path = model_template.entrypoints.base task_cls = get_task_class(task_impl_path) task = task_cls(task_environment=environment) - logger.info('Train model') + logger.info("Train model") output_model = ModelEntity( dataset, environment.get_model_configuration(), ) task.train(dataset, output_model) - logger.info('Get predictions on the validation set') + logger.info("Get predictions on the validation set") validation_dataset = dataset.get_subset(Subset.VALIDATION) predicted_validation_dataset = task.infer( - validation_dataset.with_empty_annotations(), - InferenceParameters(is_evaluation=True)) + validation_dataset.with_empty_annotations(), InferenceParameters(is_evaluation=True) + ) resultset = ResultSetEntity( model=output_model, ground_truth_dataset=validation_dataset, prediction_dataset=predicted_validation_dataset, ) - logger.info('Estimate quality on validation set') + logger.info("Estimate quality on validation set") task.evaluate(resultset) logger.info(str(resultset.performance)) if args.export: - logger.info('Export model') + logger.info("Export model") exported_model = ModelEntity( dataset, environment.get_model_configuration(), ) task.export(ExportType.OPENVINO, exported_model) - logger.info('Create OpenVINO Task') + logger.info("Create OpenVINO Task") 
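[Aside, not part of the patch] For context, a minimal sketch of what get_task_class() is assumed to do throughout these samples: resolve an entrypoint string such as model_template.entrypoints.base or model_template.entrypoints.openvino into an importable class. This illustrates the pattern only; it is not the repository's implementation.

import importlib

def get_task_class(path: str):
    # path is assumed to look like "package.module.ClassName" (hypothetical value)
    module_name, class_name = path.rsplit(".", 1)
    return getattr(importlib.import_module(module_name), class_name)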
environment.model = exported_model openvino_task_impl_path = model_template.entrypoints.openvino openvino_task_cls = get_task_class(openvino_task_impl_path) openvino_task = openvino_task_cls(environment) - logger.info('Get predictions on the validation set') + logger.info("Get predictions on the validation set") predicted_validation_dataset = openvino_task.infer( - validation_dataset.with_empty_annotations(), - InferenceParameters(is_evaluation=True)) + validation_dataset.with_empty_annotations(), InferenceParameters(is_evaluation=True) + ) resultset = ResultSetEntity( model=output_model, ground_truth_dataset=validation_dataset, prediction_dataset=predicted_validation_dataset, ) - logger.info('Estimate quality on validation set') + logger.info("Estimate quality on validation set") openvino_task.evaluate(resultset) logger.info(str(resultset.performance)) - logger.info('Run POT optimization') + logger.info("Run POT optimization") optimized_model = ModelEntity( dataset, environment.get_model_configuration(), ) openvino_task.optimize( - OptimizationType.POT, - dataset.get_subset(Subset.TRAINING), - optimized_model, - OptimizationParameters()) + OptimizationType.POT, dataset.get_subset(Subset.TRAINING), optimized_model, OptimizationParameters() + ) - logger.info('Get predictions on the validation set') + logger.info("Get predictions on the validation set") predicted_validation_dataset = openvino_task.infer( - validation_dataset.with_empty_annotations(), - InferenceParameters(is_evaluation=True)) + validation_dataset.with_empty_annotations(), InferenceParameters(is_evaluation=True) + ) resultset = ResultSetEntity( model=optimized_model, ground_truth_dataset=validation_dataset, prediction_dataset=predicted_validation_dataset, ) - logger.info('Performance of optimized model:') + logger.info("Performance of optimized model:") openvino_task.evaluate(resultset) logger.info(str(resultset.performance)) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main(parse_args()) or 0) diff --git a/external/model-preparation-algorithm/mpa_tasks/samples/inst_seg_cls_il_sample.py b/external/model-preparation-algorithm/mpa_tasks/samples/inst_seg_cls_il_sample.py index f66296ab922..d2511834eb1 100644 --- a/external/model-preparation-algorithm/mpa_tasks/samples/inst_seg_cls_il_sample.py +++ b/external/model-preparation-algorithm/mpa_tasks/samples/inst_seg_cls_il_sample.py @@ -7,31 +7,30 @@ import cv2 import numpy as np -from mmcv.utils import get_logger from detection_tasks.apis.detection.ote_utils import get_task_class +from mmcv.utils import get_logger from ote_sdk.configuration.helper import create from ote_sdk.entities.datasets import DatasetEntity from ote_sdk.entities.inference_parameters import InferenceParameters from ote_sdk.entities.label import Domain from ote_sdk.entities.label_schema import LabelSchemaEntity -from ote_sdk.entities.shapes.polygon import Polygon, Point from ote_sdk.entities.model import ModelEntity from ote_sdk.entities.model_template import parse_model_template from ote_sdk.entities.optimization_parameters import OptimizationParameters from ote_sdk.entities.resultset import ResultSetEntity +from ote_sdk.entities.shapes.polygon import Point, Polygon from ote_sdk.entities.subset import Subset from ote_sdk.entities.task_environment import TaskEnvironment from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType -from ote_sdk.usecases.tasks.interfaces.optimization_interface import \ - OptimizationType +from 
ote_sdk.usecases.tasks.interfaces.optimization_interface import OptimizationType -logger = get_logger(name='sample') +logger = get_logger(name="sample") def parse_args(): - parser = argparse.ArgumentParser(description='Sample showcasing the new API') - parser.add_argument('template_file_path', help='path to template file') - parser.add_argument('--export', action='store_true') + parser = argparse.ArgumentParser(description="Sample showcasing the new API") + parser.add_argument("template_file_path", help="path to template file") + parser.add_argument("--export", action="store_true") return parser.parse_args() @@ -39,8 +38,11 @@ def parse_args(): def load_test_dataset(data_type, task_type=Domain.INSTANCE_SEGMENTATION): - from ote_sdk.entities.annotation import (Annotation, AnnotationSceneEntity, - AnnotationSceneKind) + from ote_sdk.entities.annotation import ( + Annotation, + AnnotationSceneEntity, + AnnotationSceneKind, + ) from ote_sdk.entities.dataset_item import DatasetItemEntity from ote_sdk.entities.image import Image from ote_sdk.entities.label import LabelEntity @@ -51,21 +53,21 @@ def gen_circle_image(resolution): w, h = resolution image = np.full([h, w, 3], fill_value=255, dtype=np.uint8) gt = np.full([h, w, 1], fill_value=0, dtype=np.uint8) - cv2.circle(image, (int(h/2), int(w/2)), 90, (0, 0, 255), -1) - cv2.circle(gt, (int(h/2), int(w/2)), 90, 1, -1) + cv2.circle(image, (int(h / 2), int(w / 2)), 90, (0, 0, 255), -1) + cv2.circle(gt, (int(h / 2), int(w / 2)), 90, 1, -1) return (image, gt) def gen_rect_image(resolution): w, h = resolution image = np.full([h, w, 3], fill_value=255, dtype=np.uint8) gt = np.full([h, w, 1], fill_value=0, dtype=np.uint8) - cv2.rectangle(image, (int(h*0.1), int(w*0.1)), (int(h/2), int(w/2)), (0, 255, 0), -1) - cv2.rectangle(gt, (int(h*0.1), int(w*0.1)), (int(h/2), int(w/2)), 2, -1) + cv2.rectangle(image, (int(h * 0.1), int(w * 0.1)), (int(h / 2), int(w / 2)), (0, 255, 0), -1) + cv2.rectangle(gt, (int(h * 0.1), int(w * 0.1)), (int(h / 2), int(w / 2)), 2, -1) return (image, gt) labels = [ - LabelEntity(name='circle', domain=task_type, id=1), # OLD class - LabelEntity(name='rect', domain=task_type, id=2), + LabelEntity(name="circle", domain=task_type, id=1), # OLD class + LabelEntity(name="rect", domain=task_type, id=2), ] def get_image(type, subset, label_id): @@ -73,19 +75,15 @@ def get_image(type, subset, label_id): height, width = 1280, 720 if label_id == 1: image, gt = gen_circle_image((height, width)) - if type == 'new' and subset == Subset.TRAINING: - ignored_labels = [ - LabelEntity(name='rect', domain=Domain.INSTANCE_SEGMENTATION, id=2) - ] + if type == "new" and subset == Subset.TRAINING: + ignored_labels = [LabelEntity(name="rect", domain=Domain.INSTANCE_SEGMENTATION, id=2)] else: image, gt = gen_rect_image((height, width)) height, width = gt.shape[:2] - label_mask = (gt == label_id) + label_mask = gt == label_id label_index_map = label_mask.astype(np.uint8) - contours, hierarchies = cv2.findContours( - label_index_map, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE - ) + contours, hierarchies = cv2.findContours(label_index_map, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE) for contour, hierarchy in zip(contours, hierarchies[0]): if hierarchy[3] != -1: @@ -95,241 +93,230 @@ def get_image(type, subset, label_id): if len(contour) <= 2: continue - points = [ - Point(x=point[0][0] / width, y=point[0][1] / height) - for point in contour - ] + points = [Point(x=point[0][0] / width, y=point[0][1] / height) for point in contour] return DatasetItemEntity( 
media=Image(data=image), annotation_scene=AnnotationSceneEntity( - annotations=[Annotation(Polygon(points=points), labels=[ScoredLabel(label=labels[label_id-1])])], - kind=AnnotationSceneKind.ANNOTATION + annotations=[Annotation(Polygon(points=points), labels=[ScoredLabel(label=labels[label_id - 1])])], + kind=AnnotationSceneKind.ANNOTATION, ), subset=subset, ignored_labels=ignored_labels, ) old_train = [ - get_image('old', Subset.TRAINING, 1), - get_image('old', Subset.TRAINING, 1), - get_image('old', Subset.TRAINING, 1), - get_image('old', Subset.TRAINING, 1), - get_image('old', Subset.TRAINING, 1), - get_image('old', Subset.TRAINING, 1), - get_image('old', Subset.TRAINING, 1), - get_image('old', Subset.TRAINING, 1), + get_image("old", Subset.TRAINING, 1), + get_image("old", Subset.TRAINING, 1), + get_image("old", Subset.TRAINING, 1), + get_image("old", Subset.TRAINING, 1), + get_image("old", Subset.TRAINING, 1), + get_image("old", Subset.TRAINING, 1), + get_image("old", Subset.TRAINING, 1), + get_image("old", Subset.TRAINING, 1), ] old_val = [ - get_image('old', Subset.VALIDATION, 1), - get_image('old', Subset.VALIDATION, 1), - get_image('old', Subset.VALIDATION, 1), - get_image('old', Subset.VALIDATION, 1), - get_image('old', Subset.VALIDATION, 1), - get_image('old', Subset.VALIDATION, 1), - get_image('old', Subset.VALIDATION, 1), - get_image('old', Subset.VALIDATION, 1), + get_image("old", Subset.VALIDATION, 1), + get_image("old", Subset.VALIDATION, 1), + get_image("old", Subset.VALIDATION, 1), + get_image("old", Subset.VALIDATION, 1), + get_image("old", Subset.VALIDATION, 1), + get_image("old", Subset.VALIDATION, 1), + get_image("old", Subset.VALIDATION, 1), + get_image("old", Subset.VALIDATION, 1), ] new_train = [ - get_image('new', Subset.TRAINING, 1), - get_image('new', Subset.TRAINING, 1), - get_image('new', Subset.TRAINING, 1), - get_image('new', Subset.TRAINING, 1), - get_image('new', Subset.TRAINING, 2), - get_image('new', Subset.TRAINING, 2), - get_image('new', Subset.TRAINING, 2), - get_image('new', Subset.TRAINING, 2), + get_image("new", Subset.TRAINING, 1), + get_image("new", Subset.TRAINING, 1), + get_image("new", Subset.TRAINING, 1), + get_image("new", Subset.TRAINING, 1), + get_image("new", Subset.TRAINING, 2), + get_image("new", Subset.TRAINING, 2), + get_image("new", Subset.TRAINING, 2), + get_image("new", Subset.TRAINING, 2), ] new_val = [ - get_image('new', Subset.VALIDATION, 1), - get_image('new', Subset.VALIDATION, 1), - get_image('new', Subset.VALIDATION, 1), - get_image('new', Subset.VALIDATION, 1), - get_image('new', Subset.VALIDATION, 2), - get_image('new', Subset.VALIDATION, 2), - get_image('new', Subset.VALIDATION, 2), - get_image('new', Subset.VALIDATION, 2), + get_image("new", Subset.VALIDATION, 1), + get_image("new", Subset.VALIDATION, 1), + get_image("new", Subset.VALIDATION, 1), + get_image("new", Subset.VALIDATION, 1), + get_image("new", Subset.VALIDATION, 2), + get_image("new", Subset.VALIDATION, 2), + get_image("new", Subset.VALIDATION, 2), + get_image("new", Subset.VALIDATION, 2), ] new_test = [ - get_image('old', Subset.TESTING, 1), - get_image('old', Subset.TESTING, 1), - get_image('old', Subset.TESTING, 1), - get_image('old', Subset.TESTING, 1), - get_image('old', Subset.TESTING, 1), - get_image('old', Subset.TESTING, 1), - get_image('old', Subset.TESTING, 1), - get_image('old', Subset.TESTING, 1), - get_image('new', Subset.TESTING, 1), - get_image('new', Subset.TESTING, 1), - get_image('new', Subset.TESTING, 2), - get_image('new', 
Subset.TESTING, 2), - get_image('new', Subset.TESTING, 2), - get_image('new', Subset.TESTING, 2), - get_image('new', Subset.TESTING, 2), - get_image('new', Subset.TESTING, 2), - get_image('new', Subset.TESTING, 2), - get_image('new', Subset.TESTING, 2), - get_image('new', Subset.TESTING, 2), - get_image('new', Subset.TESTING, 2), + get_image("old", Subset.TESTING, 1), + get_image("old", Subset.TESTING, 1), + get_image("old", Subset.TESTING, 1), + get_image("old", Subset.TESTING, 1), + get_image("old", Subset.TESTING, 1), + get_image("old", Subset.TESTING, 1), + get_image("old", Subset.TESTING, 1), + get_image("old", Subset.TESTING, 1), + get_image("new", Subset.TESTING, 1), + get_image("new", Subset.TESTING, 1), + get_image("new", Subset.TESTING, 2), + get_image("new", Subset.TESTING, 2), + get_image("new", Subset.TESTING, 2), + get_image("new", Subset.TESTING, 2), + get_image("new", Subset.TESTING, 2), + get_image("new", Subset.TESTING, 2), + get_image("new", Subset.TESTING, 2), + get_image("new", Subset.TESTING, 2), + get_image("new", Subset.TESTING, 2), + get_image("new", Subset.TESTING, 2), ] old = old_train + old_val new = new_train + new_val - if data_type == 'old': - return DatasetEntity(old*5), [labels[0]] - elif data_type == 'test': - return DatasetEntity(new_test*5), labels + if data_type == "old": + return DatasetEntity(old * 5), [labels[0]] + elif data_type == "test": + return DatasetEntity(new_test * 5), labels else: - return DatasetEntity((old*5 + new*3)), labels + return DatasetEntity((old * 5 + new * 3)), labels def main(args): - logger.info('Load model template') + logger.info("Load model template") model_template = parse_model_template(args.template_file_path) task_type = model_template.task_type.domain - logger.info('Train initial model with OLD dataset') - dataset, labels_list = load_test_dataset('old', task_type) + logger.info("Train initial model with OLD dataset") + dataset, labels_list = load_test_dataset("old", task_type) labels_schema = LabelSchemaEntity.from_labels(labels_list) - logger.info(f'Train dataset: {len(dataset.get_subset(Subset.TRAINING))} items') - logger.info(f'Validation dataset: {len(dataset.get_subset(Subset.VALIDATION))} items') + logger.info(f"Train dataset: {len(dataset.get_subset(Subset.TRAINING))} items") + logger.info(f"Validation dataset: {len(dataset.get_subset(Subset.VALIDATION))} items") - logger.info('Set hyperparameters') + logger.info("Set hyperparameters") params = create(model_template.hyper_parameters.data) params.learning_parameters.num_iters = 5 params.learning_parameters.learning_rate = 0.01 params.learning_parameters.learning_rate_warmup_iters = 1 params.learning_parameters.batch_size = 4 - logger.info('Setup environment') + logger.info("Setup environment") environment = TaskEnvironment( - model=None, - hyper_parameters=params, - label_schema=labels_schema, - model_template=model_template + model=None, hyper_parameters=params, label_schema=labels_schema, model_template=model_template ) - logger.info('Create base Task') + logger.info("Create base Task") task_impl_path = model_template.entrypoints.base task_cls = get_task_class(task_impl_path) task = task_cls(task_environment=environment) - logger.info('Train model') + logger.info("Train model") initial_model = ModelEntity( dataset, environment.get_model_configuration(), ) task.train(dataset, initial_model) - logger.info('Class-incremental learning with OLD + NEW dataset') - dataset, labels_list = load_test_dataset('new', task_type) + logger.info("Class-incremental learning with 
OLD + NEW dataset") + dataset, labels_list = load_test_dataset("new", task_type) labels_schema = LabelSchemaEntity.from_labels(labels_list) - logger.info(f'Train dataset: {len(dataset.get_subset(Subset.TRAINING))} items') - logger.info(f'Validation dataset: {len(dataset.get_subset(Subset.VALIDATION))} items') + logger.info(f"Train dataset: {len(dataset.get_subset(Subset.TRAINING))} items") + logger.info(f"Validation dataset: {len(dataset.get_subset(Subset.VALIDATION))} items") - logger.info('Load model template') + logger.info("Load model template") model_template = parse_model_template(args.template_file_path) - logger.info('Set hyperparameters') + logger.info("Set hyperparameters") params = create(model_template.hyper_parameters.data) params.learning_parameters.num_iters = 5 params.learning_parameters.learning_rate = 0.015 params.learning_parameters.learning_rate_warmup_iters = 1 params.learning_parameters.batch_size = 4 - logger.info('Setup environment') + logger.info("Setup environment") environment = TaskEnvironment( - model=initial_model, - hyper_parameters=params, - label_schema=labels_schema, - model_template=model_template + model=initial_model, hyper_parameters=params, label_schema=labels_schema, model_template=model_template ) - logger.info('Create base Task') + logger.info("Create base Task") task_impl_path = model_template.entrypoints.base task_cls = get_task_class(task_impl_path) task = task_cls(task_environment=environment) - logger.info('Train model') + logger.info("Train model") output_model = ModelEntity( dataset, environment.get_model_configuration(), ) task.train(dataset, output_model) - logger.info('Get predictions on the test set') - testset, _ = load_test_dataset('test', task_type) + logger.info("Get predictions on the test set") + testset, _ = load_test_dataset("test", task_type) eval_dataset = testset.get_subset(Subset.TESTING) predicted_validation_dataset = task.infer( - eval_dataset.with_empty_annotations(), - InferenceParameters(is_evaluation=True)) + eval_dataset.with_empty_annotations(), InferenceParameters(is_evaluation=True) + ) resultset = ResultSetEntity( model=output_model, ground_truth_dataset=eval_dataset, prediction_dataset=predicted_validation_dataset, ) - logger.info('Estimate quality on validation set') + logger.info("Estimate quality on validation set") task.evaluate(resultset) logger.info(str(resultset.performance)) if args.export: - logger.info('Export model') + logger.info("Export model") exported_model = ModelEntity( dataset, environment.get_model_configuration(), ) task.export(ExportType.OPENVINO, exported_model) - logger.info('Create OpenVINO Task') + logger.info("Create OpenVINO Task") environment.model = exported_model openvino_task_impl_path = model_template.entrypoints.openvino openvino_task_cls = get_task_class(openvino_task_impl_path) openvino_task = openvino_task_cls(environment) - logger.info('Get predictions on the validation set') + logger.info("Get predictions on the validation set") predicted_validation_dataset = openvino_task.infer( - eval_dataset.with_empty_annotations(), - InferenceParameters(is_evaluation=True)) + eval_dataset.with_empty_annotations(), InferenceParameters(is_evaluation=True) + ) resultset = ResultSetEntity( model=output_model, ground_truth_dataset=eval_dataset, prediction_dataset=predicted_validation_dataset, ) - logger.info('Estimate quality on validation set') + logger.info("Estimate quality on validation set") openvino_task.evaluate(resultset) logger.info(str(resultset.performance)) - logger.info('Run POT 
optimization') + logger.info("Run POT optimization") optimized_model = ModelEntity( dataset, environment.get_model_configuration(), ) openvino_task.optimize( - OptimizationType.POT, - dataset.get_subset(Subset.TRAINING), - optimized_model, - OptimizationParameters()) + OptimizationType.POT, dataset.get_subset(Subset.TRAINING), optimized_model, OptimizationParameters() + ) - logger.info('Get predictions on the validation set') + logger.info("Get predictions on the validation set") predicted_validation_dataset = openvino_task.infer( - eval_dataset.with_empty_annotations(), - InferenceParameters(is_evaluation=True)) + eval_dataset.with_empty_annotations(), InferenceParameters(is_evaluation=True) + ) resultset = ResultSetEntity( model=optimized_model, ground_truth_dataset=eval_dataset, prediction_dataset=predicted_validation_dataset, ) - logger.info('Performance of optimized model:') + logger.info("Performance of optimized model:") openvino_task.evaluate(resultset) logger.info(str(resultset.performance)) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main(parse_args()) or 0) diff --git a/external/model-preparation-algorithm/mpa_tasks/samples/seg_cls_il_sample.py b/external/model-preparation-algorithm/mpa_tasks/samples/seg_cls_il_sample.py index 126450de891..c8dbbf6921f 100644 --- a/external/model-preparation-algorithm/mpa_tasks/samples/seg_cls_il_sample.py +++ b/external/model-preparation-algorithm/mpa_tasks/samples/seg_cls_il_sample.py @@ -8,7 +8,6 @@ import cv2 import numpy as np from mmcv.utils import get_logger -from segmentation_tasks.apis.segmentation.ote_utils import get_task_class from ote_sdk.configuration.helper import create from ote_sdk.entities.datasets import DatasetEntity from ote_sdk.entities.inference_parameters import InferenceParameters @@ -21,16 +20,16 @@ from ote_sdk.entities.subset import Subset from ote_sdk.entities.task_environment import TaskEnvironment from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType -from ote_sdk.usecases.tasks.interfaces.optimization_interface import \ - OptimizationType +from ote_sdk.usecases.tasks.interfaces.optimization_interface import OptimizationType +from segmentation_tasks.apis.segmentation.ote_utils import get_task_class -logger = get_logger(name='sample') +logger = get_logger(name="sample") def parse_args(): - parser = argparse.ArgumentParser(description='Sample showcasing the new API') - parser.add_argument('template_file_path', help='path to template file') - parser.add_argument('--export', action='store_true') + parser = argparse.ArgumentParser(description="Sample showcasing the new API") + parser.add_argument("template_file_path", help="path to template file") + parser.add_argument("--export", action="store_true") return parser.parse_args() @@ -38,8 +37,11 @@ def parse_args(): def load_test_dataset(data_type): - from ote_sdk.entities.annotation import (Annotation, AnnotationSceneEntity, - AnnotationSceneKind) + from ote_sdk.entities.annotation import ( + Annotation, + AnnotationSceneEntity, + AnnotationSceneKind, + ) from ote_sdk.entities.dataset_item import DatasetItemEntity from ote_sdk.entities.image import Image from ote_sdk.entities.label import LabelEntity @@ -51,40 +53,36 @@ def gen_circle_image(resolution): w, h = resolution image = np.full([h, w, 3], fill_value=128, dtype=np.uint8) gt = np.full([h, w, 1], fill_value=0, dtype=np.uint8) - cv2.circle(image, (int(h/2), int(w/2)), 90, (0, 0, 255), -1) - cv2.circle(gt, (int(h/2), int(w/2)), 90, 1, -1) + cv2.circle(image, (int(h / 2), int(w 
/ 2)), 90, (0, 0, 255), -1) + cv2.circle(gt, (int(h / 2), int(w / 2)), 90, 1, -1) return (image, gt) def gen_rect_image(resolution): w, h = resolution image = np.full([h, w, 3], fill_value=128, dtype=np.uint8) gt = np.full([h, w, 1], fill_value=0, dtype=np.uint8) - cv2.rectangle(image, (int(h*0.1), int(w*0.1)), (int(h/2), int(w/2)), (0, 255, 0), -1) - cv2.rectangle(gt, (int(h*0.1), int(w*0.1)), (int(h/2), int(w/2)), 2, -1) + cv2.rectangle(image, (int(h * 0.1), int(w * 0.1)), (int(h / 2), int(w / 2)), (0, 255, 0), -1) + cv2.rectangle(gt, (int(h * 0.1), int(w * 0.1)), (int(h / 2), int(w / 2)), 2, -1) return (image, gt) labels = [ - LabelEntity(name='circle', domain=Domain.SEGMENTATION, id=1), # OLD class - LabelEntity(name='rect', domain=Domain.SEGMENTATION, id=2), + LabelEntity(name="circle", domain=Domain.SEGMENTATION, id=1), # OLD class + LabelEntity(name="rect", domain=Domain.SEGMENTATION, id=2), ] def get_image(type, subset, label_id): ignored_labels = [] if label_id == 1: image, gt = gen_circle_image((640, 480)) - if type == 'new' and subset == Subset.TRAINING: - ignored_labels = [ - LabelEntity(name='rect', domain=Domain.SEGMENTATION, id=2) - ] + if type == "new" and subset == Subset.TRAINING: + ignored_labels = [LabelEntity(name="rect", domain=Domain.SEGMENTATION, id=2)] else: image, gt = gen_rect_image((640, 480)) height, width = gt.shape[:2] - label_mask = (gt == label_id) + label_mask = gt == label_id label_index_map = label_mask.astype(np.uint8) - contours, hierarchies = cv2.findContours( - label_index_map, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE - ) + contours, hierarchies = cv2.findContours(label_index_map, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE) for contour, hierarchy in zip(contours, hierarchies[0]): if hierarchy[3] != -1: continue @@ -93,210 +91,203 @@ def get_image(type, subset, label_id): if len(contour) <= 2: continue - points = [ - Point(x=point[0][0] / width, y=point[0][1] / height) - for point in contour - ] + points = [Point(x=point[0][0] / width, y=point[0][1] / height) for point in contour] return DatasetItemEntity( media=Image(data=image), annotation_scene=AnnotationSceneEntity( - annotations=[Annotation(Polygon(points=points), labels=[ScoredLabel(label=labels[label_id-1])])], - kind=AnnotationSceneKind.ANNOTATION + annotations=[Annotation(Polygon(points=points), labels=[ScoredLabel(label=labels[label_id - 1])])], + kind=AnnotationSceneKind.ANNOTATION, ), subset=subset, ignored_labels=ignored_labels, ) old_train = [ - get_image('old', Subset.TRAINING, 1), - get_image('old', Subset.TRAINING, 1), - get_image('old', Subset.TRAINING, 1), - get_image('old', Subset.TRAINING, 1), - get_image('old', Subset.TRAINING, 1), - get_image('old', Subset.TRAINING, 1), - get_image('old', Subset.TRAINING, 1), - get_image('old', Subset.TRAINING, 1), + get_image("old", Subset.TRAINING, 1), + get_image("old", Subset.TRAINING, 1), + get_image("old", Subset.TRAINING, 1), + get_image("old", Subset.TRAINING, 1), + get_image("old", Subset.TRAINING, 1), + get_image("old", Subset.TRAINING, 1), + get_image("old", Subset.TRAINING, 1), + get_image("old", Subset.TRAINING, 1), ] old_val = [ - get_image('old', Subset.VALIDATION, 1), - get_image('old', Subset.VALIDATION, 1), - get_image('old', Subset.VALIDATION, 1), - get_image('old', Subset.VALIDATION, 1), - get_image('old', Subset.VALIDATION, 1), - get_image('old', Subset.VALIDATION, 1), - get_image('old', Subset.VALIDATION, 1), - get_image('old', Subset.VALIDATION, 1), + get_image("old", Subset.VALIDATION, 1), + get_image("old", 
Subset.VALIDATION, 1), + get_image("old", Subset.VALIDATION, 1), + get_image("old", Subset.VALIDATION, 1), + get_image("old", Subset.VALIDATION, 1), + get_image("old", Subset.VALIDATION, 1), + get_image("old", Subset.VALIDATION, 1), + get_image("old", Subset.VALIDATION, 1), ] new_train = [ - get_image('new', Subset.TRAINING, 1), - get_image('new', Subset.TRAINING, 1), - get_image('new', Subset.TRAINING, 1), - get_image('new', Subset.TRAINING, 1), - get_image('new', Subset.TRAINING, 2), - get_image('new', Subset.TRAINING, 2), - get_image('new', Subset.TRAINING, 2), - get_image('new', Subset.TRAINING, 2), + get_image("new", Subset.TRAINING, 1), + get_image("new", Subset.TRAINING, 1), + get_image("new", Subset.TRAINING, 1), + get_image("new", Subset.TRAINING, 1), + get_image("new", Subset.TRAINING, 2), + get_image("new", Subset.TRAINING, 2), + get_image("new", Subset.TRAINING, 2), + get_image("new", Subset.TRAINING, 2), ] new_val = [ - get_image('new', Subset.VALIDATION, 1), - get_image('new', Subset.VALIDATION, 1), - get_image('new', Subset.VALIDATION, 1), - get_image('new', Subset.VALIDATION, 1), - get_image('new', Subset.VALIDATION, 2), - get_image('new', Subset.VALIDATION, 2), - get_image('new', Subset.VALIDATION, 2), - get_image('new', Subset.VALIDATION, 2), + get_image("new", Subset.VALIDATION, 1), + get_image("new", Subset.VALIDATION, 1), + get_image("new", Subset.VALIDATION, 1), + get_image("new", Subset.VALIDATION, 1), + get_image("new", Subset.VALIDATION, 2), + get_image("new", Subset.VALIDATION, 2), + get_image("new", Subset.VALIDATION, 2), + get_image("new", Subset.VALIDATION, 2), ] old = old_train + old_val new = new_train + new_val - if data_type == 'old': - return DatasetEntity(old*10), [labels[0]] + if data_type == "old": + return DatasetEntity(old * 10), [labels[0]] else: - return DatasetEntity((old*10 + new*10)), labels + return DatasetEntity((old * 10 + new * 10)), labels def main(args): - logger.info('Train initial model with OLD dataset') - dataset, labels_list = load_test_dataset('old') + logger.info("Train initial model with OLD dataset") + dataset, labels_list = load_test_dataset("old") labels_schema = LabelSchemaEntity.from_labels(labels_list) - logger.info(f'Train dataset: {len(dataset.get_subset(Subset.TRAINING))} items') - logger.info(f'Validation dataset: {len(dataset.get_subset(Subset.VALIDATION))} items') + logger.info(f"Train dataset: {len(dataset.get_subset(Subset.TRAINING))} items") + logger.info(f"Validation dataset: {len(dataset.get_subset(Subset.VALIDATION))} items") - logger.info('Load model template') + logger.info("Load model template") model_template = parse_model_template(args.template_file_path) - logger.info('Set hyperparameters') + logger.info("Set hyperparameters") params = create(model_template.hyper_parameters.data) params.learning_parameters.num_iters = 10 params.learning_parameters.batch_size = 8 - logger.info('Setup environment') + logger.info("Setup environment") environment = TaskEnvironment( - model=None, - hyper_parameters=params, - label_schema=labels_schema, - model_template=model_template + model=None, hyper_parameters=params, label_schema=labels_schema, model_template=model_template ) - logger.info('Create base Task') + logger.info("Create base Task") task_impl_path = model_template.entrypoints.base task_cls = get_task_class(task_impl_path) task = task_cls(task_environment=environment) task.freeze = False - logger.info('Train model') + logger.info("Train model") initial_model = ModelEntity( dataset, 
environment.get_model_configuration(), ) task.train(dataset, initial_model) - logger.info('Class-incremental learning with OLD + NEW dataset') - dataset, labels_list = load_test_dataset('new') + logger.info("Class-incremental learning with OLD + NEW dataset") + dataset, labels_list = load_test_dataset("new") labels_schema = LabelSchemaEntity.from_labels(labels_list) - logger.info(f'Train dataset: {len(dataset.get_subset(Subset.TRAINING))} items') - logger.info(f'Validation dataset: {len(dataset.get_subset(Subset.VALIDATION))} items') + logger.info(f"Train dataset: {len(dataset.get_subset(Subset.TRAINING))} items") + logger.info(f"Validation dataset: {len(dataset.get_subset(Subset.VALIDATION))} items") - logger.info('Load model template') + logger.info("Load model template") model_template = parse_model_template(args.template_file_path) - logger.info('Set hyperparameters') + logger.info("Set hyperparameters") params = create(model_template.hyper_parameters.data) params.learning_parameters.num_iters = 10 params.learning_parameters.batch_size = 8 - logger.info('Setup environment') - environment = TaskEnvironment(model=initial_model, hyper_parameters=params, - label_schema=labels_schema, model_template=model_template) + logger.info("Setup environment") + environment = TaskEnvironment( + model=initial_model, hyper_parameters=params, label_schema=labels_schema, model_template=model_template + ) environment.model = ModelEntity( train_dataset=dataset, configuration=environment.get_model_configuration(), - model_adapters=initial_model.model_adapters + model_adapters=initial_model.model_adapters, ) - logger.info('Create base Task') + logger.info("Create base Task") task_impl_path = model_template.entrypoints.base task_cls = get_task_class(task_impl_path) task = task_cls(task_environment=environment) - logger.info('Train model') + logger.info("Train model") output_model = ModelEntity( dataset, environment.get_model_configuration(), ) task.train(dataset, output_model) - logger.info('Get predictions on the validation set') + logger.info("Get predictions on the validation set") validation_dataset = dataset.get_subset(Subset.VALIDATION) predicted_validation_dataset = task.infer( - validation_dataset.with_empty_annotations(), - InferenceParameters(is_evaluation=True)) + validation_dataset.with_empty_annotations(), InferenceParameters(is_evaluation=True) + ) resultset = ResultSetEntity( model=output_model, ground_truth_dataset=validation_dataset, prediction_dataset=predicted_validation_dataset, ) - logger.info('Estimate quality on validation set') + logger.info("Estimate quality on validation set") task.evaluate(resultset) logger.info(str(resultset.performance)) if args.export: - logger.info('Export model') + logger.info("Export model") exported_model = ModelEntity( dataset, environment.get_model_configuration(), ) task.export(ExportType.OPENVINO, exported_model) - logger.info('Create OpenVINO Task') + logger.info("Create OpenVINO Task") environment.model = exported_model openvino_task_impl_path = model_template.entrypoints.openvino openvino_task_cls = get_task_class(openvino_task_impl_path) openvino_task = openvino_task_cls(environment) - logger.info('Get predictions on the validation set') + logger.info("Get predictions on the validation set") predicted_validation_dataset = openvino_task.infer( - validation_dataset.with_empty_annotations(), - InferenceParameters(is_evaluation=True)) + validation_dataset.with_empty_annotations(), InferenceParameters(is_evaluation=True) + ) resultset = ResultSetEntity( 
            model=output_model,
            ground_truth_dataset=validation_dataset,
            prediction_dataset=predicted_validation_dataset,
        )
-        logger.info('Estimate quality on validation set')
+        logger.info("Estimate quality on validation set")
         openvino_task.evaluate(resultset)
         logger.info(str(resultset.performance))

-        logger.info('Run POT optimization')
+        logger.info("Run POT optimization")
         optimized_model = ModelEntity(
             dataset,
             environment.get_model_configuration(),
         )
         openvino_task.optimize(
-            OptimizationType.POT,
-            dataset.get_subset(Subset.TRAINING),
-            optimized_model,
-            OptimizationParameters())
+            OptimizationType.POT, dataset.get_subset(Subset.TRAINING), optimized_model, OptimizationParameters()
+        )

-        logger.info('Get predictions on the validation set')
+        logger.info("Get predictions on the validation set")
         predicted_validation_dataset = openvino_task.infer(
-            validation_dataset.with_empty_annotations(),
-            InferenceParameters(is_evaluation=True))
+            validation_dataset.with_empty_annotations(), InferenceParameters(is_evaluation=True)
+        )
         resultset = ResultSetEntity(
             model=optimized_model,
             ground_truth_dataset=validation_dataset,
             prediction_dataset=predicted_validation_dataset,
         )
-        logger.info('Performance of optimized model:')
+        logger.info("Performance of optimized model:")
         openvino_task.evaluate(resultset)
         logger.info(str(resultset.performance))


-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main(parse_args()) or 0)
diff --git a/external/model-preparation-algorithm/mpa_tasks/utils/__init__.py b/external/model-preparation-algorithm/mpa_tasks/utils/__init__.py
index 55faa3b3f9b..6ae084b01d4 100644
--- a/external/model-preparation-algorithm/mpa_tasks/utils/__init__.py
+++ b/external/model-preparation-algorithm/mpa_tasks/utils/__init__.py
@@ -3,3 +3,5 @@
 #

 from .runner import EpochRunnerWithCancel
+
+__all__ = ["EpochRunnerWithCancel"]
diff --git a/external/model-preparation-algorithm/mpa_tasks/utils/data_utils.py b/external/model-preparation-algorithm/mpa_tasks/utils/data_utils.py
index 5545f6d1c99..8eb968a07ec 100644
--- a/external/model-preparation-algorithm/mpa_tasks/utils/data_utils.py
+++ b/external/model-preparation-algorithm/mpa_tasks/utils/data_utils.py
@@ -2,11 +2,6 @@
 # SPDX-License-Identifier: Apache-2.0
 #

-from typing import Union, Iterable
-
-import cv2
-import numpy as np
-
 from mpa.utils.logger import get_logger

 logger = get_logger()
@@ -32,12 +27,4 @@ def get_old_new_img_indices(labels, new_classes, dataset):
             ids_new.append(i)
         else:
             ids_old.append(i)
-    return {'old': ids_old, 'new': ids_new}
-
-
-def get_actmap(saliency_map: Union[np.ndarray, Iterable, int, float],
-               output_res: Union[tuple, list]):
-    saliency_map = cv2.resize(saliency_map, output_res)
-    saliency_map = cv2.applyColorMap(saliency_map, cv2.COLORMAP_JET)
-    saliency_map = cv2.cvtColor(saliency_map, cv2.COLOR_BGR2RGB)
-    return saliency_map
+    return {"old": ids_old, "new": ids_new}
diff --git a/external/model-preparation-algorithm/mpa_tasks/utils/runner.py b/external/model-preparation-algorithm/mpa_tasks/utils/runner.py
index 25447642677..10220ca6e44 100644
--- a/external/model-preparation-algorithm/mpa_tasks/utils/runner.py
+++ b/external/model-preparation-algorithm/mpa_tasks/utils/runner.py
@@ -6,13 +6,10 @@
 # * https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/epoch_based_runner.py
 # * https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/iter_based_runner.py

-import time
-
 import torch.distributed as dist
 from mmcv.runner import RUNNERS, EpochBasedRunner, get_dist_info
-from torch.utils.data.dataloader import DataLoader
-
 from ote_sdk.utils.argument_checks import check_input_parameters_type
+from torch.utils.data.dataloader import DataLoader


 @RUNNERS.register_module()
@@ -29,7 +26,7 @@ def __init__(self, *args, **kwargs):
         self.distributed = True if world_size > 1 else False

     def stop(self) -> bool:
-        """ Returning a boolean to break the training loop
+        """Returning a boolean to break the training loop
         This method supports distributed training by broadcasting should_stop to other ranks
         :return: a cancellation bool
         """
@@ -46,20 +43,20 @@ def stop(self) -> bool:
     @check_input_parameters_type()
     def train(self, data_loader: DataLoader, **kwargs):
         self.model.train()
-        self.mode = 'train'
+        self.mode = "train"
         self.data_loader = data_loader
         self._max_iters = self._max_epochs * len(self.data_loader)
-        self.call_hook('before_train_epoch')
+        self.call_hook("before_train_epoch")
         # TODO: uncomment below line or resolve root cause of deadlock issue if multi-GPUs need to be supported.
         # time.sleep(2)  # Prevent possible multi-gpu deadlock during epoch transition
         for i, data_batch in enumerate(self.data_loader):
             self._inner_iter = i
-            self.call_hook('before_train_iter')
+            self.call_hook("before_train_iter")
             self.run_iter(data_batch, train_mode=True, **kwargs)
-            self.call_hook('after_train_iter')
+            self.call_hook("after_train_iter")
             if self.stop():
                 break
             self._iter += 1
-        self.call_hook('after_train_epoch')
+        self.call_hook("after_train_epoch")
         self.stop()
         self._epoch += 1
diff --git a/external/model-preparation-algorithm/setup.py b/external/model-preparation-algorithm/setup.py
index 9bfaf61a16f..0710569b0f0 100644
--- a/external/model-preparation-algorithm/setup.py
+++ b/external/model-preparation-algorithm/setup.py
@@ -3,21 +3,24 @@
 #

 import os.path as osp
-from setuptools import setup, find_packages
+
+from setuptools import find_packages, setup

 repo_root = osp.dirname(osp.realpath(__file__))

+
 def get_requirements(filename):
     requires = []
-    with open(osp.join(repo_root, filename), 'r') as f:
+    with open(osp.join(repo_root, filename), "r") as f:
         for line in f.readlines():
             requires.append(line.strip())
     return requires

-requirements = get_requirements('requirements.txt')
+
+requirements = get_requirements("requirements.txt")

 setup(
-    name='mpa_tasks',
+    name="mpa_tasks",
     packages=find_packages(),
     install_requires=requirements,
 )
diff --git a/external/model-preparation-algorithm/submodule b/external/model-preparation-algorithm/submodule
index b316488053b..aec6cdcfd3a 160000
--- a/external/model-preparation-algorithm/submodule
+++ b/external/model-preparation-algorithm/submodule
@@ -1 +1 @@
-Subproject commit b316488053b4eac67b7b9830f7806c204ed2feff
+Subproject commit aec6cdcfd3a0a6e5234afa182d2bc48dbe990dfc
diff --git a/external/model-preparation-algorithm/tests/__init__.py b/external/model-preparation-algorithm/tests/__init__.py
new file mode 100644
index 00000000000..1e19f1159d9
--- /dev/null
+++ b/external/model-preparation-algorithm/tests/__init__.py
@@ -0,0 +1,3 @@
+# Copyright (C) 2022 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
diff --git a/external/model-preparation-algorithm/tests/api_tests/test_ote_classification_api.py b/external/model-preparation-algorithm/tests/api_tests/test_ote_classification_api.py
new file mode 100644
index 00000000000..dff083f2c8b
--- /dev/null
+++ b/external/model-preparation-algorithm/tests/api_tests/test_ote_classification_api.py
@@ -0,0 +1,279 @@
+# Copyright (C) 2022 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import os.path as osp
+import random +from typing import Optional + +import cv2 as cv +import numpy as np +import pytest +from bson import ObjectId +from mpa_tasks.apis.classification import ( + ClassificationInferenceTask, + ClassificationTrainTask, +) +from ote_sdk.configuration.helper import create +from ote_sdk.entities.annotation import ( + Annotation, + AnnotationSceneEntity, + AnnotationSceneKind, +) +from ote_sdk.entities.dataset_item import DatasetItemEntity +from ote_sdk.entities.datasets import DatasetEntity +from ote_sdk.entities.id import ID +from ote_sdk.entities.image import Image +from ote_sdk.entities.inference_parameters import InferenceParameters +from ote_sdk.entities.label import Domain, LabelEntity +from ote_sdk.entities.label_schema import LabelGroup, LabelGroupType, LabelSchemaEntity +from ote_sdk.entities.model import ModelEntity +from ote_sdk.entities.model_template import parse_model_template +from ote_sdk.entities.scored_label import ScoredLabel +from ote_sdk.entities.shapes.rectangle import Rectangle +from ote_sdk.entities.subset import Subset +from ote_sdk.entities.task_environment import TaskEnvironment +from ote_sdk.entities.train_parameters import TrainParameters +from ote_sdk.test_suite.e2e_test_system import e2e_pytest_api +from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType + +from tests.mpa_common import eval + + +DEFAULT_CLS_TEMPLATE_DIR = osp.join("configs", "classification", "efficientnet_b0_cls_incr") + + +class TestMPAClsAPI: + @e2e_pytest_api + def test_reading_classification_cls_incr_model_template(self): + classification_template = [ + "efficientnet_b0_cls_incr", + "efficientnet_v2_s_cls_incr", + "mobilenet_v3_large_1_cls_incr", + ] + for model_template in classification_template: + parse_model_template(osp.join("configs", "classification", model_template, "template.yaml")) + + @staticmethod + def generate_label_schema(not_empty_labels, multilabel=False, hierarchical=False): + assert len(not_empty_labels) > 1 + + label_schema = LabelSchemaEntity() + if multilabel: + emptylabel = LabelEntity(name="Empty label", is_empty=True, domain=Domain.CLASSIFICATION) + empty_group = LabelGroup(name="empty", labels=[emptylabel], group_type=LabelGroupType.EMPTY_LABEL) + for label in not_empty_labels: + label_schema.add_group( + LabelGroup( + name=label.name, + labels=[label], + group_type=LabelGroupType.EXCLUSIVE, + ) + ) + label_schema.add_group(empty_group) + elif hierarchical: + single_label_classes = ["b", "g", "r"] + multi_label_classes = ["w", "p"] + emptylabel = LabelEntity(name="Empty label", is_empty=True, domain=Domain.CLASSIFICATION) + empty_group = LabelGroup(name="empty", labels=[emptylabel], group_type=LabelGroupType.EMPTY_LABEL) + single_labels = [] + for label in not_empty_labels: + if label.name in multi_label_classes: + label_schema.add_group( + LabelGroup( + name=label.name, + labels=[label], + group_type=LabelGroupType.EXCLUSIVE, + ) + ) + if empty_group not in label_schema.get_groups(include_empty=True): + label_schema.add_group(empty_group) + elif label.name in single_label_classes: + single_labels.append(label) + if single_labels: + single_label_group = LabelGroup( + name="labels", + labels=single_labels, + group_type=LabelGroupType.EXCLUSIVE, + ) + label_schema.add_group(single_label_group) + else: + main_group = LabelGroup( + name="labels", + labels=not_empty_labels, + group_type=LabelGroupType.EXCLUSIVE, + ) + label_schema.add_group(main_group) + return label_schema + + @staticmethod + def setup_configurable_parameters(template_dir, 
num_iters=10): + model_template = parse_model_template(osp.join(template_dir, "template.yaml")) + hyper_parameters = create(model_template.hyper_parameters.data) + hyper_parameters.learning_parameters.num_iters = num_iters + return hyper_parameters, model_template + + def init_environment(self, params, model_template, multilabel, hierarchical, number_of_images=10): + resolution = (224, 224) + if hierarchical: + colors = [(0, 255, 0), (0, 0, 255), (255, 0, 0), (0, 0, 0), (230, 230, 250)] + cls_names = ["b", "g", "r", "w", "p"] + texts = ["Blue", "Green", "Red", "White", "Purple"] + else: + colors = [(0, 255, 0), (0, 0, 255)] + cls_names = ["b", "g"] + texts = ["Blue", "Green"] + env_labels = [ + LabelEntity(name=name, domain=Domain.CLASSIFICATION, is_empty=False, id=ID(i)) + for i, name in enumerate(cls_names) + ] + + items = [] + + for _ in range(0, number_of_images): + for j, lbl in enumerate(env_labels): + class_img = np.zeros((*resolution, 3), dtype=np.uint8) + class_img[:] = colors[j] + class_img = cv.putText( + class_img, + texts[j], + (50, 50), + cv.FONT_HERSHEY_SIMPLEX, + 0.8 + j * 0.2, + colors[j - 1], + 2, + cv.LINE_AA, + ) + + image = Image(data=class_img) + labels = [ScoredLabel(label=lbl, probability=1.0)] + shapes = [Annotation(Rectangle.generate_full_box(), labels)] + annotation_scene = AnnotationSceneEntity(kind=AnnotationSceneKind.ANNOTATION, annotations=shapes) + items.append(DatasetItemEntity(media=image, annotation_scene=annotation_scene)) + + rng = random.Random() + rng.seed(100) + rng.shuffle(items) + for i, _ in enumerate(items): + subset_region = i / number_of_images + if subset_region >= 0.9: + subset = Subset.TESTING + elif subset_region >= 0.6: + subset = Subset.VALIDATION + else: + subset = Subset.TRAINING + items[i].subset = subset + + dataset = DatasetEntity(items) + labels_schema = self.generate_label_schema( + dataset.get_labels(), multilabel=multilabel, hierarchical=hierarchical + ) + environment = TaskEnvironment( + model=None, + hyper_parameters=params, + label_schema=labels_schema, + model_template=model_template, + ) + return environment, dataset + + @e2e_pytest_api + @pytest.mark.parametrize( + "multilabel,hierarchical", + [(False, False), (True, False), (False, True)], + ids=["multiclass", "multilabel", "hierarchical"], + ) + def test_training_progress_tracking(self, multilabel, hierarchical): + hyper_parameters, model_template = self.setup_configurable_parameters(DEFAULT_CLS_TEMPLATE_DIR, num_iters=5) + task_environment, dataset = self.init_environment( + hyper_parameters, model_template, multilabel, hierarchical, 20 + ) + task = ClassificationTrainTask(task_environment=task_environment) + print("Task initialized, model training starts.") + + training_progress_curve = [] + + def progress_callback(progress: float, score: Optional[float] = None): + training_progress_curve.append(progress) + + train_parameters = TrainParameters + train_parameters.update_progress = progress_callback + output_model = ModelEntity( + dataset, + task_environment.get_model_configuration(), + ) + task.train(dataset, output_model, train_parameters) + + assert len(training_progress_curve) > 0 + assert np.all(training_progress_curve[1:] >= training_progress_curve[:-1]) + + @e2e_pytest_api + @pytest.mark.parametrize( + "multilabel,hierarchical", + [(False, False), (True, False), (False, True)], + ids=["multiclass", "multilabel", "hierarchical"], + ) + def test_inference_progress_tracking(self, multilabel, hierarchical): + hyper_parameters, model_template = 
self.setup_configurable_parameters(DEFAULT_CLS_TEMPLATE_DIR, num_iters=5) + task_environment, dataset = self.init_environment( + hyper_parameters, model_template, multilabel, hierarchical, 20 + ) + task = ClassificationInferenceTask(task_environment=task_environment) + print("Task initialized, model inference starts.") + + inference_progress_curve = [] + + def progress_callback(progress: int): + inference_progress_curve.append(progress) + + inference_parameters = InferenceParameters + inference_parameters.update_progress = progress_callback + task.infer(dataset.with_empty_annotations(), inference_parameters) + + assert len(inference_progress_curve) > 0 + assert np.all(inference_progress_curve[1:] >= inference_progress_curve[:-1]) + + @e2e_pytest_api + @pytest.mark.parametrize( + "multilabel,hierarchical", + [(False, False), (True, False), (False, True)], + ids=["multiclass", "multilabel", "hierarchical"], + ) + def test_inference_task(self, multilabel, hierarchical): + # Prepare pretrained weights + hyper_parameters, model_template = self.setup_configurable_parameters(DEFAULT_CLS_TEMPLATE_DIR, num_iters=2) + classification_environment, dataset = self.init_environment( + hyper_parameters, model_template, multilabel, hierarchical, 50 + ) + val_dataset = dataset.get_subset(Subset.VALIDATION) + + train_task = ClassificationTrainTask(task_environment=classification_environment) + + training_progress_curve = [] + + def progress_callback(progress: float, score: Optional[float] = None): + training_progress_curve.append(progress) + + train_parameters = TrainParameters + train_parameters.update_progress = progress_callback + trained_model = ModelEntity( + dataset, + classification_environment.get_model_configuration(), + ) + train_task.train(dataset, trained_model, train_parameters) + performance_after_train = eval(train_task, trained_model, val_dataset) + + # Create InferenceTask + classification_environment.model = trained_model + inference_task = ClassificationInferenceTask(task_environment=classification_environment) + + performance_after_load = eval(inference_task, trained_model, val_dataset) + + assert performance_after_train == performance_after_load + + # Export + exported_model = ModelEntity( + dataset, + classification_environment.get_model_configuration(), + _id=ObjectId(), + ) + inference_task.export(ExportType.OPENVINO, exported_model) diff --git a/external/model-preparation-algorithm/tests/api_tests/test_ote_detection_api.py b/external/model-preparation-algorithm/tests/api_tests/test_ote_detection_api.py new file mode 100644 index 00000000000..2b6c67a2392 --- /dev/null +++ b/external/model-preparation-algorithm/tests/api_tests/test_ote_detection_api.py @@ -0,0 +1,253 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import glob +import os.path as osp +import random +import time +import warnings +from concurrent.futures import ThreadPoolExecutor +from typing import Optional +import numpy as np +from bson import ObjectId +from detection_tasks.apis.detection.ote_utils import generate_label_schema +from mpa_tasks.apis.detection import DetectionInferenceTask, DetectionTrainTask +from ote_sdk.configuration.helper import create +from ote_sdk.entities.annotation import ( + AnnotationSceneEntity, + AnnotationSceneKind, +) +from ote_sdk.entities.dataset_item import DatasetItemEntity +from ote_sdk.entities.datasets import DatasetEntity +from ote_sdk.entities.image import Image +from ote_sdk.entities.inference_parameters import InferenceParameters +from 
ote_sdk.entities.model import ModelEntity +from ote_sdk.entities.model_template import ( + TaskType, + parse_model_template, + task_type_to_label_domain, +) +from ote_sdk.entities.subset import Subset +from ote_sdk.entities.task_environment import TaskEnvironment +from ote_sdk.entities.train_parameters import TrainParameters +from ote_sdk.test_suite.e2e_test_system import e2e_pytest_api +from ote_sdk.tests.test_helpers import generate_random_annotated_image +from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType +from ote_sdk.utils.shape_factory import ShapeFactory + +from tests.mpa_common import eval + + +DEFAULT_DET_TEMPLATE_DIR = osp.join("configs", "detection", "mobilenetv2_atss_cls_incr") + + +class TestMPADetAPI: + """ + Collection of tests for OTE API and OTE Model Templates + """ + + @e2e_pytest_api + def test_reading_detection_cls_incr_model_template(self): + detection_template = ["mobilenetv2_atss_cls_incr"] + for model_template in detection_template: + parse_model_template(osp.join("configs", "detection", model_template, "template.yaml")) + + def init_environment(self, params, model_template, number_of_images=500, task_type=TaskType.DETECTION): + + labels_names = ("rectangle", "ellipse", "triangle") + labels_schema = generate_label_schema(labels_names, task_type_to_label_domain(task_type)) + labels_list = labels_schema.get_labels(False) + environment = TaskEnvironment( + model=None, + hyper_parameters=params, + label_schema=labels_schema, + model_template=model_template, + ) + + warnings.filterwarnings("ignore", message=".* coordinates .* are out of bounds.*") + items = [] + for i in range(0, number_of_images): + image_numpy, annos = generate_random_annotated_image( + image_width=640, + image_height=480, + labels=labels_list, + max_shapes=20, + min_size=50, + max_size=100, + random_seed=None, + ) + # Convert shapes according to task + for anno in annos: + if task_type == TaskType.INSTANCE_SEGMENTATION: + anno.shape = ShapeFactory.shape_as_polygon(anno.shape) + else: + anno.shape = ShapeFactory.shape_as_rectangle(anno.shape) + + image = Image(data=image_numpy) + annotation_scene = AnnotationSceneEntity(kind=AnnotationSceneKind.ANNOTATION, annotations=annos) + items.append(DatasetItemEntity(media=image, annotation_scene=annotation_scene)) + warnings.resetwarnings() + + rng = random.Random() + rng.shuffle(items) + for i, _ in enumerate(items): + subset_region = i / number_of_images + if subset_region >= 0.8: + subset = Subset.TESTING + elif subset_region >= 0.6: + subset = Subset.VALIDATION + else: + subset = Subset.TRAINING + items[i].subset = subset + + dataset = DatasetEntity(items) + return environment, dataset + + @staticmethod + def setup_configurable_parameters(template_dir, num_iters=10): + glb = glob.glob(f"{template_dir}/template*.yaml") + template_path = glb[0] if glb else None + if not template_path: + raise RuntimeError(f"Template YAML not found: {template_dir}") + + model_template = parse_model_template(template_path) + hyper_parameters = create(model_template.hyper_parameters.data) + hyper_parameters.learning_parameters.num_iters = num_iters + hyper_parameters.postprocessing.result_based_confidence_threshold = False + hyper_parameters.postprocessing.confidence_threshold = 0.1 + return hyper_parameters, model_template + + @e2e_pytest_api + def test_cancel_training_detection(self): + """ + Tests starting and cancelling training. 
+
+        Flow of the test:
+        - Creates a randomly annotated project with a small dataset containing 3 classes:
+            ["rectangle", "ellipse", "triangle"].
+        - Start training and give a cancel training signal once training has started. Assert that
+            training stops within 100 seconds after that.
+        - Start training and give a cancel signal immediately. Assert that training stops within 25 seconds.
+
+        This test is expected to finish within a couple of minutes on a workstation.
+        """
+        hyper_parameters, model_template = self.setup_configurable_parameters(DEFAULT_DET_TEMPLATE_DIR, num_iters=500)
+        detection_environment, dataset = self.init_environment(hyper_parameters, model_template, 64)
+
+        detection_task = DetectionTrainTask(task_environment=detection_environment)
+
+        executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="train_thread")
+
+        output_model = ModelEntity(
+            dataset,
+            detection_environment.get_model_configuration(),
+        )
+
+        training_progress_curve = []
+
+        def progress_callback(progress: float, score: Optional[float] = None):
+            training_progress_curve.append(progress)
+
+        train_parameters = TrainParameters
+        train_parameters.update_progress = progress_callback
+
+        # Test stopping after some time
+        start_time = time.time()
+        train_future = executor.submit(detection_task.train, dataset, output_model, train_parameters)
+        # give train_thread some time to initialize the model
+        while not detection_task._is_training:
+            time.sleep(10)
+        detection_task.cancel_training()
+
+        # stopping process has to happen in less than 100 seconds
+        train_future.result()
+        assert training_progress_curve[-1] == 100
+        assert time.time() - start_time < 100, "Expected to stop within 100 seconds."
+
+        # Test stopping immediately
+        start_time = time.time()
+        train_future = executor.submit(detection_task.train, dataset, output_model)
+        detection_task.cancel_training()
+
+        train_future.result()
+        assert time.time() - start_time < 25  # stopping process has to happen in less than 25 seconds
+
+    @e2e_pytest_api
+    def test_training_progress_tracking(self):
+        hyper_parameters, model_template = self.setup_configurable_parameters(DEFAULT_DET_TEMPLATE_DIR, num_iters=5)
+        detection_environment, dataset = self.init_environment(hyper_parameters, model_template, 50)
+
+        task = DetectionTrainTask(task_environment=detection_environment)
+        print("Task initialized, model training starts.")
+
+        training_progress_curve = []
+
+        def progress_callback(progress: float, score: Optional[float] = None):
+            training_progress_curve.append(progress)
+
+        train_parameters = TrainParameters
+        train_parameters.update_progress = progress_callback
+        output_model = ModelEntity(
+            dataset,
+            detection_environment.get_model_configuration(),
+        )
+        task.train(dataset, output_model, train_parameters)
+
+        assert len(training_progress_curve) > 0
+        assert np.all(training_progress_curve[1:] >= training_progress_curve[:-1])
+
+    @e2e_pytest_api
+    def test_inference_progress_tracking(self):
+        hyper_parameters, model_template = self.setup_configurable_parameters(DEFAULT_DET_TEMPLATE_DIR, num_iters=10)
+        detection_environment, dataset = self.init_environment(hyper_parameters, model_template, 50)
+
+        task = DetectionInferenceTask(task_environment=detection_environment)
+        print("Task initialized, model inference starts.")
+        inference_progress_curve = []
+
+        def progress_callback(progress: int):
+            assert isinstance(progress, int)
+            inference_progress_curve.append(progress)
+
+        inference_parameters = InferenceParameters
+        inference_parameters.update_progress = progress_callback
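+        # `update_progress` receives integer percentages; the asserts below rely on
+        # the reported values forming a non-decreasing curve.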
task.infer(dataset.with_empty_annotations(), inference_parameters) + + assert len(inference_progress_curve) > 0 + assert np.all(inference_progress_curve[1:] >= inference_progress_curve[:-1]) + + @e2e_pytest_api + def test_inference_task(self): + # Prepare pretrained weights + hyper_parameters, model_template = self.setup_configurable_parameters(DEFAULT_DET_TEMPLATE_DIR, num_iters=2) + detection_environment, dataset = self.init_environment(hyper_parameters, model_template, 50) + val_dataset = dataset.get_subset(Subset.VALIDATION) + + train_task = DetectionTrainTask(task_environment=detection_environment) + + training_progress_curve = [] + + def progress_callback(progress: float, score: Optional[float] = None): + training_progress_curve.append(progress) + + train_parameters = TrainParameters + train_parameters.update_progress = progress_callback + trained_model = ModelEntity( + dataset, + detection_environment.get_model_configuration(), + ) + train_task.train(dataset, trained_model, train_parameters) + performance_after_train = eval(train_task, trained_model, val_dataset) + + # Create InferenceTask + detection_environment.model = trained_model + inference_task = DetectionInferenceTask(task_environment=detection_environment) + + performance_after_load = eval(inference_task, trained_model, val_dataset) + + assert performance_after_train == performance_after_load + + # Export + exported_model = ModelEntity(dataset, detection_environment.get_model_configuration(), _id=ObjectId()) + inference_task.export(ExportType.OPENVINO, exported_model) diff --git a/external/model-preparation-algorithm/tests/api_tests/test_ote_segmentation_api.py b/external/model-preparation-algorithm/tests/api_tests/test_ote_segmentation_api.py new file mode 100644 index 00000000000..98fd67986cd --- /dev/null +++ b/external/model-preparation-algorithm/tests/api_tests/test_ote_segmentation_api.py @@ -0,0 +1,293 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import os.path as osp +import random +import time +import warnings +from concurrent.futures import ThreadPoolExecutor +from typing import Optional + +import numpy as np +from bson import ObjectId +from mpa_tasks.apis.segmentation import SegmentationInferenceTask, SegmentationTrainTask +from ote_sdk.configuration.helper import create +from ote_sdk.entities.annotation import ( + Annotation, + AnnotationSceneEntity, + AnnotationSceneKind, +) +from ote_sdk.entities.color import Color +from ote_sdk.entities.dataset_item import DatasetItemEntity +from ote_sdk.entities.datasets import DatasetEntity +from ote_sdk.entities.image import Image +from ote_sdk.entities.inference_parameters import InferenceParameters +from ote_sdk.entities.label import Domain, LabelEntity +from ote_sdk.entities.label_schema import LabelGroup, LabelGroupType, LabelSchemaEntity +from ote_sdk.entities.model import ModelEntity +from ote_sdk.entities.model_template import parse_model_template +from ote_sdk.entities.shapes.ellipse import Ellipse +from ote_sdk.entities.shapes.polygon import Point, Polygon +from ote_sdk.entities.shapes.rectangle import Rectangle +from ote_sdk.entities.subset import Subset +from ote_sdk.entities.task_environment import TaskEnvironment +from ote_sdk.entities.train_parameters import TrainParameters +from ote_sdk.test_suite.e2e_test_system import e2e_pytest_api +from ote_sdk.tests.test_helpers import generate_random_annotated_image +from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType + +from tests.mpa_common import eval 
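+# `eval` is the helper shared by these API tests (tests/mpa_common.py); the name
+# shadows the Python builtin. Judging by how it is called below, it presumably
+# wraps the same infer/evaluate pattern used by the sample scripts, roughly
+# (a sketch under that assumption, not the actual helper):
+#
+#     def eval(task, model, dataset):
+#         predictions = task.infer(dataset.with_empty_annotations())
+#         resultset = ResultSetEntity(
+#             model=model, ground_truth_dataset=dataset, prediction_dataset=predictions
+#         )
+#         task.evaluate(resultset)
+#         return resultset.performance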
+ + +DEFAULT_SEG_TEMPLATE_DIR = osp.join("configs", "segmentation", "ocr-lite-hrnet-18-mod2") + + +class TestMPASegAPI: + """ + Collection of tests for OTE API and OTE Model Templates + """ + + @e2e_pytest_api + def test_reading_segmentation_cls_incr_model_template(self): + segmentation_template = [ + "ocr-lite-hrnet-18-mod2", + "ocr-lite-hrnet-s-mod2", + "ocr-lite-hrnet-x-mod3", + ] + for model_template in segmentation_template: + parse_model_template(osp.join("configs", "segmentation", model_template, "template.yaml")) + + @staticmethod + def generate_label_schema(label_names): + label_domain = Domain.SEGMENTATION + rgb = [int(i) for i in np.random.randint(0, 256, 3)] + colors = [Color(*rgb) for _ in range(len(label_names))] + not_empty_labels = [ + LabelEntity(name=name, color=colors[i], domain=label_domain, id=i) for i, name in enumerate(label_names) + ] + empty_label = LabelEntity( + name="Empty label", + color=Color(42, 43, 46), + is_empty=True, + domain=label_domain, + id=len(not_empty_labels), + ) + + label_schema = LabelSchemaEntity() + exclusive_group = LabelGroup(name="labels", labels=not_empty_labels, group_type=LabelGroupType.EXCLUSIVE) + empty_group = LabelGroup(name="empty", labels=[empty_label], group_type=LabelGroupType.EMPTY_LABEL) + label_schema.add_group(exclusive_group) + label_schema.add_group(empty_group) + return label_schema + + def init_environment(self, params, model_template, number_of_images=10): + labels_names = ("rectangle", "ellipse", "triangle") + labels_schema = self.generate_label_schema(labels_names) + labels_list = labels_schema.get_labels(False) + environment = TaskEnvironment( + model=None, + hyper_parameters=params, + label_schema=labels_schema, + model_template=model_template, + ) + + warnings.filterwarnings("ignore", message=".* coordinates .* are out of bounds.*") + items = [] + for i in range(0, number_of_images): + image_numpy, shapes = generate_random_annotated_image( + image_width=640, + image_height=480, + labels=labels_list, + max_shapes=20, + min_size=50, + max_size=100, + random_seed=None, + ) + # Convert all shapes to polygons + out_shapes = [] + for shape in shapes: + shape_labels = shape.get_labels(include_empty=True) + + in_shape = shape.shape + if isinstance(in_shape, Rectangle): + points = [ + Point(in_shape.x1, in_shape.y1), + Point(in_shape.x2, in_shape.y1), + Point(in_shape.x2, in_shape.y2), + Point(in_shape.x1, in_shape.y2), + ] + elif isinstance(in_shape, Ellipse): + points = [Point(x, y) for x, y in in_shape.get_evenly_distributed_ellipse_coordinates()] + elif isinstance(in_shape, Polygon): + points = in_shape.points + + out_shapes.append(Annotation(Polygon(points=points), labels=shape_labels)) + + image = Image(data=image_numpy) + annotation = AnnotationSceneEntity(kind=AnnotationSceneKind.ANNOTATION, annotations=out_shapes) + items.append(DatasetItemEntity(media=image, annotation_scene=annotation)) + warnings.resetwarnings() + + rng = random.Random() + rng.shuffle(items) + for i, _ in enumerate(items): + subset_region = i / number_of_images + if subset_region >= 0.8: + subset = Subset.TESTING + elif subset_region >= 0.6: + subset = Subset.VALIDATION + else: + subset = Subset.TRAINING + + items[i].subset = subset + + dataset = DatasetEntity(items) + + return environment, dataset + + @staticmethod + def setup_configurable_parameters(template_dir, num_iters=10): + model_template = parse_model_template(osp.join(template_dir, "template.yaml")) + + hyper_parameters = create(model_template.hyper_parameters.data) + 
+        hyper_parameters.learning_parameters.learning_rate_fixed_iters = 0
+        hyper_parameters.learning_parameters.learning_rate_warmup_iters = 1
+        hyper_parameters.learning_parameters.num_iters = num_iters
+        hyper_parameters.learning_parameters.num_checkpoints = 1
+
+        return hyper_parameters, model_template
+
+    @e2e_pytest_api
+    def test_cancel_training_segmentation(self):
+        """
+        Tests starting and cancelling training.
+
+        Flow of the test:
+        - Creates a randomly annotated project with a small dataset.
+        - Starts training and gives a cancel signal once training has begun. Asserts that training
+          stops within 100 seconds after that.
+        - Starts training and gives a cancel signal immediately. Asserts that training stops within 25 seconds.
+
+        This test should finish well within these limits on a workstation.
+        """
+        hyper_parameters, model_template = self.setup_configurable_parameters(DEFAULT_SEG_TEMPLATE_DIR, num_iters=200)
+        segmentation_environment, dataset = self.init_environment(hyper_parameters, model_template, 64)
+
+        segmentation_task = SegmentationTrainTask(task_environment=segmentation_environment)
+
+        executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="train_thread")
+
+        output_model = ModelEntity(
+            dataset,
+            segmentation_environment.get_model_configuration(),
+        )
+
+        training_progress_curve = []
+
+        def progress_callback(progress: float, score: Optional[float] = None):
+            training_progress_curve.append(progress)
+
+        train_parameters = TrainParameters
+        train_parameters.update_progress = progress_callback
+
+        # Test stopping after some time
+        start_time = time.time()
+        train_future = executor.submit(segmentation_task.train, dataset, output_model, train_parameters)
+        # wait (in coarse 10-second steps) until the train thread has initialized the
+        # model and actually started training before sending the cancel signal
+        while not segmentation_task._is_training:
+            time.sleep(10)
+        segmentation_task.cancel_training()
+
+        # the stopping process has to happen in less than 100 seconds
+        train_future.result()
+        assert training_progress_curve[-1] == 100
+        assert time.time() - start_time < 100, "Expected to stop within 100 seconds."
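+
+        # A bounded variant of the wait loop above (sketch only, not part of the
+        # original test): `max_wait_s` is a hypothetical timeout so the test fails
+        # fast instead of spinning forever if training never starts.
+        #   max_wait_s = 60
+        #   deadline = time.time() + max_wait_s
+        #   while not segmentation_task._is_training and time.time() < deadline:
+        #       time.sleep(1)
+        #   assert segmentation_task._is_training, "Training did not start in time."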
+
+        # Test stopping immediately
+        start_time = time.time()
+        train_future = executor.submit(segmentation_task.train, dataset, output_model)
+        segmentation_task.cancel_training()
+
+        train_future.result()
+        assert time.time() - start_time < 25  # stopping process has to happen in less than 25 seconds
+
+    @e2e_pytest_api
+    def test_training_progress_tracking(self):
+        hyper_parameters, model_template = self.setup_configurable_parameters(DEFAULT_SEG_TEMPLATE_DIR, num_iters=5)
+        segmentation_environment, dataset = self.init_environment(hyper_parameters, model_template, 12)
+
+        task = SegmentationTrainTask(task_environment=segmentation_environment)
+        print("Task initialized, model training starts.")
+
+        training_progress_curve = []
+
+        def progress_callback(progress: float, score: Optional[float] = None):
+            training_progress_curve.append(progress)
+
+        train_parameters = TrainParameters
+        train_parameters.update_progress = progress_callback
+        output_model = ModelEntity(
+            dataset,
+            segmentation_environment.get_model_configuration(),
+        )
+        task.train(dataset, output_model, train_parameters)
+
+        assert len(training_progress_curve) > 0
+        assert np.all(training_progress_curve[1:] >= training_progress_curve[:-1])
+
+    @e2e_pytest_api
+    def test_inference_progress_tracking(self):
+        hyper_parameters, model_template = self.setup_configurable_parameters(DEFAULT_SEG_TEMPLATE_DIR, num_iters=10)
+        segmentation_environment, dataset = self.init_environment(hyper_parameters, model_template, 12)
+
+        task = SegmentationInferenceTask(task_environment=segmentation_environment)
+        print("Task initialized, model inference starts.")
+
+        inference_progress_curve = []
+
+        def progress_callback(progress: int):
+            assert isinstance(progress, int)
+            inference_progress_curve.append(progress)
+
+        inference_parameters = InferenceParameters
+        inference_parameters.update_progress = progress_callback
+        task.infer(dataset.with_empty_annotations(), inference_parameters)
+
+        assert len(inference_progress_curve) > 0
+        assert np.all(inference_progress_curve[1:] >= inference_progress_curve[:-1])
+
+    @e2e_pytest_api
+    def test_inference_task(self):
+        # Prepare pretrained weights
+        hyper_parameters, model_template = self.setup_configurable_parameters(DEFAULT_SEG_TEMPLATE_DIR, num_iters=2)
+        segmentation_environment, dataset = self.init_environment(hyper_parameters, model_template, 30)
+        val_dataset = dataset.get_subset(Subset.VALIDATION)
+
+        train_task = SegmentationTrainTask(task_environment=segmentation_environment)
+
+        training_progress_curve = []
+
+        def progress_callback(progress: float, score: Optional[float] = None):
+            training_progress_curve.append(progress)
+
+        train_parameters = TrainParameters
+        train_parameters.update_progress = progress_callback
+        trained_model = ModelEntity(
+            dataset,
+            segmentation_environment.get_model_configuration(),
+        )
+        train_task.train(dataset, trained_model, train_parameters)
+        performance_after_train = eval(train_task, trained_model, val_dataset)
+
+        # Create InferenceTask
+        segmentation_environment.model = trained_model
+        inference_task = SegmentationInferenceTask(task_environment=segmentation_environment)
+
+        performance_after_load = eval(inference_task, trained_model, val_dataset)
+
+        assert performance_after_train == performance_after_load
+
+        # Export
+        exported_model = ModelEntity(dataset, segmentation_environment.get_model_configuration(), _id=ObjectId())
+        inference_task.export(ExportType.OPENVINO, exported_model)
diff --git a/external/model-preparation-algorithm/tests/config.py
b/external/model-preparation-algorithm/tests/config.py deleted file mode 100644 index 1c65369aebe..00000000000 --- a/external/model-preparation-algorithm/tests/config.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -try: - import os - from e2e import config as config_e2e - - config_e2e.repository_name = os.environ.get('TT_REPOSITORY_NAME', 'ote/training_extensions/external/model-preparation-algorithm') -except ImportError: - pass diff --git a/external/model-preparation-algorithm/tests/conftest.py b/external/model-preparation-algorithm/tests/conftest.py index 718e41b1051..dd380ff6e26 100644 --- a/external/model-preparation-algorithm/tests/conftest.py +++ b/external/model-preparation-algorithm/tests/conftest.py @@ -4,59 +4,78 @@ try: import e2e.fixtures - - from e2e.conftest_utils import * # noqa - from e2e.conftest_utils import pytest_addoption as _e2e_pytest_addoption # noqa - from e2e import config # noqa + from e2e.conftest_utils import * # noqa + from e2e.conftest_utils import pytest_addoption as _e2e_pytest_addoption # noqa + from e2e import config # noqa from e2e.utils import get_plugins_from_packages + pytest_plugins = get_plugins_from_packages([e2e]) + import os + from e2e import config as config_e2e + + config_e2e.repository_name = os.environ.get( + "TT_REPOSITORY_NAME", + "ote/training_extensions/external/model-preparation-algorithm", + ) except ImportError: _e2e_pytest_addoption = None pass -import config import pytest -from ote_sdk.test_suite.pytest_insertions import * +from ote_sdk.test_suite.pytest_insertions import ( + get_pytest_plugins_from_ote, + ote_conftest_insertion, + ote_pytest_generate_tests_insertion, + ote_pytest_addoption_insertion, +) from ote_sdk.test_suite.training_tests_common import REALLIFE_USECASE_CONSTANT pytest_plugins = get_pytest_plugins_from_ote() -ote_conftest_insertion(default_repository_name='ote/training_extensions/external/model-preparation-algorithm') +ote_conftest_insertion(default_repository_name="ote/training_extensions/external/model-preparation-algorithm") + @pytest.fixture def ote_test_domain_fx(): - return 'model-preparation-algorithm' + raise NotImplementedError("Please, implement the fixture ote_test_domain_fx in your test file") + @pytest.fixture def ote_test_scenario_fx(current_test_parameters_fx): assert isinstance(current_test_parameters_fx, dict) - if current_test_parameters_fx.get('usecase') == REALLIFE_USECASE_CONSTANT: - return 'performance' + if current_test_parameters_fx.get("usecase") == REALLIFE_USECASE_CONSTANT: + return "performance" else: - return 'integration' + return "integration" + -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def ote_templates_root_dir_fx(): import os.path as osp import logging + logger = logging.getLogger(__name__) root = osp.dirname(osp.dirname(osp.realpath(__file__))) - root = f'{root}/configs/' - logger.debug(f'overloaded ote_templates_root_dir_fx: return {root}') + root = f"{root}/configs/" + logger.debug(f"overloaded ote_templates_root_dir_fx: return {root}") return root -@pytest.fixture(scope='session') + +@pytest.fixture(scope="session") def ote_reference_root_dir_fx(): import os.path as osp import logging + logger = logging.getLogger(__name__) root = osp.dirname(osp.dirname(osp.realpath(__file__))) - root = f'{root}/tests/reference/' - logger.debug(f'overloaded ote_reference_root_dir_fx: return {root}') + root = f"{root}/tests/reference/" + logger.debug(f"overloaded ote_reference_root_dir_fx: return 
{root}") return root + # pytest magic def pytest_generate_tests(metafunc): ote_pytest_generate_tests_insertion(metafunc) + def pytest_addoption(parser): ote_pytest_addoption_insertion(parser) diff --git a/external/model-preparation-algorithm/tests/expected_metrics/metrics_test_ote_training.yml b/external/model-preparation-algorithm/tests/expected_metrics/metrics_test_ote_training.yml index c13bd14e093..f0d395825a3 100644 --- a/external/model-preparation-algorithm/tests/expected_metrics/metrics_test_ote_training.yml +++ b/external/model-preparation-algorithm/tests/expected_metrics/metrics_test_ote_training.yml @@ -11,16 +11,16 @@ "metrics.accuracy.Accuracy": "base": "export_evaluation.metrics.accuracy.Accuracy" "max_diff": 0.05 -"ACTION-training_evaluation,model-Custom_Image_Classification_EfficinetNet-V2-S,dataset-cifar10_cls_incr,num_iters-CONFIG,batch-CONFIG,usecase-reallife": +"ACTION-training_evaluation,model-Custom_Image_Classification_EfficientNet-V2-S,dataset-cifar10_cls_incr,num_iters-CONFIG,batch-CONFIG,usecase-reallife": "metrics.accuracy.Accuracy": "target_value": 0.96 "max_diff_if_less_threshold": 0.015 "max_diff_if_greater_threshold": 0.02 -"ACTION-export_evaluation,model-Custom_Image_Classification_EfficinetNet-V2-S,dataset-cifar10_cls_incr,num_iters-CONFIG,batch-CONFIG,usecase-reallife": +"ACTION-export_evaluation,model-Custom_Image_Classification_EfficientNet-V2-S,dataset-cifar10_cls_incr,num_iters-CONFIG,batch-CONFIG,usecase-reallife": "metrics.accuracy.Accuracy": "base": "training_evaluation.metrics.accuracy.Accuracy" "max_diff": 0.01 -"ACTION-pot_evaluation,model-Custom_Image_Classification_EfficinetNet-V2-S,dataset-cifar10_cls_incr,num_iters-CONFIG,batch-CONFIG,usecase-reallife": +"ACTION-pot_evaluation,model-Custom_Image_Classification_EfficientNet-V2-S,dataset-cifar10_cls_incr,num_iters-CONFIG,batch-CONFIG,usecase-reallife": "metrics.accuracy.Accuracy": "base": "export_evaluation.metrics.accuracy.Accuracy" "max_diff": 0.05 diff --git a/external/model-preparation-algorithm/tests/mpa_common.py b/external/model-preparation-algorithm/tests/mpa_common.py new file mode 100644 index 00000000000..a6a10769912 --- /dev/null +++ b/external/model-preparation-algorithm/tests/mpa_common.py @@ -0,0 +1,189 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import logging +import os.path as osp +import time +from collections import namedtuple +from copy import deepcopy +from typing import List, Type + +from detection_tasks.extension.datasets.data_utils import load_dataset_items_coco_format +from mpa_tasks.apis import BaseTask +from ote_sdk.entities.datasets import DatasetEntity +from ote_sdk.entities.label import Domain +from ote_sdk.entities.label_schema import LabelSchemaEntity +from ote_sdk.entities.metrics import Performance +from ote_sdk.entities.model import ModelEntity +from ote_sdk.entities.resultset import ResultSetEntity +from ote_sdk.entities.subset import Subset +from ote_sdk.test_suite.training_tests_actions import ( + BaseOTETestAction, + OTETestExportAction, + OTETestExportEvaluationAction, + OTETestPotAction, + OTETestPotEvaluationAction, + OTETestTrainingAction, + OTETestTrainingEvaluationAction, + # OTETestNNCFAction, + # OTETestNNCFEvaluationAction, + # OTETestNNCFExportAction, + # OTETestNNCFExportEvaluationAction, + # OTETestNNCFGraphAction, +) +from ote_sdk.test_suite.training_tests_common import ROOT_PATH_KEY, make_paths_be_abs +from segmentation_tasks.extension.datasets.mmdataset import load_dataset_items +from 
torchreid_tasks.utils import ClassificationDatasetAdapter
+
+logger = logging.getLogger(__name__)
+
+
+def get_test_action_classes() -> List[Type[BaseOTETestAction]]:
+    return [
+        OTETestTrainingAction,
+        OTETestTrainingEvaluationAction,
+        OTETestExportAction,
+        OTETestExportEvaluationAction,
+        OTETestPotAction,
+        OTETestPotEvaluationAction,
+        # OTETestNNCFAction,
+        # OTETestNNCFEvaluationAction,
+        # OTETestNNCFExportAction,
+        # OTETestNNCFExportEvaluationAction,
+        # OTETestNNCFGraphAction,
+    ]
+
+
+def eval(task: BaseTask, model: ModelEntity, dataset: DatasetEntity) -> Performance:
+    start_time = time.time()
+    result_dataset = task.infer(dataset.with_empty_annotations())
+    end_time = time.time()
+    print(f"{len(dataset)} analysed in {end_time - start_time} seconds")
+    result_set = ResultSetEntity(model=model, ground_truth_dataset=dataset, prediction_dataset=result_dataset)
+    task.evaluate(result_set)
+    assert result_set.performance is not None
+    return result_set.performance
+
+
+def DATASET_PARAMETERS_FIELDS() -> List[str]:
+    return deepcopy(
+        [
+            "annotations_train",
+            "images_train_dir",
+            "annotations_val",
+            "images_val_dir",
+            "annotations_test",
+            "images_test_dir",
+            "pre_trained_model",
+        ]
+    )
+
+
+DatasetParameters = namedtuple("DatasetParameters", DATASET_PARAMETERS_FIELDS())
+
+
+def _get_dataset_params_from_dataset_definitions(dataset_definitions, dataset_name):
+    if dataset_name not in dataset_definitions:
+        raise ValueError(
+            f"dataset {dataset_name} is absent in dataset_definitions, "
+            f"dataset_definitions.keys={list(dataset_definitions.keys())}"
+        )
+    cur_dataset_definition = dataset_definitions[dataset_name]
+    training_parameters_fields = {k: v for k, v in cur_dataset_definition.items() if k in DATASET_PARAMETERS_FIELDS()}
+    make_paths_be_abs(training_parameters_fields, dataset_definitions[ROOT_PATH_KEY])
+
+    assert set(DATASET_PARAMETERS_FIELDS()) == set(
+        training_parameters_fields.keys()
+    ), f"ERROR: dataset definitions for name={dataset_name} does not contain all required fields"
+    assert all(
+        training_parameters_fields.values()
+    ), f"ERROR: dataset definitions for name={dataset_name} contains empty values for some required fields"
+
+    params = DatasetParameters(**training_parameters_fields)
+    return params
+
+
+def _create_classification_dataset_and_labels_schema(dataset_params, model_name):
+    logger.debug(f"Using for train annotation file {dataset_params.annotations_train}")
+    logger.debug(f"Using for val annotation file {dataset_params.annotations_val}")
+
+    dataset = ClassificationDatasetAdapter(
+        train_data_root=osp.join(dataset_params.images_train_dir),
+        train_ann_file=osp.join(dataset_params.annotations_train),
+        val_data_root=osp.join(dataset_params.images_val_dir),
+        val_ann_file=osp.join(dataset_params.annotations_val),
+        test_data_root=osp.join(dataset_params.images_test_dir),
+        test_ann_file=osp.join(dataset_params.annotations_test),
+    )
+
+    labels_schema = LabelSchemaEntity.from_labels(dataset.get_labels())
+    return dataset, labels_schema
+
+
+def _create_object_detection_dataset_and_labels_schema(dataset_params):
+    logger.debug(f"Using for train annotation file {dataset_params.annotations_train}")
+    logger.debug(f"Using for val annotation file {dataset_params.annotations_val}")
+    labels_list = []
+    items = []
+    items.extend(
+        load_dataset_items_coco_format(
+            ann_file_path=dataset_params.annotations_train,
+            data_root_dir=dataset_params.images_train_dir,
+            domain=Domain.DETECTION,
+            subset=Subset.TRAINING,
+            labels_list=labels_list,
+        )
+    )
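+    # load_dataset_items_coco_format appends any newly seen labels to `labels_list`
+    # in place, so the train, val, and test splits below end up sharing one label set
+    # (the segmentation helper further down relies on the same in-place behaviour).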
+ items.extend( + load_dataset_items_coco_format( + ann_file_path=dataset_params.annotations_val, + data_root_dir=dataset_params.images_val_dir, + domain=Domain.DETECTION, + subset=Subset.VALIDATION, + labels_list=labels_list, + ) + ) + items.extend( + load_dataset_items_coco_format( + ann_file_path=dataset_params.annotations_test, + data_root_dir=dataset_params.images_test_dir, + domain=Domain.DETECTION, + subset=Subset.TESTING, + labels_list=labels_list, + ) + ) + dataset = DatasetEntity(items=items) + labels_schema = LabelSchemaEntity.from_labels(dataset.get_labels()) + return dataset, labels_schema + + +def _create_segmentation_dataset_and_labels_schema(dataset_params): + logger.debug(f"Using for train annotation file {dataset_params.annotations_train}") + logger.debug(f"Using for val annotation file {dataset_params.annotations_val}") + labels_list = [] + items = load_dataset_items( + ann_file_path=dataset_params.annotations_train, + data_root_dir=dataset_params.images_train_dir, + subset=Subset.TRAINING, + labels_list=labels_list, + ) + items.extend( + load_dataset_items( + ann_file_path=dataset_params.annotations_val, + data_root_dir=dataset_params.images_val_dir, + subset=Subset.VALIDATION, + labels_list=labels_list, + ) + ) + items.extend( + load_dataset_items( + ann_file_path=dataset_params.annotations_test, + data_root_dir=dataset_params.images_test_dir, + subset=Subset.TESTING, + labels_list=labels_list, + ) + ) + dataset = DatasetEntity(items=items) + labels_schema = LabelSchemaEntity.from_labels(labels_list) + return dataset, labels_schema diff --git a/external/model-preparation-algorithm/tests/ote_cli/test_classification.py b/external/model-preparation-algorithm/tests/ote_cli/test_classification.py new file mode 100644 index 00000000000..1558157faf0 --- /dev/null +++ b/external/model-preparation-algorithm/tests/ote_cli/test_classification.py @@ -0,0 +1,503 @@ +"""Tests for MPA Class-Incremental Learning for image classification with OTE CLI""" +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import os +import pytest + +from ote_sdk.test_suite.e2e_test_system import e2e_pytest_component +from ote_sdk.entities.model_template import parse_model_template + +from ote_cli.registry import Registry +from ote_cli.utils.tests import ( + create_venv, + get_some_vars, + ote_demo_deployment_testing, + ote_demo_testing, + ote_demo_openvino_testing, + ote_deploy_openvino_testing, + ote_eval_deployment_testing, + ote_eval_openvino_testing, + ote_eval_testing, + ote_hpo_testing, + ote_train_testing, + ote_export_testing, + pot_optimize_testing, + pot_eval_testing, + nncf_optimize_testing, + nncf_export_testing, + nncf_eval_testing, + nncf_eval_openvino_testing, +) + +# Pre-train w/ 'intel', 'openvino' classes +args0 = { + "--train-ann-file": "", + "--train-data-roots": "data/text_recognition/initial_data", + "--val-ann-file": "", + "--val-data-roots": "data/text_recognition/initial_data", + "--test-ann-files": "", + "--test-data-roots": "data/text_recognition/initial_data", + "--input": "data/text_recognition/initial_data/intel", + "train_params": [ + "params", + "--learning_parameters.num_iters", + "2", + "--learning_parameters.batch_size", + "4", + ], +} + +# Pre-train w/ 'intel', 'openvino', 'opencv' classes +args = { + "--train-ann-file": "", + "--train-data-roots": "data/text_recognition/IL_data", + "--val-ann-file": "", + "--val-data-roots": "data/text_recognition/IL_data", + "--test-ann-files": "", + "--test-data-roots": 
"data/text_recognition/IL_data", + "--input": "data/text_recognition/IL_data/intel", + "train_params": [ + "params", + "--learning_parameters.num_iters", + "2", + "--learning_parameters.batch_size", + "4", + ], +} + +root = "/tmp/ote_cli/" +ote_dir = os.getcwd() + +TT_STABILITY_TESTS = os.environ.get("TT_STABILITY_TESTS", False) +if TT_STABILITY_TESTS: + default_template = parse_model_template( + os.path.join( + "external/model-preparation-algorithm/configs", + "classification", + "efficientnet_b0_cls_incr", + "template.yaml", + ) + ) + templates = [default_template] * 100 + templates_ids = [template.model_template_id + f"-{i+1}" for i, template in enumerate(templates)] +else: + templates = Registry("external/model-preparation-algorithm").filter(task_type="CLASSIFICATION").templates + templates_ids = [template.model_template_id for template in templates] + + +class TestToolsMPAClassification: + @e2e_pytest_component + def test_create_venv(self): + work_dir, _, algo_backend_dir = get_some_vars(templates[0], root) + create_venv(algo_backend_dir, work_dir) + + @e2e_pytest_component + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_train(self, template): + ote_train_testing(template, root, ote_dir, args0) + _, template_work_dir, _ = get_some_vars(template, root) + args1 = args.copy() + args1["--load-weights"] = f"{template_work_dir}/trained_{template.model_template_id}/weights.pth" + ote_train_testing(template, root, ote_dir, args1) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_export(self, template): + ote_export_testing(template, root) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_eval(self, template): + ote_eval_testing(template, root, ote_dir, args) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_eval_openvino(self, template): + ote_eval_openvino_testing(template, root, ote_dir, args, threshold=0.0) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_demo(self, template): + ote_demo_testing(template, root, ote_dir, args) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_demo_openvino(self, template): + ote_demo_openvino_testing(template, root, ote_dir, args) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_deploy_openvino(self, template): + ote_deploy_openvino_testing(template, root, ote_dir, args) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_eval_deployment(self, template): + ote_eval_deployment_testing(template, root, ote_dir, args, threshold=0.0) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + 
@pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_demo_deployment(self, template): + ote_demo_deployment_testing(template, root, ote_dir, args) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_hpo(self, template): + ote_hpo_testing(template, root, ote_dir, args) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_nncf_optimize(self, template): + if template.entrypoints.nncf is None: + pytest.skip("nncf entrypoint is none") + + nncf_optimize_testing(template, root, ote_dir, args) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_nncf_export(self, template): + if template.entrypoints.nncf is None: + pytest.skip("nncf entrypoint is none") + + nncf_export_testing(template, root) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_nncf_eval(self, template): + if template.entrypoints.nncf is None: + pytest.skip("nncf entrypoint is none") + + nncf_eval_testing(template, root, ote_dir, args, threshold=0.001) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_nncf_eval_openvino(self, template): + if template.entrypoints.nncf is None: + pytest.skip("nncf entrypoint is none") + + nncf_eval_openvino_testing(template, root, ote_dir, args) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_pot_optimize(self, template): + pot_optimize_testing(template, root, ote_dir, args) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_pot_eval(self, template): + pot_eval_testing(template, root, ote_dir, args) + + +# Pre-train w/ 'car', 'tree' classes +args0_m = { + "--train-ann-file": "data/car_tree_bug/annotations/multilabel_car_tree.json", + "--train-data-roots": "data/car_tree_bug/images", + "--val-ann-file": "data/car_tree_bug/annotations/multilabel_car_tree.json", + "--val-data-roots": "data/car_tree_bug/images", + "--test-ann-files": "data/car_tree_bug/annotations/multilabel_car_tree.json", + "--test-data-roots": "data/car_tree_bug/images", + "--input": "data/car_tree_bug/images", + "train_params": [ + "params", + "--learning_parameters.num_iters", + "2", + "--learning_parameters.batch_size", + "4", + ], +} + +# Class-Incremental learning w/ 'car', 'tree', 'bug' classes +args_m = { + "--train-ann-file": "data/car_tree_bug/annotations/multilabel_default.json", + "--train-data-roots": "data/car_tree_bug/images", + "--val-ann-file": "data/car_tree_bug/annotations/multilabel_default.json", + "--val-data-roots": "data/car_tree_bug/images", + "--test-ann-files": "data/car_tree_bug/annotations/multilabel_default.json", + "--test-data-roots": "data/car_tree_bug/images", + "--input": "data/car_tree_bug/images", + "train_params": [ + "params", + 
"--learning_parameters.num_iters", + "2", + "--learning_parameters.batch_size", + "4", + ], +} + + +class TestToolsMPAMultilabelClassification: + @e2e_pytest_component + def test_create_venv(self): + work_dir, _, algo_backend_dir = get_some_vars(templates[0], root) + create_venv(algo_backend_dir, work_dir) + + @e2e_pytest_component + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_train(self, template): + ote_train_testing(template, root, ote_dir, args0_m) + _, template_work_dir, _ = get_some_vars(template, root) + args1 = args_m.copy() + args1["--load-weights"] = f"{template_work_dir}/trained_{template.model_template_id}/weights.pth" + ote_train_testing(template, root, ote_dir, args1) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_export(self, template): + ote_export_testing(template, root) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_eval(self, template): + ote_eval_testing(template, root, ote_dir, args_m) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_eval_openvino(self, template): + ote_eval_openvino_testing(template, root, ote_dir, args_m, threshold=0.0) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_demo(self, template): + pytest.skip("Demo for multi-label classification is not supported now.") + ote_demo_testing(template, root, ote_dir, args_m) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_demo_openvino(self, template): + pytest.skip("Demo for multi-label classification is not supported now.") + ote_demo_openvino_testing(template, root, ote_dir, args_m) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_deploy_openvino(self, template): + ote_deploy_openvino_testing(template, root, ote_dir, args_m) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_eval_deployment(self, template): + ote_eval_deployment_testing(template, root, ote_dir, args_m, threshold=0.0) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_demo_deployment(self, template): + pytest.xfail("Demo for multi-label classification is not supported now.") + ote_demo_deployment_testing(template, root, ote_dir, args_m) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_hpo(self, template): + ote_hpo_testing(template, root, ote_dir, args_m) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + 
@pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_nncf_optimize(self, template): + if template.entrypoints.nncf is None: + pytest.skip("nncf entrypoint is none") + + nncf_optimize_testing(template, root, ote_dir, args_m) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_nncf_export(self, template): + if template.entrypoints.nncf is None: + pytest.skip("nncf entrypoint is none") + + nncf_export_testing(template, root) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_nncf_eval(self, template): + if template.entrypoints.nncf is None: + pytest.skip("nncf entrypoint is none") + + nncf_eval_testing(template, root, ote_dir, args_m, threshold=0.001) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_nncf_eval_openvino(self, template): + if template.entrypoints.nncf is None: + pytest.skip("nncf entrypoint is none") + + nncf_eval_openvino_testing(template, root, ote_dir, args_m) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_pot_optimize(self, template): + pot_optimize_testing(template, root, ote_dir, args_m) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_pot_eval(self, template): + pot_eval_testing(template, root, ote_dir, args_m) + + +# TODO: (Jihwan) Enable C-IL test without image loading via ote-cli. 
+args_h = { + "--train-ann-file": "data/car_tree_bug/annotations/hierarchical_default.json", + "--train-data-roots": "data/car_tree_bug/images", + "--val-ann-file": "data/car_tree_bug/annotations/hierarchical_default.json", + "--val-data-roots": "data/car_tree_bug/images", + "--test-ann-files": "data/car_tree_bug/annotations/hierarchical_default.json", + "--test-data-roots": "data/car_tree_bug/images", + "--input": "data/car_tree_bug/images", + "train_params": [ + "params", + "--learning_parameters.num_iters", + "2", + "--learning_parameters.batch_size", + "4", + ], +} + + +class TestToolsMPAHierarchicalClassification: + @e2e_pytest_component + def test_create_venv(self): + work_dir, _, algo_backend_dir = get_some_vars(templates[0], root) + create_venv(algo_backend_dir, work_dir) + + @e2e_pytest_component + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_train(self, template): + ote_train_testing(template, root, ote_dir, args_h) + _, template_work_dir, _ = get_some_vars(template, root) + args1 = args_h.copy() + args1["--load-weights"] = f"{template_work_dir}/trained_{template.model_template_id}/weights.pth" + ote_train_testing(template, root, ote_dir, args1) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_export(self, template): + ote_export_testing(template, root) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_eval(self, template): + ote_eval_testing(template, root, ote_dir, args_h) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_eval_openvino(self, template): + ote_eval_openvino_testing(template, root, ote_dir, args_h, threshold=0.02) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_demo(self, template): + pytest.skip("Demo for hierarchical classification is not supported now.") + ote_demo_testing(template, root, ote_dir, args_h) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_demo_openvino(self, template): + pytest.skip("Demo for hierarchical classification is not supported now.") + ote_demo_openvino_testing(template, root, ote_dir, args_h) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_deploy_openvino(self, template): + ote_deploy_openvino_testing(template, root, ote_dir, args_h) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_eval_deployment(self, template): + ote_eval_deployment_testing(template, root, ote_dir, args_h, threshold=0.0) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_demo_deployment(self, template): + pytest.skip("Demo for 
hierarchical classification is not supported now.") + ote_demo_deployment_testing(template, root, ote_dir, args_h) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_ote_hpo(self, template): + ote_hpo_testing(template, root, ote_dir, args_h) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_nncf_optimize(self, template): + if template.entrypoints.nncf is None: + pytest.skip("nncf entrypoint is none") + + nncf_optimize_testing(template, root, ote_dir, args_h) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_nncf_export(self, template): + if template.entrypoints.nncf is None: + pytest.skip("nncf entrypoint is none") + + nncf_export_testing(template, root) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_nncf_eval(self, template): + if template.entrypoints.nncf is None: + pytest.skip("nncf entrypoint is none") + + nncf_eval_testing(template, root, ote_dir, args_h, threshold=0.001) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_nncf_eval_openvino(self, template): + if template.entrypoints.nncf is None: + pytest.skip("nncf entrypoint is none") + + nncf_eval_openvino_testing(template, root, ote_dir, args_h) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_pot_optimize(self, template): + pot_optimize_testing(template, root, ote_dir, args_h) + + @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") + @pytest.mark.parametrize("template", templates, ids=templates_ids) + def test_pot_eval(self, template): + pot_eval_testing(template, root, ote_dir, args_h) diff --git a/external/model-preparation-algorithm/tests/ote_cli/test_det_cls_il.py b/external/model-preparation-algorithm/tests/ote_cli/test_det_cls_il.py deleted file mode 100644 index 3d37d5c95a7..00000000000 --- a/external/model-preparation-algorithm/tests/ote_cli/test_det_cls_il.py +++ /dev/null @@ -1,178 +0,0 @@ -"""Tests for MPA Class-Incremental Learning for object detection with OTE CLI""" -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import os -import pytest -from subprocess import run - -from ote_sdk.test_suite.e2e_test_system import e2e_pytest_component - -from ote_cli.registry import Registry -from ote_cli.utils.tests import ( - collect_env_vars, - create_venv, - get_some_vars, - ote_demo_deployment_testing, - ote_demo_testing, - ote_demo_openvino_testing, - ote_deploy_openvino_testing, - ote_eval_deployment_testing, - ote_eval_openvino_testing, - ote_eval_testing, - ote_hpo_testing, - ote_train_testing, - ote_export_testing, - pot_optimize_testing, - pot_eval_testing, - nncf_optimize_testing, - nncf_export_testing, - nncf_eval_testing, - nncf_eval_openvino_testing, -) - -# Pre-train w/ 'person' class -args0 = { - '--train-ann-file': 
'data/airport/annotation_person_train.json', - '--train-data-roots': 'data/airport/train', - '--val-ann-file': 'data/airport/annotation_person_train.json', - '--val-data-roots': 'data/airport/train', - '--test-ann-files': 'data/airport/annotation_person_train.json', - '--test-data-roots': 'data/airport/train', - '--input': 'data/airport/train', - 'train_params': [ - 'params', - '--learning_parameters.num_iters', - '2', - '--learning_parameters.batch_size', - '2' - ] -} - -# Class-Incremental learning w/ 'vehicle', 'person', 'non-vehicle' classes -args = { - '--train-ann-file': 'data/airport/annotation_example_train.json', - '--train-data-roots': 'data/airport/train', - '--val-ann-file': 'data/airport/annotation_example_train.json', - '--val-data-roots': 'data/airport/train', - '--test-ann-files': 'data/airport/annotation_example_train.json', - '--test-data-roots': 'data/airport/train', - '--input': 'data/airport/train', - 'train_params': [ - 'params', - '--learning_parameters.num_iters', - '2', - '--learning_parameters.batch_size', - '2' - ] -} - -root = '/tmp/ote_cli_det/' -ote_dir = os.getcwd() - -templates = Registry('external/model-preparation-algorithm').filter(task_type='DETECTION').templates -templates_ids = [template.model_template_id for template in templates] - - -class TestToolsDetClsIncr: - @e2e_pytest_component - def test_create_venv(self): - work_dir, _, algo_backend_dir = get_some_vars(templates[0], root) - create_venv(algo_backend_dir, work_dir) - - @e2e_pytest_component - @pytest.mark.parametrize('template', templates, ids=templates_ids) - def test_ote_train(self, template): - ote_train_testing(template, root, ote_dir, args0) - _, template_work_dir, _ = get_some_vars(template, root) - args1 = args.copy() - args1['--load-weights'] = f'{template_work_dir}/trained_{template.model_template_id}/weights.pth' - ote_train_testing(template, root, ote_dir, args1) - - @e2e_pytest_component - @pytest.mark.parametrize('template', templates, ids=templates_ids) - def test_ote_export(self, template): - ote_export_testing(template, root) - - @e2e_pytest_component - @pytest.mark.parametrize('template', templates, ids=templates_ids) - def test_ote_eval(self, template): - ote_eval_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize('template', templates, ids=templates_ids) - def test_ote_eval_openvino(self, template): - ote_eval_openvino_testing(template, root, ote_dir, args, threshold=0.2) - - @e2e_pytest_component - @pytest.mark.parametrize('template', templates, ids=templates_ids) - def test_ote_demo(self, template): - ote_demo_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize('template', templates, ids=templates_ids) - def test_ote_demo_openvino(self, template): - ote_demo_openvino_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize('template', templates, ids=templates_ids) - def test_ote_deploy_openvino(self, template): - ote_deploy_openvino_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize('template', templates, ids=templates_ids) - def test_ote_eval_deployment(self, template): - ote_eval_deployment_testing(template, root, ote_dir, args, threshold=0.0) - - @e2e_pytest_component - @pytest.mark.parametrize('template', templates, ids=templates_ids) - def test_ote_demo_deployment(self, template): - ote_demo_deployment_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize('template', 
templates, ids=templates_ids) - def test_ote_hpo(self, template): - ote_hpo_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize('template', templates, ids=templates_ids) - def test_nncf_optimize(self, template): - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - - nncf_optimize_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize('template', templates, ids=templates_ids) - def test_nncf_export(self, template): - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - - nncf_export_testing(template, root) - - @e2e_pytest_component - @pytest.mark.parametrize('template', templates, ids=templates_ids) - def test_nncf_eval(self, template): - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - - nncf_eval_testing(template, root, ote_dir, args, threshold=0.001) - - @e2e_pytest_component - @pytest.mark.parametrize('template', templates, ids=templates_ids) - def test_nncf_eval_openvino(self, template): - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - - nncf_eval_openvino_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize('template', templates, ids=templates_ids) - def test_pot_optimize(self, template): - pot_optimize_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize('template', templates, ids=templates_ids) - def test_pot_eval(self, template): - pot_eval_testing(template, root, ote_dir, args) diff --git a/external/model-preparation-algorithm/tests/ote_cli/test_multiclass_cls_il.py b/external/model-preparation-algorithm/tests/ote_cli/test_detection.py similarity index 59% rename from external/model-preparation-algorithm/tests/ote_cli/test_multiclass_cls_il.py rename to external/model-preparation-algorithm/tests/ote_cli/test_detection.py index 8c9527e5e7c..65fb9552ed9 100644 --- a/external/model-preparation-algorithm/tests/ote_cli/test_multiclass_cls_il.py +++ b/external/model-preparation-algorithm/tests/ote_cli/test_detection.py @@ -1,4 +1,4 @@ -"""Tests for MPA Class-Incremental Learning for image classification with OTE CLI""" +"""Tests for MPA Class-Incremental Learning for object detection with OTE CLI""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -7,6 +7,7 @@ import pytest from ote_sdk.test_suite.e2e_test_system import e2e_pytest_component +from ote_sdk.entities.model_template import parse_model_template from ote_cli.registry import Registry from ote_cli.utils.tests import ( @@ -28,53 +29,50 @@ nncf_export_testing, nncf_eval_testing, nncf_eval_openvino_testing, - xfail_templates, ) -# Pre-train w/ 'intel', 'openvino' classes +# Pre-train w/ 'person' class args0 = { - '--train-ann-file': '', - '--train-data-roots': 'data/text_recognition/initial_data', - '--val-ann-file': '', - '--val-data-roots': 'data/text_recognition/initial_data', - '--test-ann-files': '', - '--test-data-roots': 'data/text_recognition/initial_data', - '--input': 'data/text_recognition/initial_data/intel', - 'train_params': [ - 'params', - '--learning_parameters.num_iters', - '2', - '--learning_parameters.batch_size', - '2', - ] + "--train-ann-file": "data/airport/annotation_person_train.json", + "--train-data-roots": "data/airport/train", + "--val-ann-file": "data/airport/annotation_person_train.json", + "--val-data-roots": "data/airport/train", + "--test-ann-files": 
"data/airport/annotation_person_train.json", + "--test-data-roots": "data/airport/train", + "--input": "data/airport/train", + "train_params": ["params", "--learning_parameters.num_iters", "4", "--learning_parameters.batch_size", "4"], } -# Pre-train w/ 'intel', 'openvino', 'opencv' classes +# Class-Incremental learning w/ 'vehicle', 'person', 'non-vehicle' classes args = { - '--train-ann-file': '', - '--train-data-roots': 'data/text_recognition/IL_data', - '--val-ann-file': '', - '--val-data-roots': 'data/text_recognition/IL_data', - '--test-ann-files': '', - '--test-data-roots': 'data/text_recognition/IL_data', - '--input': 'data/text_recognition/IL_data/intel', - 'train_params': [ - 'params', - '--learning_parameters.num_iters', - '2', - '--learning_parameters.batch_size', - '4', - ] + "--train-ann-file": "data/airport/annotation_example_train.json", + "--train-data-roots": "data/airport/train", + "--val-ann-file": "data/airport/annotation_example_train.json", + "--val-data-roots": "data/airport/train", + "--test-ann-files": "data/airport/annotation_example_train.json", + "--test-data-roots": "data/airport/train", + "--input": "data/airport/train", + "train_params": ["params", "--learning_parameters.num_iters", "2", "--learning_parameters.batch_size", "4"], } -root = '/tmp/ote_cli_multiclass/' +root = "/tmp/ote_cli/" ote_dir = os.getcwd() -templates = Registry('external/model-preparation-algorithm').filter(task_type='CLASSIFICATION').templates -templates_ids = [template.model_template_id for template in templates] +TT_STABILITY_TESTS = os.environ.get("TT_STABILITY_TESTS", False) +if TT_STABILITY_TESTS: + default_template = parse_model_template( + os.path.join( + "external/model-preparation-algorithm/configs", "detection", "mobilenetv2_atss_cls_incr", "template.yaml" + ) + ) + templates = [default_template] * 100 + templates_ids = [template.model_template_id + f"-{i+1}" for i, template in enumerate(templates)] +else: + templates = Registry("external/model-preparation-algorithm").filter(task_type="DETECTION").templates + templates_ids = [template.model_template_id for template in templates] -class TestToolsClsClsIncr: +class TestToolsMPADetection: @e2e_pytest_component def test_create_venv(self): work_dir, _, algo_backend_dir = get_some_vars(templates[0], root) @@ -86,55 +84,65 @@ def test_ote_train(self, template): ote_train_testing(template, root, ote_dir, args0) _, template_work_dir, _ = get_some_vars(template, root) args1 = args.copy() - args1['--load-weights'] = f'{template_work_dir}/trained_{template.model_template_id}/weights.pth' + args1["--load-weights"] = f"{template_work_dir}/trained_{template.model_template_id}/weights.pth" ote_train_testing(template, root, ote_dir, args1) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_export(self, template): ote_export_testing(template, root) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_eval(self, template): ote_eval_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_eval_openvino(self, template): - ote_eval_openvino_testing(template, root, ote_dir, args, threshold=0.0) + ote_eval_openvino_testing(template, 
root, ote_dir, args, threshold=0.2) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_demo(self, template): ote_demo_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_demo_openvino(self, template): ote_demo_openvino_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_deploy_openvino(self, template): ote_deploy_openvino_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_eval_deployment(self, template): ote_eval_deployment_testing(template, root, ote_dir, args, threshold=0.0) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_demo_deployment(self, template): ote_demo_deployment_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_hpo(self, template): ote_hpo_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_optimize(self, template): if template.entrypoints.nncf is None: @@ -143,6 +151,7 @@ def test_nncf_optimize(self, template): nncf_optimize_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_export(self, template): if template.entrypoints.nncf is None: @@ -151,6 +160,7 @@ def test_nncf_export(self, template): nncf_export_testing(template, root) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_eval(self, template): if template.entrypoints.nncf is None: @@ -159,6 +169,7 @@ def test_nncf_eval(self, template): nncf_eval_testing(template, root, ote_dir, args, threshold=0.001) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_eval_openvino(self, template): if template.entrypoints.nncf is None: @@ -167,11 +178,13 @@ def test_nncf_eval_openvino(self, template): nncf_eval_openvino_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_pot_optimize(self, template): pot_optimize_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_pot_eval(self, template): 
pot_eval_testing(template, root, ote_dir, args) diff --git a/external/model-preparation-algorithm/tests/ote_cli/test_hierarchical_cls_il.py b/external/model-preparation-algorithm/tests/ote_cli/test_hierarchical_cls_il.py deleted file mode 100644 index e785c2e9fbe..00000000000 --- a/external/model-preparation-algorithm/tests/ote_cli/test_hierarchical_cls_il.py +++ /dev/null @@ -1,162 +0,0 @@ -"""Tests for MPA Class-Incremental Learning for image classification with OTE CLI""" -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import os -import pytest - -from ote_sdk.test_suite.e2e_test_system import e2e_pytest_component - -from ote_cli.registry import Registry -from ote_cli.utils.tests import ( - create_venv, - get_some_vars, - ote_demo_deployment_testing, - ote_demo_testing, - ote_demo_openvino_testing, - ote_deploy_openvino_testing, - ote_eval_deployment_testing, - ote_eval_openvino_testing, - ote_eval_testing, - ote_hpo_testing, - ote_train_testing, - ote_export_testing, - pot_optimize_testing, - pot_eval_testing, - nncf_optimize_testing, - nncf_export_testing, - nncf_eval_testing, - nncf_eval_openvino_testing, - xfail_templates, -) - -# TODO: (Jihwan) Enable C-IL test without image loading via ote-cli. -args = { - '--train-ann-file': 'data/car_tree_bug/annotations/hierarchical_default.json', - '--train-data-roots': 'data/car_tree_bug/images', - '--val-ann-file': 'data/car_tree_bug/annotations/hierarchical_default.json', - '--val-data-roots': 'data/car_tree_bug/images', - '--test-ann-files': 'data/car_tree_bug/annotations/hierarchical_default.json', - '--test-data-roots': 'data/car_tree_bug/images', - '--input': 'data/car_tree_bug/images', - 'train_params': [ - 'params', - '--learning_parameters.num_iters', - '2', - '--learning_parameters.batch_size', - '2', - ] -} - -root = '/tmp/ote_cli_hierarchical/' -ote_dir = os.getcwd() - -templates = Registry('external/model-preparation-algorithm').filter(task_type='CLASSIFICATION').templates -templates_ids = [template.model_template_id for template in templates] - - -class TestToolsClsClsIncr: - @e2e_pytest_component - def test_create_venv(self): - work_dir, _, algo_backend_dir = get_some_vars(templates[0], root) - create_venv(algo_backend_dir, work_dir) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_train(self, template): - ote_train_testing(template, root, ote_dir, args) - _, template_work_dir, _ = get_some_vars(template, root) - args1 = args.copy() - args1['--load-weights'] = f'{template_work_dir}/trained_{template.model_template_id}/weights.pth' - ote_train_testing(template, root, ote_dir, args1) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_export(self, template): - ote_export_testing(template, root) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_eval(self, template): - ote_eval_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_eval_openvino(self, template): - ote_eval_openvino_testing(template, root, ote_dir, args, threshold=0.02) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_demo(self, template): - pytest.skip("Demo for hierarchical classification is not supported now.") - ote_demo_testing(template, root, ote_dir, args) - - @e2e_pytest_component - 
@pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_demo_openvino(self, template): - pytest.skip("Demo for hierarchical classification is not supported now.") - ote_demo_openvino_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_deploy_openvino(self, template): - ote_deploy_openvino_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_eval_deployment(self, template): - ote_eval_deployment_testing(template, root, ote_dir, args, threshold=0.0) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_demo_deployment(self, template): - pytest.skip("Demo for hierarchical classification is not supported now.") - ote_demo_deployment_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_hpo(self, template): - ote_hpo_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_nncf_optimize(self, template): - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - - nncf_optimize_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_nncf_export(self, template): - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - - nncf_export_testing(template, root) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_nncf_eval(self, template): - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - - nncf_eval_testing(template, root, ote_dir, args, threshold=0.001) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_nncf_eval_openvino(self, template): - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - - nncf_eval_openvino_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_pot_optimize(self, template): - pot_optimize_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_pot_eval(self, template): - pot_eval_testing(template, root, ote_dir, args) diff --git a/external/model-preparation-algorithm/tests/ote_cli/test_ins_seg_cls_il.py b/external/model-preparation-algorithm/tests/ote_cli/test_instance_segmentation.py similarity index 58% rename from external/model-preparation-algorithm/tests/ote_cli/test_ins_seg_cls_il.py rename to external/model-preparation-algorithm/tests/ote_cli/test_instance_segmentation.py index d02736ba17a..c30a3d3d559 100644 --- a/external/model-preparation-algorithm/tests/ote_cli/test_ins_seg_cls_il.py +++ b/external/model-preparation-algorithm/tests/ote_cli/test_instance_segmentation.py @@ -7,6 +7,7 @@ import pytest from ote_sdk.test_suite.e2e_test_system import e2e_pytest_component +from ote_sdk.entities.model_template import parse_model_template from ote_cli.registry import Registry from ote_cli.utils.tests import ( @@ -26,110 +27,118 @@ nncf_eval_testing, nncf_eval_openvino_testing, pot_optimize_testing, - pot_eval_testing + pot_eval_testing, ) # 
Pre-train w/ 'car & tree' class args0 = { - '--train-ann-file': 'data/car_tree_bug/annotations/instances_car_tree.json', - '--train-data-roots': 'data/car_tree_bug/images', - '--val-ann-file': 'data/car_tree_bug/annotations/instances_car_tree.json', - '--val-data-roots': 'data/car_tree_bug/images', - '--test-ann-files': 'data/car_tree_bug/annotations/instances_car_tree.json', - '--test-data-roots': 'data/car_tree_bug/images', - '--input': 'data/car_tree_bug/images', - 'train_params': [ - 'params', - '--learning_parameters.num_iters', - '5', - '--learning_parameters.batch_size', - '2' - ] + "--train-ann-file": "data/car_tree_bug/annotations/instances_car_tree.json", + "--train-data-roots": "data/car_tree_bug/images", + "--val-ann-file": "data/car_tree_bug/annotations/instances_car_tree.json", + "--val-data-roots": "data/car_tree_bug/images", + "--test-ann-files": "data/car_tree_bug/annotations/instances_car_tree.json", + "--test-data-roots": "data/car_tree_bug/images", + "--input": "data/car_tree_bug/images", + "train_params": ["params", "--learning_parameters.num_iters", "4", "--learning_parameters.batch_size", "4"], } # Class-Incremental learning w/ 'car', 'tree', 'bug' classes args = { - '--train-ann-file': 'data/car_tree_bug/annotations/instances_default.json', - '--train-data-roots': 'data/car_tree_bug/images', - '--val-ann-file': 'data/car_tree_bug/annotations/instances_default.json', - '--val-data-roots': 'data/car_tree_bug/images', - '--test-ann-files': 'data/car_tree_bug/annotations/instances_default.json', - '--test-data-roots': 'data/car_tree_bug/images', - '--input': 'data/car_tree_bug/images', - 'train_params': [ - 'params', - '--learning_parameters.num_iters', - '2', - '--learning_parameters.batch_size', - '2' - ] + "--train-ann-file": "data/car_tree_bug/annotations/instances_default.json", + "--train-data-roots": "data/car_tree_bug/images", + "--val-ann-file": "data/car_tree_bug/annotations/instances_default.json", + "--val-data-roots": "data/car_tree_bug/images", + "--test-ann-files": "data/car_tree_bug/annotations/instances_default.json", + "--test-data-roots": "data/car_tree_bug/images", + "--input": "data/car_tree_bug/images", + "train_params": ["params", "--learning_parameters.num_iters", "4", "--learning_parameters.batch_size", "4"], } -root = '/tmp/ote_cli/' +root = "/tmp/ote_cli/" ote_dir = os.getcwd() -templates = Registry( - 'external/model-preparation-algorithm', experimental=True - ).filter(task_type='INSTANCE_SEGMENTATION').templates -templates_ids = [template.model_template_id for template in templates] - - -class TestToolsInstanceSegmentationClsIncr: +TT_STABILITY_TESTS = os.environ.get("TT_STABILITY_TESTS", False) +if TT_STABILITY_TESTS: + default_template = parse_model_template( + os.path.join( + "external/model-preparation-algorithm/configs", + "instance-segmentation", + "resnet50_maskrcnn", + "template.yaml", + ) + ) + templates = [default_template] * 100 + templates_ids = [template.model_template_id + f"-{i+1}" for i, template in enumerate(templates)] +else: + templates = Registry("external/model-preparation-algorithm").filter(task_type="INSTANCE_SEGMENTATION").templates + templates_ids = [template.model_template_id for template in templates] + + +class TestToolsMPAInstanceSegmentation: @e2e_pytest_component def test_create_venv(self): work_dir, _, algo_backend_dir = get_some_vars(templates[0], root) create_venv(algo_backend_dir, work_dir) @e2e_pytest_component - @pytest.mark.parametrize('template', templates, ids=templates_ids) + 
@pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_train(self, template): ote_train_testing(template, root, ote_dir, args0) _, template_work_dir, _ = get_some_vars(template, root) args1 = args.copy() - args1['--load-weights'] = f'{template_work_dir}/trained_{template.model_template_id}/weights.pth' + args1["--load-weights"] = f"{template_work_dir}/trained_{template.model_template_id}/weights.pth" ote_train_testing(template, root, ote_dir, args1) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_export(self, template): ote_export_testing(template, root) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_eval(self, template): ote_eval_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_eval_openvino(self, template): ote_eval_openvino_testing(template, root, ote_dir, args, threshold=0.2) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_demo(self, template): ote_demo_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_demo_openvino(self, template): ote_demo_openvino_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_deploy_openvino(self, template): ote_deploy_openvino_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_eval_deployment(self, template): ote_eval_deployment_testing(template, root, ote_dir, args, threshold=0.0) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_demo_deployment(self, template): ote_demo_deployment_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_optimize(self, template): if template.entrypoints.nncf is None: @@ -138,6 +147,7 @@ def test_nncf_optimize(self, template): nncf_optimize_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_export(self, template): if template.entrypoints.nncf is None: @@ -146,6 +156,7 @@ def test_nncf_export(self, template): nncf_export_testing(template, root) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_eval(self, template): if template.entrypoints.nncf is None: @@ 
-154,6 +165,7 @@ def test_nncf_eval(self, template): nncf_eval_testing(template, root, ote_dir, args, threshold=0.001) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_eval_openvino(self, template): if template.entrypoints.nncf is None: @@ -162,11 +174,13 @@ def test_nncf_eval_openvino(self, template): nncf_eval_openvino_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_pot_optimize(self, template): pot_optimize_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_pot_eval(self, template): pot_eval_testing(template, root, ote_dir, args) diff --git a/external/model-preparation-algorithm/tests/ote_cli/test_multilabel_cls_il.py b/external/model-preparation-algorithm/tests/ote_cli/test_multilabel_cls_il.py deleted file mode 100644 index 919c95c3697..00000000000 --- a/external/model-preparation-algorithm/tests/ote_cli/test_multilabel_cls_il.py +++ /dev/null @@ -1,180 +0,0 @@ -"""Tests for MPA Class-Incremental Learning for image classification with OTE CLI""" -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import os -import pytest - -from ote_sdk.test_suite.e2e_test_system import e2e_pytest_component - -from ote_cli.registry import Registry -from ote_cli.utils.tests import ( - create_venv, - get_some_vars, - ote_demo_deployment_testing, - ote_demo_testing, - ote_demo_openvino_testing, - ote_deploy_openvino_testing, - ote_eval_deployment_testing, - ote_eval_openvino_testing, - ote_eval_testing, - ote_hpo_testing, - ote_train_testing, - ote_export_testing, - pot_optimize_testing, - pot_eval_testing, - nncf_optimize_testing, - nncf_export_testing, - nncf_eval_testing, - nncf_eval_openvino_testing, - xfail_templates, -) - -# Pre-train w/ 'car', 'tree' classes -args0 = { - '--train-ann-file': 'data/car_tree_bug/annotations/multilabel_car_tree.json', - '--train-data-roots': 'data/car_tree_bug/images', - '--val-ann-file': 'data/car_tree_bug/annotations/multilabel_car_tree.json', - '--val-data-roots': 'data/car_tree_bug/images', - '--test-ann-files': 'data/car_tree_bug/annotations/multilabel_car_tree.json', - '--test-data-roots': 'data/car_tree_bug/images', - '--input': 'data/car_tree_bug/images', - 'train_params': [ - 'params', - '--learning_parameters.num_iters', - '2', - '--learning_parameters.batch_size', - '4', - ] -} - -# Class-Incremental learning w/ 'car', 'tree', 'bug' classes -args = { - '--train-ann-file': 'data/car_tree_bug/annotations/multilabel_default.json', - '--train-data-roots': 'data/car_tree_bug/images', - '--val-ann-file': 'data/car_tree_bug/annotations/multilabel_default.json', - '--val-data-roots': 'data/car_tree_bug/images', - '--test-ann-files': 'data/car_tree_bug/annotations/multilabel_default.json', - '--test-data-roots': 'data/car_tree_bug/images', - '--input': 'data/car_tree_bug/images', - 'train_params': [ - 'params', - '--learning_parameters.num_iters', - '2', - '--learning_parameters.batch_size', - '4', - ] -} - -root = '/tmp/ote_cli_multilabel/' -ote_dir = os.getcwd() - -templates = Registry('external/model-preparation-algorithm').filter(task_type='CLASSIFICATION').templates 
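For context on the TT_STABILITY_TESTS switch that the renamed suites in this patch introduce: when the environment variable is set, a single default template is parametrized many times so that only training is exercised repeatedly, and every other stage is skipped via its skipif marker. A condensed sketch of the pattern, reusing the instance-segmentation values shown above:

    import os
    from ote_sdk.entities.model_template import parse_model_template

    TT_STABILITY_TESTS = os.environ.get("TT_STABILITY_TESTS", False)  # truthy when set
    if TT_STABILITY_TESTS:
        default_template = parse_model_template(
            "external/model-preparation-algorithm/configs/"
            "instance-segmentation/resnet50_maskrcnn/template.yaml"
        )
        templates = [default_template] * 100  # 100 repeated training runs
        # suffix each pytest id so the repeated runs stay distinguishable
        templates_ids = [t.model_template_id + f"-{i + 1}" for i, t in enumerate(templates)]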
-templates_ids = [template.model_template_id for template in templates] - - -class TestToolsClsClsIncr: - @e2e_pytest_component - def test_create_venv(self): - work_dir, _, algo_backend_dir = get_some_vars(templates[0], root) - create_venv(algo_backend_dir, work_dir) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_train(self, template): - ote_train_testing(template, root, ote_dir, args0) - _, template_work_dir, _ = get_some_vars(template, root) - args1 = args.copy() - args1['--load-weights'] = f'{template_work_dir}/trained_{template.model_template_id}/weights.pth' - ote_train_testing(template, root, ote_dir, args1) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_export(self, template): - ote_export_testing(template, root) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_eval(self, template): - ote_eval_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_eval_openvino(self, template): - ote_eval_openvino_testing(template, root, ote_dir, args, threshold=0.0) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_demo(self, template): - pytest.skip("Demo for multi-label classification is not supported now.") - ote_demo_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_demo_openvino(self, template): - pytest.skip("Demo for multi-label classification is not supported now.") - ote_demo_openvino_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_deploy_openvino(self, template): - ote_deploy_openvino_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_eval_deployment(self, template): - ote_eval_deployment_testing(template, root, ote_dir, args, threshold=0.0) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_demo_deployment(self, template): - pytest.xfail("Demo for multi-label classification is not supported now.") - ote_demo_deployment_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_ote_hpo(self, template): - ote_hpo_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_nncf_optimize(self, template): - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - - nncf_optimize_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_nncf_export(self, template): - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - - nncf_export_testing(template, root) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_nncf_eval(self, template): - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - - nncf_eval_testing(template, root, ote_dir, args, threshold=0.001) - - @e2e_pytest_component - @pytest.mark.parametrize("template", 
templates, ids=templates_ids) - def test_nncf_eval_openvino(self, template): - if template.entrypoints.nncf is None: - pytest.skip("nncf entrypoint is none") - - nncf_eval_openvino_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_pot_optimize(self, template): - pot_optimize_testing(template, root, ote_dir, args) - - @e2e_pytest_component - @pytest.mark.parametrize("template", templates, ids=templates_ids) - def test_pot_eval(self, template): - pot_eval_testing(template, root, ote_dir, args) diff --git a/external/model-preparation-algorithm/tests/ote_cli/test_seg_cls_il.py b/external/model-preparation-algorithm/tests/ote_cli/test_segmentation.py similarity index 59% rename from external/model-preparation-algorithm/tests/ote_cli/test_seg_cls_il.py rename to external/model-preparation-algorithm/tests/ote_cli/test_segmentation.py index b29e0cc58c8..cdbf6be858c 100644 --- a/external/model-preparation-algorithm/tests/ote_cli/test_seg_cls_il.py +++ b/external/model-preparation-algorithm/tests/ote_cli/test_segmentation.py @@ -8,6 +8,7 @@ import pytest from ote_sdk.test_suite.e2e_test_system import e2e_pytest_component +from ote_sdk.entities.model_template import parse_model_template from ote_cli.registry import Registry @@ -34,40 +35,50 @@ args = { - '--train-ann-file': 'data/segmentation/custom/annotations/training', - '--train-data-roots': 'data/segmentation/custom/images/training', - '--val-ann-file': 'data/segmentation/custom/annotations/training', - '--val-data-roots': 'data/segmentation/custom/images/training', - '--test-ann-files': 'data/segmentation/custom/annotations/training', - '--test-data-roots': 'data/segmentation/custom/images/training', - '--input': 'data/segmentation/custom/images/training', - 'train_params': [ - 'params', - '--learning_parameters.learning_rate_fixed_iters', - '0', - '--learning_parameters.learning_rate_warmup_iters', - '25', - '--learning_parameters.num_iters', - '2', - '--learning_parameters.batch_size', - '2' - ] + "--train-ann-file": "data/segmentation/custom/annotations/training", + "--train-data-roots": "data/segmentation/custom/images/training", + "--val-ann-file": "data/segmentation/custom/annotations/training", + "--val-data-roots": "data/segmentation/custom/images/training", + "--test-ann-files": "data/segmentation/custom/annotations/training", + "--test-data-roots": "data/segmentation/custom/images/training", + "--input": "data/segmentation/custom/images/training", + "train_params": [ + "params", + "--learning_parameters.learning_rate_fixed_iters", + "0", + "--learning_parameters.learning_rate_warmup_iters", + "25", + "--learning_parameters.num_iters", + "2", + "--learning_parameters.batch_size", + "4", + ], } -root = '/tmp/ote_cli_seg/' +root = "/tmp/ote_cli/" ote_dir = os.getcwd() -templates = Registry('external/model-preparation-algorithm', experimental=True).filter(task_type='SEGMENTATION').templates -templates_ids = [template.model_template_id for template in templates] +TT_STABILITY_TESTS = os.environ.get("TT_STABILITY_TESTS", False) +if TT_STABILITY_TESTS: + default_template = parse_model_template( + os.path.join( + "external/model-preparation-algorithm/configs", "segmentation", "ocr-lite-hrnet-18-mod2", "template.yaml" + ) + ) + templates = [default_template] * 100 + templates_ids = [template.model_template_id + f"-{i+1}" for i, template in enumerate(templates)] +else: + templates = 
Registry("external/model-preparation-algorithm").filter(task_type="SEGMENTATION").templates + templates_ids = [template.model_template_id for template in templates] -class TestToolsSegClsIncr: +class TestToolsMPASegmentation: @e2e_pytest_component def test_create_venv(self): work_dir, _, algo_backend_dir = get_some_vars(templates[0], root) create_venv(algo_backend_dir, work_dir) - print(f'algo_backend_dir: {algo_backend_dir}') - print(f'work_dir: {work_dir}') + print(f"algo_backend_dir: {algo_backend_dir}") + print(f"work_dir: {work_dir}") @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) @@ -75,55 +86,65 @@ def test_ote_train(self, template): ote_train_testing(template, root, ote_dir, args) _, template_work_dir, _ = get_some_vars(template, root) args1 = args.copy() - args1['--load-weights'] = f'{template_work_dir}/trained_{template.model_template_id}/weights.pth' + args1["--load-weights"] = f"{template_work_dir}/trained_{template.model_template_id}/weights.pth" ote_train_testing(template, root, ote_dir, args1) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_export(self, template): ote_export_testing(template, root) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_eval(self, template): ote_eval_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_eval_openvino(self, template): ote_eval_openvino_testing(template, root, ote_dir, args, threshold=0.1) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_demo(self, template): ote_demo_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_demo_openvino(self, template): ote_demo_openvino_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_deploy_openvino(self, template): ote_deploy_openvino_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_eval_deployment(self, template): ote_eval_deployment_testing(template, root, ote_dir, args, threshold=0.0) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_demo_deployment(self, template): ote_demo_deployment_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_hpo(self, template): ote_hpo_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") 
@pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_optimize(self, template): if template.entrypoints.nncf is None: @@ -132,6 +153,7 @@ def test_nncf_optimize(self, template): nncf_optimize_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_export(self, template): if template.entrypoints.nncf is None: @@ -140,6 +162,7 @@ def test_nncf_export(self, template): nncf_export_testing(template, root) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_eval(self, template): if template.entrypoints.nncf is None: @@ -148,6 +171,7 @@ def test_nncf_eval(self, template): nncf_eval_testing(template, root, ote_dir, args, threshold=0.001) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_eval_openvino(self, template): if template.entrypoints.nncf is None: @@ -156,15 +180,17 @@ def test_nncf_eval_openvino(self, template): nncf_eval_openvino_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_pot_optimize(self, template): - if template.model_template_id.startswith('ClassIncremental_Semantic_Segmentation_Lite-HRNet-'): - pytest.skip('CVS-82482') + if template.model_template_id.startswith("ClassIncremental_Semantic_Segmentation_Lite-HRNet-"): + pytest.skip("CVS-82482") pot_optimize_testing(template, root, ote_dir, args) @e2e_pytest_component + @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_pot_eval(self, template): - if template.model_template_id.startswith('ClassIncremental_Semantic_Segmentation_Lite-HRNet-'): - pytest.skip('CVS-82482') + if template.model_template_id.startswith("ClassIncremental_Semantic_Segmentation_Lite-HRNet-"): + pytest.skip("CVS-82482") pot_eval_testing(template, root, ote_dir, args) diff --git a/external/model-preparation-algorithm/tests/test_ote_api.py b/external/model-preparation-algorithm/tests/test_ote_api.py deleted file mode 100644 index 7b8663aebd7..00000000000 --- a/external/model-preparation-algorithm/tests/test_ote_api.py +++ /dev/null @@ -1,858 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import glob -import os.path as osp -import random -import time -import warnings -from concurrent.futures import ThreadPoolExecutor -from typing import Optional -import pytest -import cv2 as cv -import numpy as np -from bson import ObjectId -from detection_tasks.apis.detection.ote_utils import generate_label_schema -from mpa_tasks.apis import BaseTask -from mpa_tasks.apis.classification import ( - ClassificationInferenceTask, - ClassificationTrainTask, -) -from mpa_tasks.apis.detection import DetectionInferenceTask, DetectionTrainTask -from mpa_tasks.apis.segmentation import SegmentationInferenceTask, SegmentationTrainTask -from ote_sdk.configuration.helper import create -from ote_sdk.entities.annotation import ( - Annotation, - AnnotationSceneEntity, - AnnotationSceneKind, -) -from ote_sdk.entities.color import Color -from 
ote_sdk.entities.dataset_item import DatasetItemEntity -from ote_sdk.entities.datasets import DatasetEntity -from ote_sdk.entities.id import ID -from ote_sdk.entities.image import Image -from ote_sdk.entities.inference_parameters import InferenceParameters -from ote_sdk.entities.label import Domain, LabelEntity -from ote_sdk.entities.label_schema import LabelGroup, LabelGroupType, LabelSchemaEntity -from ote_sdk.entities.metrics import Performance -from ote_sdk.entities.model import ModelEntity -from ote_sdk.entities.model_template import ( - TaskType, - parse_model_template, - task_type_to_label_domain, -) -from ote_sdk.entities.resultset import ResultSetEntity -from ote_sdk.entities.scored_label import ScoredLabel -from ote_sdk.entities.shapes.ellipse import Ellipse -from ote_sdk.entities.shapes.polygon import Point, Polygon -from ote_sdk.entities.shapes.rectangle import Rectangle -from ote_sdk.entities.subset import Subset -from ote_sdk.entities.task_environment import TaskEnvironment -from ote_sdk.entities.train_parameters import TrainParameters -from ote_sdk.test_suite.e2e_test_system import e2e_pytest_api -from ote_sdk.tests.test_helpers import generate_random_annotated_image -from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType -from ote_sdk.utils.shape_factory import ShapeFactory - -DEFAULT_CLS_TEMPLATE_DIR = osp.join( - "configs", "classification", "efficientnet_b0_cls_incr" -) -DEFAULT_DET_TEMPLATE_DIR = osp.join("configs", "detection", "mobilenetv2_atss_cls_incr") -DEFAULT_SEG_TEMPLATE_DIR = osp.join("configs", "segmentation", "ocr-lite-hrnet-18-mod2") - - -def eval(task: BaseTask, model: ModelEntity, dataset: DatasetEntity) -> Performance: - start_time = time.time() - result_dataset = task.infer(dataset.with_empty_annotations()) - end_time = time.time() - print(f"{len(dataset)} analysed in {end_time - start_time} seconds") - result_set = ResultSetEntity( - model=model, ground_truth_dataset=dataset, prediction_dataset=result_dataset - ) - task.evaluate(result_set) - assert result_set.performance is not None - return result_set.performance - - -class TestMPAClsAPI: - @e2e_pytest_api - def test_reading_classification_cls_incr_model_template(self): - classification_template = [ - "efficientnet_b0_cls_incr", - "efficientnet_v2_s_cls_incr", - "mobilenet_v3_large_1_cls_incr", - ] - for model_template in classification_template: - parse_model_template( - osp.join("configs", "classification", model_template, "template.yaml") - ) - - @staticmethod - def generate_label_schema(not_empty_labels, multilabel=False, hierarchical=False): - assert len(not_empty_labels) > 1 - - label_schema = LabelSchemaEntity() - if multilabel: - emptylabel = LabelEntity( - name="Empty label", is_empty=True, domain=Domain.CLASSIFICATION - ) - empty_group = LabelGroup( - name="empty", labels=[emptylabel], group_type=LabelGroupType.EMPTY_LABEL - ) - for label in not_empty_labels: - label_schema.add_group( - LabelGroup( - name=label.name, - labels=[label], - group_type=LabelGroupType.EXCLUSIVE, - ) - ) - label_schema.add_group(empty_group) - elif hierarchical: - single_label_classes = ["b", "g", "r"] - multi_label_classes = ["w", "p"] - emptylabel = LabelEntity( - name="Empty label", is_empty=True, domain=Domain.CLASSIFICATION - ) - empty_group = LabelGroup( - name="empty", labels=[emptylabel], group_type=LabelGroupType.EMPTY_LABEL - ) - single_labels = [] - for label in not_empty_labels: - if label.name in multi_label_classes: - label_schema.add_group( - LabelGroup( - name=label.name, - 
labels=[label], - group_type=LabelGroupType.EXCLUSIVE, - ) - ) - if empty_group not in label_schema.get_groups(include_empty=True): - label_schema.add_group(empty_group) - elif label.name in single_label_classes: - single_labels.append(label) - if single_labels: - single_label_group = LabelGroup( - name="labels", - labels=single_labels, - group_type=LabelGroupType.EXCLUSIVE, - ) - label_schema.add_group(single_label_group) - else: - main_group = LabelGroup( - name="labels", - labels=not_empty_labels, - group_type=LabelGroupType.EXCLUSIVE, - ) - label_schema.add_group(main_group) - return label_schema - - @staticmethod - def setup_configurable_parameters(template_dir, num_iters=10): - model_template = parse_model_template(osp.join(template_dir, "template.yaml")) - hyper_parameters = create(model_template.hyper_parameters.data) - hyper_parameters.learning_parameters.num_iters = num_iters - return hyper_parameters, model_template - - def init_environment(self, params, model_template, multilabel, hierarchical, number_of_images=10): - resolution = (224, 224) - if hierarchical: - colors = [(0, 255, 0), (0, 0, 255), (255, 0, 0), (0, 0, 0), (230, 230, 250)] - cls_names = ["b", "g", "r", "w", "p"] - texts = ["Blue", "Green", "Red", "White", "Purple"] - else: - colors = [(0, 255, 0), (0, 0, 255)] - cls_names = ["b", "g"] - texts = ["Blue", "Green"] - env_labels = [ - LabelEntity( - name=name, domain=Domain.CLASSIFICATION, is_empty=False, id=ID(i) - ) - for i, name in enumerate(cls_names) - ] - - items = [] - - for _ in range(0, number_of_images): - for j, lbl in enumerate(env_labels): - class_img = np.zeros((*resolution, 3), dtype=np.uint8) - class_img[:] = colors[j] - class_img = cv.putText( - class_img, - texts[j], - (50, 50), - cv.FONT_HERSHEY_SIMPLEX, - 0.8 + j * 0.2, - colors[j - 1], - 2, - cv.LINE_AA, - ) - - image = Image(data=class_img) - labels = [ScoredLabel(label=lbl, probability=1.0)] - shapes = [Annotation(Rectangle.generate_full_box(), labels)] - annotation_scene = AnnotationSceneEntity( - kind=AnnotationSceneKind.ANNOTATION, annotations=shapes - ) - items.append( - DatasetItemEntity(media=image, annotation_scene=annotation_scene) - ) - - rng = random.Random() - rng.seed(100) - rng.shuffle(items) - for i, _ in enumerate(items): - subset_region = i / number_of_images - if subset_region >= 0.9: - subset = Subset.TESTING - elif subset_region >= 0.6: - subset = Subset.VALIDATION - else: - subset = Subset.TRAINING - items[i].subset = subset - - dataset = DatasetEntity(items) - labels_schema = self.generate_label_schema( - dataset.get_labels(), multilabel=multilabel, hierarchical=hierarchical - ) - environment = TaskEnvironment( - model=None, - hyper_parameters=params, - label_schema=labels_schema, - model_template=model_template, - ) - return environment, dataset - - @e2e_pytest_api - @pytest.mark.parametrize( - "multilabel,hierarchical", [(False, False), (True, False), (False, True)], ids=["multiclass", "multilabel", "hierarchical"] - ) - def test_training_progress_tracking(self, multilabel, hierarchical): - hyper_parameters, model_template = self.setup_configurable_parameters( - DEFAULT_CLS_TEMPLATE_DIR, num_iters=5 - ) - task_environment, dataset = self.init_environment( - hyper_parameters, model_template, multilabel, hierarchical, 20 - ) - task = ClassificationTrainTask(task_environment=task_environment) - print("Task initialized, model training starts.") - - training_progress_curve = [] - - def progress_callback(progress: float, score: Optional[float] = None): - 
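            # Progress arrives as a percentage; recording every callback value lets
            # the assertions below verify that the reported curve never decreases.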
training_progress_curve.append(progress) - - train_parameters = TrainParameters - train_parameters.update_progress = progress_callback - output_model = ModelEntity(dataset, task_environment.get_model_configuration(),) - task.train(dataset, output_model, train_parameters) - - assert len(training_progress_curve) > 0 - assert np.all(training_progress_curve[1:] >= training_progress_curve[:-1]) - - @e2e_pytest_api - @pytest.mark.parametrize( - "multilabel,hierarchical", [(False, False), (True, False), (False, True)], ids=["multiclass", "multilabel", "hierarchical"] - ) - def test_inference_progress_tracking(self, multilabel, hierarchical): - hyper_parameters, model_template = self.setup_configurable_parameters( - DEFAULT_CLS_TEMPLATE_DIR, num_iters=5 - ) - task_environment, dataset = self.init_environment( - hyper_parameters, model_template, multilabel, hierarchical, 20 - ) - task = ClassificationInferenceTask(task_environment=task_environment) - print("Task initialized, model inference starts.") - - inference_progress_curve = [] - - def progress_callback(progress: int): - inference_progress_curve.append(progress) - - inference_parameters = InferenceParameters - inference_parameters.update_progress = progress_callback - task.infer(dataset.with_empty_annotations(), inference_parameters) - - assert len(inference_progress_curve) > 0 - assert np.all(inference_progress_curve[1:] >= inference_progress_curve[:-1]) - - @e2e_pytest_api - @pytest.mark.parametrize( - "multilabel,hierarchical", [(False, False), (True, False), (False, True)], ids=["multiclass", "multilabel", "hierarchical"] - ) - def test_inference_task(self, multilabel, hierarchical): - # Prepare pretrained weights - hyper_parameters, model_template = self.setup_configurable_parameters( - DEFAULT_CLS_TEMPLATE_DIR, num_iters=2 - ) - classification_environment, dataset = self.init_environment( - hyper_parameters, model_template, multilabel, hierarchical, 50 - ) - val_dataset = dataset.get_subset(Subset.VALIDATION) - - train_task = ClassificationTrainTask( - task_environment=classification_environment - ) - - training_progress_curve = [] - - def progress_callback(progress: float, score: Optional[float] = None): - training_progress_curve.append(progress) - - train_parameters = TrainParameters - train_parameters.update_progress = progress_callback - trained_model = ModelEntity( - dataset, classification_environment.get_model_configuration(), - ) - train_task.train(dataset, trained_model, train_parameters) - performance_after_train = eval(train_task, trained_model, val_dataset) - - # Create InferenceTask - classification_environment.model = trained_model - inference_task = ClassificationInferenceTask( - task_environment=classification_environment - ) - - performance_after_load = eval(inference_task, trained_model, val_dataset) - - assert performance_after_train == performance_after_load - - # Export - exported_model = ModelEntity( - dataset, - classification_environment.get_model_configuration(), - _id=ObjectId(), - ) - inference_task.export(ExportType.OPENVINO, exported_model) - - -class TestMPADetAPI: - """ - Collection of tests for OTE API and OTE Model Templates - """ - - @e2e_pytest_api - def test_reading_detection_cls_incr_model_template(self): - detection_template = ["mobilenetv2_atss_cls_incr"] - for model_template in detection_template: - parse_model_template( - osp.join("configs", "detection", model_template, "template.yaml") - ) - - def init_environment( - self, params, model_template, number_of_images=500, 
task_type=TaskType.DETECTION - ): - - labels_names = ("rectangle", "ellipse", "triangle") - labels_schema = generate_label_schema( - labels_names, task_type_to_label_domain(task_type) - ) - labels_list = labels_schema.get_labels(False) - environment = TaskEnvironment( - model=None, - hyper_parameters=params, - label_schema=labels_schema, - model_template=model_template, - ) - - warnings.filterwarnings( - "ignore", message=".* coordinates .* are out of bounds.*" - ) - items = [] - for i in range(0, number_of_images): - image_numpy, annos = generate_random_annotated_image( - image_width=640, - image_height=480, - labels=labels_list, - max_shapes=20, - min_size=50, - max_size=100, - random_seed=None, - ) - # Convert shapes according to task - for anno in annos: - if task_type == TaskType.INSTANCE_SEGMENTATION: - anno.shape = ShapeFactory.shape_as_polygon(anno.shape) - else: - anno.shape = ShapeFactory.shape_as_rectangle(anno.shape) - - image = Image(data=image_numpy) - annotation_scene = AnnotationSceneEntity( - kind=AnnotationSceneKind.ANNOTATION, annotations=annos - ) - items.append( - DatasetItemEntity(media=image, annotation_scene=annotation_scene) - ) - warnings.resetwarnings() - - rng = random.Random() - rng.shuffle(items) - for i, _ in enumerate(items): - subset_region = i / number_of_images - if subset_region >= 0.8: - subset = Subset.TESTING - elif subset_region >= 0.6: - subset = Subset.VALIDATION - else: - subset = Subset.TRAINING - items[i].subset = subset - - dataset = DatasetEntity(items) - return environment, dataset - - @staticmethod - def setup_configurable_parameters(template_dir, num_iters=10): - glb = glob.glob(f"{template_dir}/template*.yaml") - template_path = glb[0] if glb else None - if not template_path: - raise RuntimeError(f"Template YAML not found: {template_dir}") - - model_template = parse_model_template(template_path) - hyper_parameters = create(model_template.hyper_parameters.data) - hyper_parameters.learning_parameters.num_iters = num_iters - hyper_parameters.postprocessing.result_based_confidence_threshold = False - hyper_parameters.postprocessing.confidence_threshold = 0.1 - return hyper_parameters, model_template - - @e2e_pytest_api - def test_cancel_training_detection(self): - """ - Tests starting and cancelling training. - - Flow of the test: - - Creates a randomly annotated project with a small dataset containing 3 classes: - ['rectangle', 'triangle', 'circle']. - - Start training and give cancel training signal after 10 seconds. Assert that training - stops within 35 seconds after that - - Start training and give cancel signal immediately. Assert that training stops within 25 seconds. - - This test should be finished in under one minute on a workstation. 
- """ - hyper_parameters, model_template = self.setup_configurable_parameters( - DEFAULT_DET_TEMPLATE_DIR, num_iters=500 - ) - detection_environment, dataset = self.init_environment( - hyper_parameters, model_template, 64 - ) - - detection_task = DetectionTrainTask(task_environment=detection_environment) - - executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="train_thread") - - output_model = ModelEntity( - dataset, detection_environment.get_model_configuration(), - ) - - training_progress_curve = [] - - def progress_callback(progress: float, score: Optional[float] = None): - training_progress_curve.append(progress) - - train_parameters = TrainParameters - train_parameters.update_progress = progress_callback - - # Test stopping after some time - start_time = time.time() - train_future = executor.submit( - detection_task.train, dataset, output_model, train_parameters - ) - # give train_thread some time to initialize the model - while not detection_task._is_training: - time.sleep(10) - detection_task.cancel_training() - - # stopping process has to happen in less than 35 seconds - train_future.result() - assert training_progress_curve[-1] == 100 - assert time.time() - start_time < 100, "Expected to stop within 100 seconds." - - # Test stopping immediately - start_time = time.time() - train_future = executor.submit(detection_task.train, dataset, output_model) - detection_task.cancel_training() - - train_future.result() - assert ( - time.time() - start_time < 25 - ) # stopping process has to happen in less than 25 seconds - - @e2e_pytest_api - def test_training_progress_tracking(self): - hyper_parameters, model_template = self.setup_configurable_parameters( - DEFAULT_DET_TEMPLATE_DIR, num_iters=5 - ) - detection_environment, dataset = self.init_environment( - hyper_parameters, model_template, 50 - ) - - task = DetectionTrainTask(task_environment=detection_environment) - print("Task initialized, model training starts.") - - training_progress_curve = [] - - def progress_callback(progress: float, score: Optional[float] = None): - training_progress_curve.append(progress) - - train_parameters = TrainParameters - train_parameters.update_progress = progress_callback - output_model = ModelEntity( - dataset, detection_environment.get_model_configuration(), - ) - task.train(dataset, output_model, train_parameters) - - assert len(training_progress_curve) > 0 - assert np.all(training_progress_curve[1:] >= training_progress_curve[:-1]) - - @e2e_pytest_api - def test_inference_progress_tracking(self): - hyper_parameters, model_template = self.setup_configurable_parameters( - DEFAULT_DET_TEMPLATE_DIR, num_iters=10 - ) - detection_environment, dataset = self.init_environment( - hyper_parameters, model_template, 50 - ) - - task = DetectionInferenceTask(task_environment=detection_environment) - print("Task initialized, model inference starts.") - inference_progress_curve = [] - - def progress_callback(progress: int): - assert isinstance(progress, int) - inference_progress_curve.append(progress) - - inference_parameters = InferenceParameters - inference_parameters.update_progress = progress_callback - task.infer(dataset.with_empty_annotations(), inference_parameters) - - assert len(inference_progress_curve) > 0 - assert np.all(inference_progress_curve[1:] >= inference_progress_curve[:-1]) - - @e2e_pytest_api - def test_inference_task(self): - # Prepare pretrained weights - hyper_parameters, model_template = self.setup_configurable_parameters( - DEFAULT_DET_TEMPLATE_DIR, num_iters=2 - ) - 
detection_environment, dataset = self.init_environment( - hyper_parameters, model_template, 50 - ) - val_dataset = dataset.get_subset(Subset.VALIDATION) - - train_task = DetectionTrainTask(task_environment=detection_environment) - - training_progress_curve = [] - - def progress_callback(progress: float, score: Optional[float] = None): - training_progress_curve.append(progress) - - train_parameters = TrainParameters - train_parameters.update_progress = progress_callback - trained_model = ModelEntity( - dataset, detection_environment.get_model_configuration(), - ) - train_task.train(dataset, trained_model, train_parameters) - performance_after_train = eval(train_task, trained_model, val_dataset) - - # Create InferenceTask - detection_environment.model = trained_model - inference_task = DetectionInferenceTask(task_environment=detection_environment) - - performance_after_load = eval(inference_task, trained_model, val_dataset) - - assert performance_after_train == performance_after_load - - # Export - exported_model = ModelEntity( - dataset, detection_environment.get_model_configuration(), _id=ObjectId() - ) - inference_task.export(ExportType.OPENVINO, exported_model) - - -class TestMPASegAPI: - """ - Collection of tests for OTE API and OTE Model Templates - """ - - @e2e_pytest_api - def test_reading_segmentation_cls_incr_model_template(self): - segmentation_template = [ - "ocr-lite-hrnet-18-mod2", - "ocr-lite-hrnet-s-mod2", - "ocr-lite-hrnet-x-mod3", - ] - for model_template in segmentation_template: - parse_model_template( - osp.join("configs", "segmentation", model_template, "template.yaml") - ) - - @staticmethod - def generate_label_schema(label_names): - label_domain = Domain.SEGMENTATION - rgb = [int(i) for i in np.random.randint(0, 256, 3)] - colors = [Color(*rgb) for _ in range(len(label_names))] - not_empty_labels = [ - LabelEntity(name=name, color=colors[i], domain=label_domain, id=i) - for i, name in enumerate(label_names) - ] - empty_label = LabelEntity( - name="Empty label", - color=Color(42, 43, 46), - is_empty=True, - domain=label_domain, - id=len(not_empty_labels), - ) - - label_schema = LabelSchemaEntity() - exclusive_group = LabelGroup( - name="labels", labels=not_empty_labels, group_type=LabelGroupType.EXCLUSIVE - ) - empty_group = LabelGroup( - name="empty", labels=[empty_label], group_type=LabelGroupType.EMPTY_LABEL - ) - label_schema.add_group(exclusive_group) - label_schema.add_group(empty_group) - return label_schema - - def init_environment(self, params, model_template, number_of_images=10): - labels_names = ("rectangle", "ellipse", "triangle") - labels_schema = self.generate_label_schema(labels_names) - labels_list = labels_schema.get_labels(False) - environment = TaskEnvironment( - model=None, - hyper_parameters=params, - label_schema=labels_schema, - model_template=model_template, - ) - - warnings.filterwarnings( - "ignore", message=".* coordinates .* are out of bounds.*" - ) - items = [] - for i in range(0, number_of_images): - image_numpy, shapes = generate_random_annotated_image( - image_width=640, - image_height=480, - labels=labels_list, - max_shapes=20, - min_size=50, - max_size=100, - random_seed=None, - ) - # Convert all shapes to polygons - out_shapes = [] - for shape in shapes: - shape_labels = shape.get_labels(include_empty=True) - - in_shape = shape.shape - if isinstance(in_shape, Rectangle): - points = [ - Point(in_shape.x1, in_shape.y1), - Point(in_shape.x2, in_shape.y1), - Point(in_shape.x2, in_shape.y2), - Point(in_shape.x1, in_shape.y2), - ] - elif 
isinstance(in_shape, Ellipse): - points = [ - Point(x, y) - for x, y in in_shape.get_evenly_distributed_ellipse_coordinates() - ] - elif isinstance(in_shape, Polygon): - points = in_shape.points - - out_shapes.append( - Annotation(Polygon(points=points), labels=shape_labels) - ) - - image = Image(data=image_numpy) - annotation = AnnotationSceneEntity( - kind=AnnotationSceneKind.ANNOTATION, annotations=out_shapes - ) - items.append(DatasetItemEntity(media=image, annotation_scene=annotation)) - warnings.resetwarnings() - - rng = random.Random() - rng.shuffle(items) - for i, _ in enumerate(items): - subset_region = i / number_of_images - if subset_region >= 0.8: - subset = Subset.TESTING - elif subset_region >= 0.6: - subset = Subset.VALIDATION - else: - subset = Subset.TRAINING - - items[i].subset = subset - - dataset = DatasetEntity(items) - - return environment, dataset - - @staticmethod - def setup_configurable_parameters(template_dir, num_iters=10): - model_template = parse_model_template(osp.join(template_dir, "template.yaml")) - - hyper_parameters = create(model_template.hyper_parameters.data) - hyper_parameters.learning_parameters.learning_rate_fixed_iters = 0 - hyper_parameters.learning_parameters.learning_rate_warmup_iters = 1 - hyper_parameters.learning_parameters.num_iters = num_iters - hyper_parameters.learning_parameters.num_checkpoints = 1 - - return hyper_parameters, model_template - - @e2e_pytest_api - def test_cancel_training_segmentation(self): - """ - Tests starting and cancelling training. - - Flow of the test: - - Creates a randomly annotated project with a small dataset. - - Start training and give cancel training signal after 10 seconds. Assert that training - stops within 35 seconds after that - - Start training and give cancel signal immediately. Assert that training stops within 25 seconds. - - This test should be finished in under one minute on a workstation. - """ - hyper_parameters, model_template = self.setup_configurable_parameters( - DEFAULT_SEG_TEMPLATE_DIR, num_iters=200 - ) - segmentation_environment, dataset = self.init_environment( - hyper_parameters, model_template, 64 - ) - - segmentation_task = SegmentationTrainTask( - task_environment=segmentation_environment - ) - - executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="train_thread") - - output_model = ModelEntity( - dataset, segmentation_environment.get_model_configuration(), - ) - - training_progress_curve = [] - - def progress_callback(progress: float, score: Optional[float] = None): - training_progress_curve.append(progress) - - train_parameters = TrainParameters - train_parameters.update_progress = progress_callback - - # Test stopping after some time - start_time = time.time() - train_future = executor.submit( - segmentation_task.train, dataset, output_model, train_parameters - ) - # give train_thread some time to initialize the model - while not segmentation_task._is_training: - time.sleep(10) - segmentation_task.cancel_training() - - # stopping process has to happen in less than 35 seconds - train_future.result() - assert training_progress_curve[-1] == 100 - assert time.time() - start_time < 100, "Expected to stop within 100 seconds." 
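Both cancel-training tests in this module share the same submit/poll/cancel shape; a minimal generic sketch, where task, dataset, and output_model stand in for the objects built above:

    import time
    from concurrent.futures import ThreadPoolExecutor

    def train_then_cancel(task, dataset, output_model):
        executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="train_thread")
        future = executor.submit(task.train, dataset, output_model)
        while not task._is_training:  # wait until the train thread has set up the model
            time.sleep(10)
        task.cancel_training()  # request a graceful stop
        future.result()  # blocks; re-raises any exception from the train thread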
- - # Test stopping immediately - start_time = time.time() - train_future = executor.submit(segmentation_task.train, dataset, output_model) - segmentation_task.cancel_training() - - train_future.result() - assert ( - time.time() - start_time < 25 - ) # stopping process has to happen in less than 25 seconds - - @e2e_pytest_api - def test_training_progress_tracking(self): - hyper_parameters, model_template = self.setup_configurable_parameters( - DEFAULT_SEG_TEMPLATE_DIR, num_iters=5 - ) - segmentation_environment, dataset = self.init_environment( - hyper_parameters, model_template, 12 - ) - - task = SegmentationTrainTask(task_environment=segmentation_environment) - print("Task initialized, model training starts.") - - training_progress_curve = [] - - def progress_callback(progress: float, score: Optional[float] = None): - training_progress_curve.append(progress) - - train_parameters = TrainParameters - train_parameters.update_progress = progress_callback - output_model = ModelEntity( - dataset, segmentation_environment.get_model_configuration(), - ) - task.train(dataset, output_model, train_parameters) - - assert len(training_progress_curve) > 0 - assert np.all(training_progress_curve[1:] >= training_progress_curve[:-1]) - - @e2e_pytest_api - def test_inference_progress_tracking(self): - hyper_parameters, model_template = self.setup_configurable_parameters( - DEFAULT_SEG_TEMPLATE_DIR, num_iters=10 - ) - segmentation_environment, dataset = self.init_environment( - hyper_parameters, model_template, 12 - ) - - task = SegmentationInferenceTask(task_environment=segmentation_environment) - print("Task initialized, model inference starts.") - - inference_progress_curve = [] - - def progress_callback(progress: int): - assert isinstance(progress, int) - inference_progress_curve.append(progress) - - inference_parameters = InferenceParameters - inference_parameters.update_progress = progress_callback - task.infer(dataset.with_empty_annotations(), inference_parameters) - - assert len(inference_progress_curve) > 0 - assert np.all(inference_progress_curve[1:] >= inference_progress_curve[:-1]) - - @e2e_pytest_api - def test_inference_task(self): - # Prepare pretrained weights - hyper_parameters, model_template = self.setup_configurable_parameters( - DEFAULT_SEG_TEMPLATE_DIR, num_iters=2 - ) - segmentation_environment, dataset = self.init_environment( - hyper_parameters, model_template, 30 - ) - val_dataset = dataset.get_subset(Subset.VALIDATION) - - train_task = SegmentationTrainTask(task_environment=segmentation_environment) - - training_progress_curve = [] - - def progress_callback(progress: float, score: Optional[float] = None): - training_progress_curve.append(progress) - - train_parameters = TrainParameters - train_parameters.update_progress = progress_callback - trained_model = ModelEntity( - dataset, segmentation_environment.get_model_configuration(), - ) - train_task.train(dataset, trained_model, train_parameters) - performance_after_train = eval(train_task, trained_model, val_dataset) - - # Create InferenceTask - segmentation_environment.model = trained_model - inference_task = SegmentationInferenceTask( - task_environment=segmentation_environment - ) - - performance_after_load = eval(inference_task, trained_model, val_dataset) - - assert performance_after_train == performance_after_load - - # Export - exported_model = ModelEntity( - dataset, segmentation_environment.get_model_configuration(), _id=ObjectId() - ) - inference_task.export(ExportType.OPENVINO, exported_model) diff --git 
a/external/model-preparation-algorithm/tests/test_ote_classification_training.py b/external/model-preparation-algorithm/tests/test_ote_classification_training.py
new file mode 100644
index 00000000000..0e10a942727
--- /dev/null
+++ b/external/model-preparation-algorithm/tests/test_ote_classification_training.py
@@ -0,0 +1,211 @@
+# Copyright (C) 2022 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import logging
+import os
+import os.path as osp
+from copy import deepcopy
+from pprint import pformat
+from typing import Any, Callable, Dict, List, Optional, Type
+
+import pytest
+from ote_sdk.test_suite.e2e_test_system import DataCollector, e2e_pytest_performance
+from ote_sdk.test_suite.training_test_case import (
+    OTETestCaseInterface,
+    generate_ote_integration_test_case_class,
+)
+from ote_sdk.test_suite.training_tests_common import (
+    KEEP_CONFIG_FIELD_VALUE,
+    REALLIFE_USECASE_CONSTANT,
+    ROOT_PATH_KEY,
+    make_path_be_abs,
+)
+from ote_sdk.test_suite.training_tests_helper import (
+    DefaultOTETestCreationParametersInterface,
+    OTETestHelper,
+    OTETrainingTestInterface,
+)
+
+from tests.mpa_common import (
+    _create_classification_dataset_and_labels_schema,
+    _get_dataset_params_from_dataset_definitions,
+    get_test_action_classes,
+)
+
+logger = logging.getLogger(__name__)
+
+
+@pytest.fixture
+def ote_test_domain_fx():
+    return "custom-classification-cls-incr"
+
+
+class ClassificationClsIncrTrainingTestParameters(DefaultOTETestCreationParametersInterface):
+    def test_case_class(self) -> Type[OTETestCaseInterface]:
+        return generate_ote_integration_test_case_class(get_test_action_classes())
+
+    def test_bunches(self) -> List[Dict[str, Any]]:
+        test_bunches = [
+            dict(
+                model_name=[
+                    "Custom_Image_Classification_EfficinetNet-B0",
+                    "Custom_Image_Classification_EfficientNet-V2-S",
+                    "Custom_Image_Classification_MobileNet-V3-large-1x",
+                ],
+                dataset_name=["cifar10_cls_incr"],
+                usecase="precommit",
+            ),
+            dict(
+                model_name=[
+                    "Custom_Image_Classification_EfficinetNet-B0",
+                    "Custom_Image_Classification_EfficientNet-V2-S",
+                    "Custom_Image_Classification_MobileNet-V3-large-1x",
+                ],
+                dataset_name=["cifar10_cls_incr"],
+                num_training_iters=KEEP_CONFIG_FIELD_VALUE,
+                batch_size=KEEP_CONFIG_FIELD_VALUE,
+                usecase=REALLIFE_USECASE_CONSTANT,
+            ),
+        ]
+
+        return deepcopy(test_bunches)
+
+    def default_test_parameters(self) -> Dict[str, Any]:
+        DEFAULT_TEST_PARAMETERS = {
+            "num_training_iters": 2,
+            "batch_size": 16,
+        }
+        return deepcopy(DEFAULT_TEST_PARAMETERS)
+
+
+class TestOTEReallifeMPACls(OTETrainingTestInterface):
+    """
+    The main class running the tests in this file.
+    """
+
+    PERFORMANCE_RESULTS = None  # required by the e2e system
+    helper = OTETestHelper(ClassificationClsIncrTrainingTestParameters())
+
+    @classmethod
+    def get_list_of_tests(cls, usecase: Optional[str] = None):
+        """
+        This method should be a classmethod. It is called before fixture initialization, during
+        test discovery.
+ """ + return cls.helper.get_list_of_tests(usecase) + + @pytest.fixture + def params_factories_for_test_actions_fx( + self, + current_test_parameters_fx, + dataset_definitions_fx, + template_paths_fx, + ote_current_reference_dir_fx, + ) -> Dict[str, Callable[[], Dict]]: + logger.debug("params_factories_for_test_actions_fx: begin") + + test_parameters = deepcopy(current_test_parameters_fx) + dataset_definitions = deepcopy(dataset_definitions_fx) + template_paths = deepcopy(template_paths_fx) + + def _training_params_factory() -> Dict: + if dataset_definitions is None: + pytest.skip('The parameter "--dataset-definitions" is not set') + + model_name = test_parameters["model_name"] + dataset_name = test_parameters["dataset_name"] + num_training_iters = test_parameters["num_training_iters"] + batch_size = test_parameters["batch_size"] + + dataset_params = _get_dataset_params_from_dataset_definitions(dataset_definitions, dataset_name) + + if model_name not in template_paths: + raise ValueError( + f"Model {model_name} is absent in template_paths, " + f"template_paths.keys={list(template_paths.keys())}" + ) + template_path = make_path_be_abs(template_paths[model_name], template_paths[ROOT_PATH_KEY]) + + logger.debug("training params factory: Before creating dataset and labels_schema") + dataset, labels_schema = _create_classification_dataset_and_labels_schema(dataset_params, model_name) + ckpt_path = None + if hasattr(dataset_params, "pre_trained_model"): + ckpt_path = osp.join( + osp.join(dataset_params.pre_trained_model, model_name), + "weights.pth", + ) + logger.info(f"Pretrained path : {ckpt_path}") + logger.debug("training params factory: After creating dataset and labels_schema") + + return { + "dataset": dataset, + "labels_schema": labels_schema, + "template_path": template_path, + "num_training_iters": num_training_iters, + "batch_size": batch_size, + "checkpoint": ckpt_path, + } + + params_factories_for_test_actions = { + "training": _training_params_factory, + } + logger.debug("params_factories_for_test_actions_fx: end") + return params_factories_for_test_actions + + @pytest.fixture + def test_case_fx(self, current_test_parameters_fx, params_factories_for_test_actions_fx): + """ + This fixture returns the test case class OTEIntegrationTestCase that should be used for the current test. + Note that the cache from the test helper allows to store the instance of the class + between the tests. + If the main parameters used for this test are the same as the main parameters used for the previous test, + the instance of the test case class will be kept and re-used. It is helpful for tests that can + re-use the result of operations (model training, model optimization, etc) made for the previous tests, + if these operations are time-consuming. + If the main parameters used for this test differs w.r.t. the previous test, a new instance of + test case class will be created. + """ + test_case = type(self).helper.get_test_case(current_test_parameters_fx, params_factories_for_test_actions_fx) + return test_case + + # TODO: move to common fixtures + @pytest.fixture + def data_collector_fx(self, request) -> DataCollector: + setup = deepcopy(request.node.callspec.params) + setup["environment_name"] = os.environ.get("TT_ENVIRONMENT_NAME", "no-env") + setup["test_type"] = os.environ.get("TT_TEST_TYPE", "no-test-type") # TODO: get from e2e test type + setup["scenario"] = "api" # TODO(lbeynens): get from a fixture! 
+ setup["test"] = request.node.name + setup["subject"] = "custom-classification-cls-incr" + setup["project"] = "ote" + if "test_parameters" in setup: + assert isinstance(setup["test_parameters"], dict) + if "dataset_name" not in setup: + setup["dataset_name"] = setup["test_parameters"].get("dataset_name") + if "model_name" not in setup: + setup["model_name"] = setup["test_parameters"].get("model_name") + if "test_stage" not in setup: + setup["test_stage"] = setup["test_parameters"].get("test_stage") + if "usecase" not in setup: + setup["usecase"] = setup["test_parameters"].get("usecase") + logger.info(f"creating DataCollector: setup=\n{pformat(setup, width=140)}") + data_collector = DataCollector(name="TestOTEIntegration", setup=setup) + with data_collector: + logger.info("data_collector is created") + yield data_collector + logger.info("data_collector is released") + + @e2e_pytest_performance + def test( + self, + test_parameters, + test_case_fx, + data_collector_fx, + cur_test_expected_metrics_callback_fx, + ): + test_case_fx.run_stage( + test_parameters["test_stage"], + data_collector_fx, + cur_test_expected_metrics_callback_fx, + ) diff --git a/external/model-preparation-algorithm/tests/test_ote_detection_training.py b/external/model-preparation-algorithm/tests/test_ote_detection_training.py new file mode 100644 index 00000000000..b64577debd4 --- /dev/null +++ b/external/model-preparation-algorithm/tests/test_ote_detection_training.py @@ -0,0 +1,247 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import logging +import os +import os.path as osp +from copy import deepcopy +from pprint import pformat +from typing import Any, Callable, Dict, List, Optional, Type + +import pytest +from ote_sdk.entities.label import Domain +from ote_sdk.test_suite.e2e_test_system import DataCollector, e2e_pytest_performance +from ote_sdk.test_suite.training_test_case import ( + OTETestCaseInterface, + generate_ote_integration_test_case_class, +) +from ote_sdk.test_suite.training_tests_common import ( + KEEP_CONFIG_FIELD_VALUE, + REALLIFE_USECASE_CONSTANT, + ROOT_PATH_KEY, + make_path_be_abs, +) +from ote_sdk.test_suite.training_tests_helper import ( + DefaultOTETestCreationParametersInterface, + OTETestHelper, + OTETrainingTestInterface, +) + +from tests.mpa_common import ( + get_test_action_classes, + _create_object_detection_dataset_and_labels_schema, + _get_dataset_params_from_dataset_definitions, +) + +logger = logging.getLogger(__name__) + + +@pytest.fixture +def ote_test_domain_fx(): + return "custom-detection-cls-incr" + + +class DetectionClsIncrTrainingTestParameters(DefaultOTETestCreationParametersInterface): + def test_case_class(self) -> Type[OTETestCaseInterface]: + return generate_ote_integration_test_case_class(get_test_action_classes()) + + def test_bunches(self) -> List[Dict[str, Any]]: + test_bunches = [ + dict( + model_name=["Custom_Object_Detection_Gen3_ATSS"], + dataset_name="coco_cls_incr", + usecase="precommit", + ), + dict( + model_name=["Custom_Object_Detection_Gen3_ATSS"], + dataset_name="coco_cls_incr", + num_training_iters=KEEP_CONFIG_FIELD_VALUE, + batch_size=KEEP_CONFIG_FIELD_VALUE, + usecase=REALLIFE_USECASE_CONSTANT, + ), + ] + return deepcopy(test_bunches) + + +def get_dummy_compressed_model(task): + """ + Return compressed model without initialization + """ + # pylint:disable=protected-access + from mmdet.integration.nncf import wrap_nncf_model + from mmdet.apis.fake_input import get_fake_input + + # Disable quantaizers initialization 
+ for compression in task._config.nncf_config["compression"]: + if compression["algorithm"] == "quantization": + compression["initializer"] = {"batchnorm_adaptation": {"num_bn_adaptation_samples": 0}} + + _, compressed_model = wrap_nncf_model(task._model, task._config, get_fake_input_func=get_fake_input) + return compressed_model + + +class TestOTEReallifeMPADet(OTETrainingTestInterface): + """ + The main class running the tests in this file. + """ + + PERFORMANCE_RESULTS = None # required by the e2e system + helper = OTETestHelper(DetectionClsIncrTrainingTestParameters()) + + @classmethod + def get_list_of_tests(cls, usecase: Optional[str] = None): + """ + This method should be a classmethod. It is called before fixture initialization, during + test discovery. + """ + return cls.helper.get_list_of_tests(usecase) + + @pytest.fixture + def params_factories_for_test_actions_fx( + self, + current_test_parameters_fx, + dataset_definitions_fx, + template_paths_fx, + ote_current_reference_dir_fx, + ) -> Dict[str, Callable[[], Dict]]: + logger.debug("params_factories_for_test_actions_fx: begin") + + test_parameters = deepcopy(current_test_parameters_fx) + dataset_definitions = deepcopy(dataset_definitions_fx) + template_paths = deepcopy(template_paths_fx) + + def _training_params_factory() -> Dict: + if dataset_definitions is None: + pytest.skip('The parameter "--dataset-definitions" is not set') + + model_name = test_parameters["model_name"] + dataset_name = test_parameters["dataset_name"] + num_training_iters = test_parameters["num_training_iters"] + batch_size = test_parameters["batch_size"] + + dataset_params = _get_dataset_params_from_dataset_definitions(dataset_definitions, dataset_name) + + if model_name not in template_paths: + raise ValueError( + f"Model {model_name} is absent in template_paths, " + f"template_paths.keys={list(template_paths.keys())}" + ) + template_path = make_path_be_abs(template_paths[model_name], template_paths[ROOT_PATH_KEY]) + + logger.debug("training params factory: Before creating dataset and labels_schema") + dataset, labels_schema = _create_object_detection_dataset_and_labels_schema(dataset_params) + ckpt_path = None + if hasattr(dataset_params, "pre_trained_model"): + ckpt_path = osp.join( + osp.join(dataset_params.pre_trained_model, model_name), + "weights.pth", + ) + logger.debug("training params factory: After creating dataset and labels_schema") + + return { + "dataset": dataset, + "labels_schema": labels_schema, + "template_path": template_path, + "num_training_iters": num_training_iters, + "batch_size": batch_size, + "checkpoint": ckpt_path, + } + + def _nncf_graph_params_factory() -> Dict: + if dataset_definitions is None: + pytest.skip('The parameter "--dataset-definitions" is not set') + + model_name = test_parameters["model_name"] + if "Custom_Object_Detection" in model_name: + domain = Domain.DETECTION + elif "Custom_Counting_Instance_Segmentation" in model_name: + domain = Domain.INSTANCE_SEGMENTATION + else: + domain = None + dataset_name = test_parameters["dataset_name"] + + dataset_params = _get_dataset_params_from_dataset_definitions(dataset_definitions, dataset_name) + + if model_name not in template_paths: + raise ValueError( + f"Model {model_name} is absent in template_paths, " + f"template_paths.keys={list(template_paths.keys())}" + ) + template_path = make_path_be_abs(template_paths[model_name], template_paths[ROOT_PATH_KEY]) + + logger.debug("nncf graph params factory: Before creating dataset and labels_schema") + dataset, labels_schema
= _create_object_detection_dataset_and_labels_schema(dataset_params, domain) + logger.debug("nncf graph params factory: After creating dataset and labels_schema") + + return { + "dataset": dataset, + "labels_schema": labels_schema, + "template_path": template_path, + "reference_dir": ote_current_reference_dir_fx, + "fn_get_compressed_model": get_dummy_compressed_model, + } + + params_factories_for_test_actions = { + "training": _training_params_factory, + # "nncf_graph": _nncf_graph_params_factory, + } + logger.debug("params_factories_for_test_actions_fx: end") + return params_factories_for_test_actions + + @pytest.fixture + def test_case_fx(self, current_test_parameters_fx, params_factories_for_test_actions_fx): + """ + This fixture returns the test case instance (of type OTEIntegrationTestCase) that should be used for the current test. + Note that the cache in the test helper allows storing the instance between tests. + If the main parameters used for this test are the same as those used for the previous test, + the instance of the test case class is kept and re-used. This helps tests that can + re-use the results of time-consuming operations (model training, model optimization, etc.) + performed for previous tests. + If the main parameters used for this test differ from those of the previous test, a new + instance of the test case class is created. + """ + test_case = type(self).helper.get_test_case(current_test_parameters_fx, params_factories_for_test_actions_fx) + return test_case + + # TODO: move to common fixtures + @pytest.fixture + def data_collector_fx(self, request) -> DataCollector: + setup = deepcopy(request.node.callspec.params) + setup["environment_name"] = os.environ.get("TT_ENVIRONMENT_NAME", "no-env") + setup["test_type"] = os.environ.get("TT_TEST_TYPE", "no-test-type") # TODO: get from e2e test type + setup["scenario"] = "api" # TODO: get from a fixture!
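+ # Same run-labelling bookkeeping as in the classification data_collector_fx above; + # see the TODO about moving this fixture to common fixtures.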
+ setup["test"] = request.node.name + setup["subject"] = "custom-detection-cls-incr" + setup["project"] = "ote" + if "test_parameters" in setup: + assert isinstance(setup["test_parameters"], dict) + if "dataset_name" not in setup: + setup["dataset_name"] = setup["test_parameters"].get("dataset_name") + if "model_name" not in setup: + setup["model_name"] = setup["test_parameters"].get("model_name") + if "test_stage" not in setup: + setup["test_stage"] = setup["test_parameters"].get("test_stage") + if "usecase" not in setup: + setup["usecase"] = setup["test_parameters"].get("usecase") + logger.info(f"creating DataCollector: setup=\n{pformat(setup, width=140)}") + data_collector = DataCollector(name="TestOTEIntegration", setup=setup) + with data_collector: + logger.info("data_collector is created") + yield data_collector + logger.info("data_collector is released") + + @e2e_pytest_performance + def test( + self, + test_parameters, + test_case_fx, + data_collector_fx, + cur_test_expected_metrics_callback_fx, + ): + test_case_fx.run_stage( + test_parameters["test_stage"], + data_collector_fx, + cur_test_expected_metrics_callback_fx, + ) diff --git a/external/model-preparation-algorithm/tests/test_ote_segmentation_training.py b/external/model-preparation-algorithm/tests/test_ote_segmentation_training.py new file mode 100644 index 00000000000..506e7f90a7c --- /dev/null +++ b/external/model-preparation-algorithm/tests/test_ote_segmentation_training.py @@ -0,0 +1,205 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import logging +import os +from copy import deepcopy +from pprint import pformat +from typing import Any, Callable, Dict, List, Optional, Type + +import pytest +from ote_sdk.test_suite.e2e_test_system import DataCollector, e2e_pytest_performance +from ote_sdk.test_suite.training_test_case import ( + OTETestCaseInterface, + generate_ote_integration_test_case_class, +) +from ote_sdk.test_suite.training_tests_common import ( + KEEP_CONFIG_FIELD_VALUE, + REALLIFE_USECASE_CONSTANT, + ROOT_PATH_KEY, + make_path_be_abs, +) +from ote_sdk.test_suite.training_tests_helper import ( + DefaultOTETestCreationParametersInterface, + OTETestHelper, + OTETrainingTestInterface, +) + +from tests.mpa_common import ( + get_test_action_classes, + _create_segmentation_dataset_and_labels_schema, + _get_dataset_params_from_dataset_definitions, +) + +logger = logging.getLogger(__name__) + + +@pytest.fixture +def ote_test_domain_fx(): + return "custom-segmentation-cls-incr" + + +class SegmentationClsIncrTrainingTestParameters(DefaultOTETestCreationParametersInterface): + def test_case_class(self) -> Type[OTETestCaseInterface]: + return generate_ote_integration_test_case_class(get_test_action_classes()) + + def test_bunches(self) -> List[Dict[str, Any]]: + test_bunches = [ + dict( + model_name=[ + "Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR", + "Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR", + "Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR", + ], + dataset_name="voc_cls_incr", + usecase="precommit", + ), + dict( + model_name=[ + "Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR", + "Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR", + "Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR", + ], + dataset_name="voc_cls_incr", + num_training_iters=KEEP_CONFIG_FIELD_VALUE, + batch_size=KEEP_CONFIG_FIELD_VALUE, + usecase=REALLIFE_USECASE_CONSTANT, + ), + ] + return deepcopy(test_bunches) + + +class TestOTEReallifeMPASeg(OTETrainingTestInterface): + """ + The 
main class running the tests in this file. + """ + + PERFORMANCE_RESULTS = None # required by the e2e system + helper = OTETestHelper(SegmentationClsIncrTrainingTestParameters()) + + @classmethod + def get_list_of_tests(cls, usecase: Optional[str] = None): + """ + This method should be a classmethod. It is called before fixture initialization, during + test discovery. + """ + return cls.helper.get_list_of_tests(usecase) + + @pytest.fixture + def params_factories_for_test_actions_fx( + self, + current_test_parameters_fx, + dataset_definitions_fx, + template_paths_fx, + ote_current_reference_dir_fx, + ) -> Dict[str, Callable[[], Dict]]: + logger.debug("params_factories_for_test_actions_fx: begin") + + test_parameters = deepcopy(current_test_parameters_fx) + dataset_definitions = deepcopy(dataset_definitions_fx) + template_paths = deepcopy(template_paths_fx) + + def _training_params_factory() -> Dict: + if dataset_definitions is None: + pytest.skip('The parameter "--dataset-definitions" is not set') + + model_name = test_parameters["model_name"] + dataset_name = test_parameters["dataset_name"] + num_training_iters = test_parameters["num_training_iters"] + batch_size = test_parameters["batch_size"] + + dataset_params = _get_dataset_params_from_dataset_definitions(dataset_definitions, dataset_name) + + if model_name not in template_paths: + raise ValueError( + f"Model {model_name} is absent in template_paths, " + f"template_paths.keys={list(template_paths.keys())}" + ) + template_path = make_path_be_abs(template_paths[model_name], template_paths[ROOT_PATH_KEY]) + + logger.debug("training params factory: Before creating dataset and labels_schema") + dataset, labels_schema = _create_segmentation_dataset_and_labels_schema(dataset_params) + import os.path as osp + + ckpt_path = None + if hasattr(dataset_params, "pre_trained_model"): + ckpt_path = osp.join( + osp.join(dataset_params.pre_trained_model, model_name), + "weights.pth", + ) + logger.debug("training params factory: After creating dataset and labels_schema") + + return { + "dataset": dataset, + "labels_schema": labels_schema, + "template_path": template_path, + "num_training_iters": num_training_iters, + "batch_size": batch_size, + "checkpoint": ckpt_path, + } + + params_factories_for_test_actions = { + "training": _training_params_factory, + } + logger.debug("params_factories_for_test_actions_fx: end") + return params_factories_for_test_actions + + @pytest.fixture + def test_case_fx(self, current_test_parameters_fx, params_factories_for_test_actions_fx): + """ + This fixture returns the test case instance (of type OTEIntegrationTestCase) that should be used for the current test. + Note that the cache in the test helper allows storing the instance between tests. + If the main parameters used for this test are the same as those used for the previous test, + the instance of the test case class is kept and re-used. This helps tests that can + re-use the results of time-consuming operations (model training, model optimization, etc.) + performed for previous tests. + If the main parameters used for this test differ from those of the previous test, a new + instance of the test case class is created.
+ """ + test_case = type(self).helper.get_test_case(current_test_parameters_fx, params_factories_for_test_actions_fx) + return test_case + + # TODO: move to common fixtures + @pytest.fixture + def data_collector_fx(self, request) -> DataCollector: + setup = deepcopy(request.node.callspec.params) + setup["environment_name"] = os.environ.get("TT_ENVIRONMENT_NAME", "no-env") + setup["test_type"] = os.environ.get("TT_TEST_TYPE", "no-test-type") # TODO: get from e2e test type + setup["scenario"] = "api" # TODO: get from a fixture! + setup["test"] = request.node.name + setup["subject"] = "custom-segmentation-cls-incr" + setup["project"] = "ote" + if "test_parameters" in setup: + assert isinstance(setup["test_parameters"], dict) + if "dataset_name" not in setup: + setup["dataset_name"] = setup["test_parameters"].get("dataset_name") + if "model_name" not in setup: + setup["model_name"] = setup["test_parameters"].get("model_name") + if "test_stage" not in setup: + setup["test_stage"] = setup["test_parameters"].get("test_stage") + if "usecase" not in setup: + setup["usecase"] = setup["test_parameters"].get("usecase") + logger.info(f"creating DataCollector: setup=\n{pformat(setup, width=140)}") + data_collector = DataCollector(name="TestOTEIntegration", setup=setup) + with data_collector: + logger.info("data_collector is created") + yield data_collector + logger.info("data_collector is released") + + @e2e_pytest_performance + def test( + self, + test_parameters, + test_case_fx, + data_collector_fx, + cur_test_expected_metrics_callback_fx, + ): + if "pot_evaluation" in test_parameters["test_stage"]: + pytest.xfail("Known issue CVS-84576") + test_case_fx.run_stage( + test_parameters["test_stage"], + data_collector_fx, + cur_test_expected_metrics_callback_fx, + ) diff --git a/external/model-preparation-algorithm/tests/test_ote_training.py b/external/model-preparation-algorithm/tests/test_ote_training.py deleted file mode 100644 index 2614efe7839..00000000000 --- a/external/model-preparation-algorithm/tests/test_ote_training.py +++ /dev/null @@ -1,710 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import logging -import os -import os.path as osp -from collections import namedtuple -from copy import deepcopy -from pprint import pformat -from typing import Any, Callable, Dict, List, Optional, Type - -import pytest -from ote_sdk.entities.datasets import DatasetEntity -from ote_sdk.entities.label import Domain -from ote_sdk.entities.label_schema import LabelSchemaEntity -from ote_sdk.entities.subset import Subset - -from torchreid_tasks.utils import ClassificationDatasetAdapter -from detection_tasks.extension.datasets.data_utils import load_dataset_items_coco_format -from segmentation_tasks.extension.datasets.mmdataset import load_dataset_items - -from ote_sdk.test_suite.e2e_test_system import DataCollector, e2e_pytest_performance -from ote_sdk.test_suite.training_test_case import ( - OTETestCaseInterface, - generate_ote_integration_test_case_class, -) -from ote_sdk.test_suite.training_tests_common import ( - make_path_be_abs, - make_paths_be_abs, - KEEP_CONFIG_FIELD_VALUE, - REALLIFE_USECASE_CONSTANT, - ROOT_PATH_KEY, -) -from ote_sdk.test_suite.training_tests_helper import ( - OTETestHelper, - DefaultOTETestCreationParametersInterface, - OTETrainingTestInterface, -) -from ote_sdk.test_suite.training_tests_actions import ( - OTETestTrainingAction, - BaseOTETestAction, - OTETestTrainingEvaluationAction, - OTETestExportAction, - OTETestExportEvaluationAction, - 
OTETestPotAction, - OTETestPotEvaluationAction, -) - - -logger = logging.getLogger(__name__) - - -def DATASET_PARAMETERS_FIELDS() -> List[str]: - return deepcopy( - [ - "annotations_train", - "images_train_dir", - "annotations_val", - "images_val_dir", - "annotations_test", - "images_test_dir", - "pre_trained_model", - ] - ) - - -DatasetParameters = namedtuple("DatasetParameters", DATASET_PARAMETERS_FIELDS()) - - -def get_test_action_classes() -> List[Type[BaseOTETestAction]]: - return [ - OTETestTrainingAction, - OTETestTrainingEvaluationAction, - OTETestExportAction, - OTETestExportEvaluationAction, - OTETestPotAction, - OTETestPotEvaluationAction, - ] - - -def _get_dataset_params_from_dataset_definitions(dataset_definitions, dataset_name): - if dataset_name not in dataset_definitions: - raise ValueError( - f"dataset {dataset_name} is absent in dataset_definitions, " - f"dataset_definitions.keys={list(dataset_definitions.keys())}" - ) - cur_dataset_definition = dataset_definitions[dataset_name] - training_parameters_fields = { - k: v - for k, v in cur_dataset_definition.items() - if k in DATASET_PARAMETERS_FIELDS() - } - make_paths_be_abs(training_parameters_fields, dataset_definitions[ROOT_PATH_KEY]) - - assert set(DATASET_PARAMETERS_FIELDS()) == set( - training_parameters_fields.keys() - ), f"ERROR: dataset definitions for name={dataset_name} does not contain all required fields" - assert all( - training_parameters_fields.values() - ), f"ERROR: dataset definitions for name={dataset_name} contains empty values for some required fields" - - params = DatasetParameters(**training_parameters_fields) - return params - - -def _create_classification_dataset_and_labels_schema(dataset_params, model_name): - logger.debug(f"Using for train annotation file {dataset_params.annotations_train}") - logger.debug(f"Using for val annotation file {dataset_params.annotations_val}") - - dataset = ClassificationDatasetAdapter( - train_data_root=osp.join(dataset_params.images_train_dir), - train_ann_file=osp.join(dataset_params.annotations_train), - val_data_root=osp.join(dataset_params.images_val_dir), - val_ann_file=osp.join(dataset_params.annotations_val), - test_data_root=osp.join(dataset_params.images_test_dir), - test_ann_file=osp.join(dataset_params.annotations_test), - ) - - labels_schema = LabelSchemaEntity.from_labels(dataset.get_labels()) - return dataset, labels_schema - - -def _create_object_detection_dataset_and_labels_schema(dataset_params): - logger.debug(f"Using for train annotation file {dataset_params.annotations_train}") - logger.debug(f"Using for val annotation file {dataset_params.annotations_val}") - labels_list = [] - items = [] - items.extend( - load_dataset_items_coco_format( - ann_file_path=dataset_params.annotations_train, - data_root_dir=dataset_params.images_train_dir, - domain=Domain.DETECTION, - subset=Subset.TRAINING, - labels_list=labels_list, - ) - ) - items.extend( - load_dataset_items_coco_format( - ann_file_path=dataset_params.annotations_val, - data_root_dir=dataset_params.images_val_dir, - domain=Domain.DETECTION, - subset=Subset.VALIDATION, - labels_list=labels_list, - ) - ) - items.extend( - load_dataset_items_coco_format( - ann_file_path=dataset_params.annotations_test, - data_root_dir=dataset_params.images_test_dir, - domain=Domain.DETECTION, - subset=Subset.TESTING, - labels_list=labels_list, - ) - ) - dataset = DatasetEntity(items=items) - labels_schema = LabelSchemaEntity.from_labels(dataset.get_labels()) - return dataset, labels_schema - - -def 
_create_segmentation_dataset_and_labels_schema(dataset_params): - logger.debug(f"Using for train annotation file {dataset_params.annotations_train}") - logger.debug(f"Using for val annotation file {dataset_params.annotations_val}") - labels_list = [] - items = load_dataset_items( - ann_file_path=dataset_params.annotations_train, - data_root_dir=dataset_params.images_train_dir, - subset=Subset.TRAINING, - labels_list=labels_list, - ) - items.extend( - load_dataset_items( - ann_file_path=dataset_params.annotations_val, - data_root_dir=dataset_params.images_val_dir, - subset=Subset.VALIDATION, - labels_list=labels_list, - ) - ) - items.extend( - load_dataset_items( - ann_file_path=dataset_params.annotations_test, - data_root_dir=dataset_params.images_test_dir, - subset=Subset.TESTING, - labels_list=labels_list, - ) - ) - dataset = DatasetEntity(items=items) - labels_schema = LabelSchemaEntity.from_labels(labels_list) - return dataset, labels_schema - - -class ClassificationClsIncrTrainingTestParameters( - DefaultOTETestCreationParametersInterface -): - def test_case_class(self) -> Type[OTETestCaseInterface]: - return generate_ote_integration_test_case_class(get_test_action_classes()) - - def test_bunches(self) -> List[Dict[str, Any]]: - test_bunches = [ - dict( - model_name=[ - "Custom_Image_Classification_EfficinetNet-B0", - "Custom_Image_Classification_EfficinetNet-V2-S", - "Custom_Image_Classification_MobileNet-V3-large-1x", - "Custom_Image_Classification_MobileNet-V3-large-0.75x", - "Custom_Image_Classification_MobileNet-V3-small", - ], - dataset_name=["cifar10_cls_incr"], - usecase="precommit", - ), - dict( - model_name=[ - "Custom_Image_Classification_EfficinetNet-B0", - "Custom_Image_Classification_EfficinetNet-V2-S", - "Custom_Image_Classification_MobileNet-V3-large-1x", - "Custom_Image_Classification_MobileNet-V3-large-0.75x", - "Custom_Image_Classification_MobileNet-V3-small", - ], - dataset_name=["cifar10_cls_incr"], - num_training_iters=KEEP_CONFIG_FIELD_VALUE, - batch_size=KEEP_CONFIG_FIELD_VALUE, - usecase=REALLIFE_USECASE_CONSTANT, - ), - ] - - return deepcopy(test_bunches) - - def default_test_parameters(self) -> Dict[str, Any]: - DEFAULT_TEST_PARAMETERS = { - "num_training_iters": 2, - "batch_size": 16, - } - return deepcopy(DEFAULT_TEST_PARAMETERS) - - -class DetectionClsIncrTrainingTestParameters(DefaultOTETestCreationParametersInterface): - def test_case_class(self) -> Type[OTETestCaseInterface]: - return generate_ote_integration_test_case_class(get_test_action_classes()) - - def test_bunches(self) -> List[Dict[str, Any]]: - test_bunches = [ - dict( - model_name=[ - "Custom_Object_Detection_Gen3_ATSS", - "Custom_Object_Detection_Gen3_VFNet", - ], - dataset_name="coco_cls_incr", - usecase="precommit", - ), - dict( - model_name=[ - "Custom_Object_Detection_Gen3_ATSS", - "Custom_Object_Detection_Gen3_VFNet", - ], - dataset_name="coco_cls_incr", - num_training_iters=KEEP_CONFIG_FIELD_VALUE, - batch_size=KEEP_CONFIG_FIELD_VALUE, - usecase=REALLIFE_USECASE_CONSTANT, - ), - ] - return deepcopy(test_bunches) - - -class SegmentationClsIncrTrainingTestParameters( - DefaultOTETestCreationParametersInterface -): - def test_case_class(self) -> Type[OTETestCaseInterface]: - return generate_ote_integration_test_case_class(get_test_action_classes()) - - def test_bunches(self) -> List[Dict[str, Any]]: - test_bunches = [ - dict( - model_name=[ - "Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR", - "Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR", - 
"Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR", - ], - dataset_name="voc_cls_incr", - usecase="precommit", - ), - dict( - model_name=[ - "Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR", - "Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR", - "Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR", - ], - dataset_name="voc_cls_incr", - num_training_iters=KEEP_CONFIG_FIELD_VALUE, - batch_size=KEEP_CONFIG_FIELD_VALUE, - usecase=REALLIFE_USECASE_CONSTANT, - ), - ] - return deepcopy(test_bunches) - - -class TestOTEReallifeClassificationClsIncr(OTETrainingTestInterface): - """ - The main class of running test in this file. - """ - - PERFORMANCE_RESULTS = None # it is required for e2e system - helper = OTETestHelper(ClassificationClsIncrTrainingTestParameters()) - - @classmethod - def get_list_of_tests(cls, usecase: Optional[str] = None): - """ - This method should be a classmethod. It is called before fixture initialization, during - tests discovering. - """ - return cls.helper.get_list_of_tests(usecase) - - @pytest.fixture - def params_factories_for_test_actions_fx( - self, - current_test_parameters_fx, - dataset_definitions_fx, - template_paths_fx, - ote_current_reference_dir_fx, - ) -> Dict[str, Callable[[], Dict]]: - logger.debug("params_factories_for_test_actions_fx: begin") - - test_parameters = deepcopy(current_test_parameters_fx) - dataset_definitions = deepcopy(dataset_definitions_fx) - template_paths = deepcopy(template_paths_fx) - - def _training_params_factory() -> Dict: - if dataset_definitions is None: - pytest.skip('The parameter "--dataset-definitions" is not set') - - model_name = test_parameters["model_name"] - dataset_name = test_parameters["dataset_name"] - num_training_iters = test_parameters["num_training_iters"] - batch_size = test_parameters["batch_size"] - - dataset_params = _get_dataset_params_from_dataset_definitions( - dataset_definitions, dataset_name - ) - - if model_name not in template_paths: - raise ValueError( - f"Model {model_name} is absent in template_paths, " - f"template_paths.keys={list(template_paths.keys())}" - ) - template_path = make_path_be_abs( - template_paths[model_name], template_paths[ROOT_PATH_KEY] - ) - - logger.debug( - "training params factory: Before creating dataset and labels_schema" - ) - dataset, labels_schema = _create_classification_dataset_and_labels_schema( - dataset_params, model_name - ) - ckpt_path = None - if hasattr(dataset_params, "pre_trained_model"): - ckpt_path = osp.join( - osp.join(dataset_params.pre_trained_model, model_name), - "weights.pth", - ) - logger.info(f"Pretrained path : {ckpt_path}") - logger.debug( - "training params factory: After creating dataset and labels_schema" - ) - - return { - "dataset": dataset, - "labels_schema": labels_schema, - "template_path": template_path, - "num_training_iters": num_training_iters, - "batch_size": batch_size, - "checkpoint": ckpt_path, - } - - params_factories_for_test_actions = { - "training": _training_params_factory, - } - logger.debug("params_factories_for_test_actions_fx: end") - return params_factories_for_test_actions - - @pytest.fixture - def test_case_fx( - self, current_test_parameters_fx, params_factories_for_test_actions_fx - ): - """ - This fixture returns the test case class OTEIntegrationTestCase that should be used for the current test. - Note that the cache from the test helper allows to store the instance of the class - between the tests. 
- If the main parameters used for this test are the same as the main parameters used for the previous test, - the instance of the test case class will be kept and re-used. It is helpful for tests that can - re-use the result of operations (model training, model optimization, etc) made for the previous tests, - if these operations are time-consuming. - If the main parameters used for this test differs w.r.t. the previous test, a new instance of - test case class will be created. - """ - test_case = type(self).helper.get_test_case( - current_test_parameters_fx, params_factories_for_test_actions_fx - ) - return test_case - - @e2e_pytest_performance - def test( - self, - test_parameters, - test_case_fx, - data_collector_fx, - cur_test_expected_metrics_callback_fx, - ): - test_case_fx.run_stage( - test_parameters["test_stage"], - data_collector_fx, - cur_test_expected_metrics_callback_fx, - ) - - -class TestOTEReallifeObjectDetectionClsIncr(OTETrainingTestInterface): - """ - The main class of running test in this file. - """ - - PERFORMANCE_RESULTS = None # it is required for e2e system - helper = OTETestHelper(DetectionClsIncrTrainingTestParameters()) - - @classmethod - def get_list_of_tests(cls, usecase: Optional[str] = None): - """ - This method should be a classmethod. It is called before fixture initialization, during - tests discovering. - """ - return cls.helper.get_list_of_tests(usecase) - - @pytest.fixture - def params_factories_for_test_actions_fx( - self, - current_test_parameters_fx, - dataset_definitions_fx, - template_paths_fx, - ote_current_reference_dir_fx, - ) -> Dict[str, Callable[[], Dict]]: - logger.debug("params_factories_for_test_actions_fx: begin") - - test_parameters = deepcopy(current_test_parameters_fx) - dataset_definitions = deepcopy(dataset_definitions_fx) - template_paths = deepcopy(template_paths_fx) - - def _training_params_factory() -> Dict: - if dataset_definitions is None: - pytest.skip('The parameter "--dataset-definitions" is not set') - - model_name = test_parameters["model_name"] - dataset_name = test_parameters["dataset_name"] - num_training_iters = test_parameters["num_training_iters"] - batch_size = test_parameters["batch_size"] - - dataset_params = _get_dataset_params_from_dataset_definitions( - dataset_definitions, dataset_name - ) - - if model_name not in template_paths: - raise ValueError( - f"Model {model_name} is absent in template_paths, " - f"template_paths.keys={list(template_paths.keys())}" - ) - template_path = make_path_be_abs( - template_paths[model_name], template_paths[ROOT_PATH_KEY] - ) - - logger.debug( - "training params factory: Before creating dataset and labels_schema" - ) - dataset, labels_schema = _create_object_detection_dataset_and_labels_schema( - dataset_params - ) - ckpt_path = None - if hasattr(dataset_params, "pre_trained_model"): - ckpt_path = osp.join( - osp.join(dataset_params.pre_trained_model, model_name), - "weights.pth", - ) - logger.debug( - "training params factory: After creating dataset and labels_schema" - ) - - return { - "dataset": dataset, - "labels_schema": labels_schema, - "template_path": template_path, - "num_training_iters": num_training_iters, - "batch_size": batch_size, - "checkpoint": ckpt_path, - } - - params_factories_for_test_actions = { - "training": _training_params_factory, - } - logger.debug("params_factories_for_test_actions_fx: end") - return params_factories_for_test_actions - - @pytest.fixture - def test_case_fx( - self, current_test_parameters_fx, params_factories_for_test_actions_fx - ): 
- """ - This fixture returns the test case class OTEIntegrationTestCase that should be used for the current test. - Note that the cache from the test helper allows to store the instance of the class - between the tests. - If the main parameters used for this test are the same as the main parameters used for the previous test, - the instance of the test case class will be kept and re-used. It is helpful for tests that can - re-use the result of operations (model training, model optimization, etc) made for the previous tests, - if these operations are time-consuming. - If the main parameters used for this test differs w.r.t. the previous test, a new instance of - test case class will be created. - """ - test_case = type(self).helper.get_test_case( - current_test_parameters_fx, params_factories_for_test_actions_fx - ) - return test_case - - # TODO(lbeynens): move to common fixtures - @pytest.fixture - def data_collector_fx(self, request) -> DataCollector: - setup = deepcopy(request.node.callspec.params) - setup["environment_name"] = os.environ.get("TT_ENVIRONMENT_NAME", "no-env") - setup["test_type"] = os.environ.get( - "TT_TEST_TYPE", "no-test-type" - ) # TODO: get from e2e test type - setup["scenario"] = "api" # TODO(lbeynens): get from a fixture! - setup["test"] = request.node.name - setup["subject"] = "detection-cls-incr" - setup["project"] = "ote" - if "test_parameters" in setup: - assert isinstance(setup["test_parameters"], dict) - if "dataset_name" not in setup: - setup["dataset_name"] = setup["test_parameters"].get("dataset_name") - if "model_name" not in setup: - setup["model_name"] = setup["test_parameters"].get("model_name") - if "test_stage" not in setup: - setup["test_stage"] = setup["test_parameters"].get("test_stage") - if "usecase" not in setup: - setup["usecase"] = setup["test_parameters"].get("usecase") - logger.info(f"creating DataCollector: setup=\n{pformat(setup, width=140)}") - data_collector = DataCollector(name="TestOTEIntegration", setup=setup) - with data_collector: - logger.info("data_collector is created") - yield data_collector - logger.info("data_collector is released") - - @e2e_pytest_performance - def test( - self, - test_parameters, - test_case_fx, - data_collector_fx, - cur_test_expected_metrics_callback_fx, - ): - test_case_fx.run_stage( - test_parameters["test_stage"], - data_collector_fx, - cur_test_expected_metrics_callback_fx, - ) - - -class TestOTEReallifeSegmentationClsIncr(OTETrainingTestInterface): - """ - The main class of running test in this file. - """ - - PERFORMANCE_RESULTS = None # it is required for e2e system - helper = OTETestHelper(SegmentationClsIncrTrainingTestParameters()) - - @classmethod - def get_list_of_tests(cls, usecase: Optional[str] = None): - """ - This method should be a classmethod. It is called before fixture initialization, during - tests discovering. 
- """ - return cls.helper.get_list_of_tests(usecase) - - @pytest.fixture - def params_factories_for_test_actions_fx( - self, - current_test_parameters_fx, - dataset_definitions_fx, - template_paths_fx, - ote_current_reference_dir_fx, - ) -> Dict[str, Callable[[], Dict]]: - logger.debug("params_factories_for_test_actions_fx: begin") - - test_parameters = deepcopy(current_test_parameters_fx) - dataset_definitions = deepcopy(dataset_definitions_fx) - template_paths = deepcopy(template_paths_fx) - - def _training_params_factory() -> Dict: - if dataset_definitions is None: - pytest.skip('The parameter "--dataset-definitions" is not set') - - model_name = test_parameters["model_name"] - dataset_name = test_parameters["dataset_name"] - num_training_iters = test_parameters["num_training_iters"] - batch_size = test_parameters["batch_size"] - - dataset_params = _get_dataset_params_from_dataset_definitions( - dataset_definitions, dataset_name - ) - - if model_name not in template_paths: - raise ValueError( - f"Model {model_name} is absent in template_paths, " - f"template_paths.keys={list(template_paths.keys())}" - ) - template_path = make_path_be_abs( - template_paths[model_name], template_paths[ROOT_PATH_KEY] - ) - - logger.debug( - "training params factory: Before creating dataset and labels_schema" - ) - dataset, labels_schema = _create_segmentation_dataset_and_labels_schema( - dataset_params - ) - import os.path as osp - - ckpt_path = None - if hasattr(dataset_params, "pre_trained_model"): - ckpt_path = osp.join( - osp.join(dataset_params.pre_trained_model, model_name), - "weights.pth", - ) - logger.debug( - "training params factory: After creating dataset and labels_schema" - ) - - return { - "dataset": dataset, - "labels_schema": labels_schema, - "template_path": template_path, - "num_training_iters": num_training_iters, - "batch_size": batch_size, - "checkpoint": ckpt_path, - } - - params_factories_for_test_actions = { - "training": _training_params_factory, - } - logger.debug("params_factories_for_test_actions_fx: end") - return params_factories_for_test_actions - - @pytest.fixture - def test_case_fx( - self, current_test_parameters_fx, params_factories_for_test_actions_fx - ): - """ - This fixture returns the test case class OTEIntegrationTestCase that should be used for the current test. - Note that the cache from the test helper allows to store the instance of the class - between the tests. - If the main parameters used for this test are the same as the main parameters used for the previous test, - the instance of the test case class will be kept and re-used. It is helpful for tests that can - re-use the result of operations (model training, model optimization, etc) made for the previous tests, - if these operations are time-consuming. - If the main parameters used for this test differs w.r.t. the previous test, a new instance of - test case class will be created. - """ - test_case = type(self).helper.get_test_case( - current_test_parameters_fx, params_factories_for_test_actions_fx - ) - return test_case - - # TODO(lbeynens): move to common fixtures - @pytest.fixture - def data_collector_fx(self, request) -> DataCollector: - setup = deepcopy(request.node.callspec.params) - setup["environment_name"] = os.environ.get("TT_ENVIRONMENT_NAME", "no-env") - setup["test_type"] = os.environ.get( - "TT_TEST_TYPE", "no-test-type" - ) # TODO: get from e2e test type - setup["scenario"] = "api" # TODO(lbeynens): get from a fixture! 
- setup["test"] = request.node.name - setup["subject"] = "segmentation-cls-incr" - setup["project"] = "ote" - if "test_parameters" in setup: - assert isinstance(setup["test_parameters"], dict) - if "dataset_name" not in setup: - setup["dataset_name"] = setup["test_parameters"].get("dataset_name") - if "model_name" not in setup: - setup["model_name"] = setup["test_parameters"].get("model_name") - if "test_stage" not in setup: - setup["test_stage"] = setup["test_parameters"].get("test_stage") - if "usecase" not in setup: - setup["usecase"] = setup["test_parameters"].get("usecase") - logger.info(f"creating DataCollector: setup=\n{pformat(setup, width=140)}") - data_collector = DataCollector(name="TestOTEIntegration", setup=setup) - with data_collector: - logger.info("data_collector is created") - yield data_collector - logger.info("data_collector is released") - - @e2e_pytest_performance - def test( - self, - test_parameters, - test_case_fx, - data_collector_fx, - cur_test_expected_metrics_callback_fx, - ): - if "pot_evaluation" in test_parameters["test_stage"]: - pytest.xfail("Known issue CVS-84576") - test_case_fx.run_stage( - test_parameters["test_stage"], - data_collector_fx, - cur_test_expected_metrics_callback_fx, - ) diff --git a/ote_sdk/ote_sdk/entities/dataset_item.py b/ote_sdk/ote_sdk/entities/dataset_item.py index c938a1a16da..70417fba9f6 100644 --- a/ote_sdk/ote_sdk/entities/dataset_item.py +++ b/ote_sdk/ote_sdk/entities/dataset_item.py @@ -3,6 +3,7 @@ # Copyright (C) 2021-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # +# pylint: disable=cyclic-import import abc import copy diff --git a/ote_sdk/ote_sdk/entities/shapes/rectangle.py b/ote_sdk/ote_sdk/entities/shapes/rectangle.py index 165c2f57142..4993c3ccc6c 100644 --- a/ote_sdk/ote_sdk/entities/shapes/rectangle.py +++ b/ote_sdk/ote_sdk/entities/shapes/rectangle.py @@ -4,7 +4,7 @@ # # Conflict with Isort -# pylint: disable=wrong-import-order +# pylint: disable=wrong-import-order, cyclic-import import datetime import math diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/executors/sync_pipeline.py b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/executors/sync_pipeline.py index be8a2ef6a2f..f52eea59b7b 100644 --- a/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/executors/sync_pipeline.py +++ b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/executors/sync_pipeline.py @@ -99,7 +99,7 @@ def run(self, input_stream: Union[int, str], loop: bool = False) -> None: for frame in streamer: # getting result for single image annotation_scene = self.single_run(frame) - output = self.visualizer.draw(frame, annotation_scene) + output = self.visualizer.draw(frame, annotation_scene, {}) self.visualizer.show(output) if self.visualizer.is_quit(): break diff --git a/ote_sdk/ote_sdk/utils/vis_utils.py b/ote_sdk/ote_sdk/utils/vis_utils.py new file mode 100644 index 00000000000..8ba992fac33 --- /dev/null +++ b/ote_sdk/ote_sdk/utils/vis_utils.py @@ -0,0 +1,30 @@ +""" +This module implements activation map +""" +# Copyright (C) 2021-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from typing import Union + +import cv2 +import numpy as np + + +def get_actmap( + saliency_map: np.ndarray, + output_res: Union[tuple, list], +) -> np.ndarray: + """ + Get activation map (heatmap) from saliency map + + :param saliency_map: Saliency map with pixel values from 0-255 (np.ndarray) + :param output_res: Output resolution (Union[tuple, list]) + :return: activation map, heatmap 
(np.ndarray) + """ + if len(saliency_map.shape) == 3: + saliency_map = saliency_map[0] + + saliency_map = cv2.resize(saliency_map, output_res) + saliency_map = cv2.applyColorMap(saliency_map, cv2.COLORMAP_JET) + saliency_map = cv2.cvtColor(saliency_map, cv2.COLOR_BGR2RGB) + return saliency_map diff --git a/otx/api/entities/dataset_item.py b/otx/api/entities/dataset_item.py index 452be547695..4ca56c267ad 100644 --- a/otx/api/entities/dataset_item.py +++ b/otx/api/entities/dataset_item.py @@ -3,6 +3,7 @@ # Copyright (C) 2021-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # +# pylint: disable=cyclic-import import abc import copy diff --git a/otx/api/entities/shapes/rectangle.py b/otx/api/entities/shapes/rectangle.py index b279dd8f0f6..32456a0de5f 100644 --- a/otx/api/entities/shapes/rectangle.py +++ b/otx/api/entities/shapes/rectangle.py @@ -4,7 +4,7 @@ # # Conflict with Isort -# pylint: disable=wrong-import-order +# pylint: disable=wrong-import-order, cyclic-import import datetime import math diff --git a/otx/api/usecases/exportable_code/demo/demo_package/executors/sync_pipeline.py b/otx/api/usecases/exportable_code/demo/demo_package/executors/sync_pipeline.py index 1b268810a2f..eae537837ff 100644 --- a/otx/api/usecases/exportable_code/demo/demo_package/executors/sync_pipeline.py +++ b/otx/api/usecases/exportable_code/demo/demo_package/executors/sync_pipeline.py @@ -82,7 +82,7 @@ def run(self, input_stream: Union[int, str], loop: bool = False) -> None: for frame in streamer: # getting result for single image annotation_scene = self.single_run(frame) - output = self.visualizer.draw(frame, annotation_scene) + output = self.visualizer.draw(frame, annotation_scene, {}) self.visualizer.show(output) if self.visualizer.is_quit(): break diff --git a/otx/api/utils/vis_utils.py b/otx/api/utils/vis_utils.py new file mode 100644 index 00000000000..d24c6003e54 --- /dev/null +++ b/otx/api/utils/vis_utils.py @@ -0,0 +1,33 @@ +"""This module implements activation map.""" + +# Copyright (C) 2021-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from typing import Union + +import cv2 +import numpy as np + + +def get_actmap( + saliency_map: np.ndarray, + output_res: Union[tuple, list], +) -> np.ndarray: + """Get activation map (heatmap) from saliency map. 
+ + It resizes the saliency map to the output resolution and renders it as a color heatmap. + + Args: + saliency_map (np.ndarray): Saliency map with pixel values from 0-255 + output_res (Union[tuple, list]): Output resolution + + Returns: + Activation map, heatmap + """ + if len(saliency_map.shape) == 3: + saliency_map = saliency_map[0] + + saliency_map = cv2.resize(saliency_map, output_res) + saliency_map = cv2.applyColorMap(saliency_map, cv2.COLORMAP_JET) + saliency_map = cv2.cvtColor(saliency_map, cv2.COLOR_BGR2RGB) + return saliency_map diff --git a/tests/ote_cli/external/deep-object-reid/ote_cli b/tests/ote_cli/external/deep-object-reid/ote_cli deleted file mode 120000 index ff593f72ea6..00000000000 --- a/tests/ote_cli/external/deep-object-reid/ote_cli +++ /dev/null @@ -1 +0,0 @@ -../../../../external/deep-object-reid/tests/ote_cli \ No newline at end of file diff --git a/tests/ote_cli/external/mmdetection/ote_cli b/tests/ote_cli/external/mmdetection/ote_cli deleted file mode 120000 index 7c90d654753..00000000000 --- a/tests/ote_cli/external/mmdetection/ote_cli +++ /dev/null @@ -1 +0,0 @@ -../../../../external/mmdetection/tests/ote_cli \ No newline at end of file diff --git a/tests/ote_cli/external/mmsegmentation/ote_cli b/tests/ote_cli/external/mmsegmentation/ote_cli deleted file mode 120000 index 898e3d4a498..00000000000 --- a/tests/ote_cli/external/mmsegmentation/ote_cli +++ /dev/null @@ -1 +0,0 @@ -../../../../external/mmsegmentation/tests/ote_cli \ No newline at end of file diff --git a/tests/ote_cli/misc/test_docs.py b/tests/ote_cli/misc/test_docs.py index 360b89895fb..0ec075110b1 100644 --- a/tests/ote_cli/misc/test_docs.py +++ b/tests/ote_cli/misc/test_docs.py @@ -49,31 +49,34 @@ def test_help_stdoutputs_of_tools(self): @e2e_pytest_component def test_algorithms_table(self): - def algorithms_generate_table(templates): + def algorithms_table(templates): attributes = ["model_template_id", "name", "gigaflops", "size"] - header = attributes + ["Path"] - attributes_in_md = {"name": "Name", "model_template_id": "ID", "gigaflops": "Complexity (GFlops)", "size": "Model size (MB)", "Path": "Path"} - - table = [" | ".join([attributes_in_md[x] for x in header])] + [" | ".join(["-------" for _ in header])] - + algo_table = defaultdict(list) + for template in sorted(templates, key=lambda x: float(x.gigaflops)): - record = [str(getattr(template, attr)) for attr in attributes ] + if template.model_template_path.split("/")[-1] != "template.yaml": + continue + record = [str(getattr(template, attr)) for attr in attributes] record.append(os.path.relpath(template.model_template_path, './external')) - record = " | ".join(record) - table += [record] - return "\n".join(table) - + algo_table[record[0]] = record[1:] + return algo_table + + readme_table = defaultdict(list) # ["name", "gigaflops", "size", "Path"] with open("external/README.md", encoding="UTF-8") as read_file: full_text = '' for line in read_file: full_text += line - + if "|" in line: + model_lst = line.replace(" ", "").strip().split("|") + model_lst = " ".join(model_lst).split() + readme_table[model_lst[0]] = model_lst[1:] + registry = Registry(".") templates_per_task_type = defaultdict(list) for template in sorted(registry.templates, key=lambda x:str(x.task_type)): templates_per_task_type[template.task_type].append(template) for task_type, templates in templates_per_task_type.items(): - generated_table = algorithms_generate_table(templates) - print("\n", task_type) - print(generated_table) - assert generated_table in full_text, f"\n{generated_table} not in \n{full_text}\n for
the task {task_type}\n" + algorithm_table = algorithms_table(templates) + for model_id in algorithm_table.keys(): + assert model_id in readme_table, f"\n {model_id} not in 'external/README.md' for {task_type}" + assert algorithm_table[model_id] == readme_table[model_id], f"\n {model_id}'s info in 'external/README.md' is wrong" diff --git a/tests/ote_cli/misc/test_template_files.py b/tests/ote_cli/misc/test_template_files.py index 128b2bb5c43..1c3730262dc 100644 --- a/tests/ote_cli/misc/test_template_files.py +++ b/tests/ote_cli/misc/test_template_files.py @@ -21,13 +21,31 @@ from ote_cli.registry import Registry -templates = Registry('external', experimental=True).templates +templates = Registry("external/model-preparation-algorithm").templates paths = [os.path.relpath(template.model_template_path) for template in templates] ids = [os.path.relpath(template.model_template_id) for template in templates] -class TestTemplates: + +class TestMPATemplates: @e2e_pytest_component @pytest.mark.parametrize("path", paths, ids=ids) def test_template(self, path): template = parse_model_template(path) assert template.hyper_parameters.data + + +anomaly_templates = Registry("external/anomaly").templates +anomaly_paths = [ + os.path.relpath(template.model_template_path) for template in anomaly_templates +] +anomaly_ids = [ + os.path.relpath(template.model_template_id) for template in anomaly_templates +] + + +class TestAnomalyTemplates: + @e2e_pytest_component + @pytest.mark.parametrize("path", anomaly_paths, ids=anomaly_ids) + def test_template(self, path): + template = parse_model_template(path) + assert template.hyper_parameters.data diff --git a/tests/ote_cli/pytest.ini b/tests/ote_cli/pytest.ini index 992e9a385bf..0f7e9a1548f 100644 --- a/tests/ote_cli/pytest.ini +++ b/tests/ote_cli/pytest.ini @@ -4,4 +4,4 @@ markers = priority_medium components reqids - component \ No newline at end of file + component