[MPA Detection] enable saliency map detection #1155

Merged (9 commits) on Jul 15, 2022
@@ -5,7 +5,7 @@
import io
import os
from collections import defaultdict
-from typing import List, Optional
+from typing import List, Optional, Tuple, Iterable

import cv2
import numpy as np
@@ -34,6 +34,7 @@
ModelOptimizationType)
from ote_sdk.entities.model_template import TaskType
from ote_sdk.entities.resultset import ResultSetEntity
+from ote_sdk.entities.result_media import ResultMediaEntity
from ote_sdk.entities.scored_label import ScoredLabel
from ote_sdk.entities.shapes.polygon import Point, Polygon
from ote_sdk.entities.shapes.rectangle import Rectangle
@@ -84,18 +85,48 @@ def infer(self,
self.confidence_threshold = self._hyperparams.postprocessing.confidence_threshold
logger.info(f'Confidence threshold {self.confidence_threshold}')

+prediction_results, _ = self._infer_detector(dataset, inference_parameters)
+self._add_predictions_to_dataset(prediction_results, dataset, self.confidence_threshold)
+logger.info('Inference completed')
+return dataset
+
+def _infer_detector(self, dataset: DatasetEntity,
+                    inference_parameters: Optional[InferenceParameters] = None) -> Tuple[Iterable, float]:
+    """ Inference wrapper
+
+    This method triggers inference and returns `prediction_results`, a zip of the raw predictions,
+    feature vectors, and saliency maps for each sample. `metric` is returned as a float value when
+    `InferenceParameters.is_evaluation` is set to True; otherwise `None` is returned.
+
+    Args:
+        dataset (DatasetEntity): the validation or test dataset to be inferred with
+        inference_parameters (Optional[InferenceParameters], optional): Option to run evaluation or not.
+            If `InferenceParameters.is_evaluation=True`, the metric is computed and saliency maps are not
+            dumped; otherwise saliency maps are dumped and no metric is computed. Defaults to None.
+
+    Returns:
+        Tuple[Iterable, float]: Iterable prediction results for each sample and the metric computed on
+            the given dataset
+    """
stage_module = 'DetectionInferrer'
self._data_cfg = self._init_test_data_cfg(dataset)
-results = self._run_task(stage_module, mode='train', dataset=dataset, parameters=inference_parameters)
+dump_features = True
+dump_saliency_map = not inference_parameters.is_evaluation if inference_parameters else True
+results = self._run_task(stage_module,
+                         mode='train',
+                         dataset=dataset,
+                         eval=inference_parameters.is_evaluation if inference_parameters else False,
+                         dump_features=dump_features,
+                         dump_saliency_map=dump_saliency_map)
# TODO: InferenceProgressCallback register
logger.debug(f'result of run_task {stage_module} module = {results}')
output = results['outputs']
metric = output['metric']
predictions = output['detections']
-featuremaps = [None for _ in range(len(predictions))]
-prediction_results = zip(predictions, featuremaps)
-self._add_predictions_to_dataset(prediction_results, dataset, self.confidence_threshold)
-logger.info('Inference completed')
-return dataset
+assert len(output['detections']) == len(output['feature_vectors']) == len(output['saliency_maps']), \
+    'Number of elements should be the same, however, number of outputs are ' \
+    f"{len(output['detections'])}, {len(output['feature_vectors'])}, and {len(output['saliency_maps'])}"
+prediction_results = zip(predictions, output['feature_vectors'], output['saliency_maps'])
+return prediction_results, metric
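For reference, a minimal runnable sketch of the dump-flag logic used by the new wrapper; `resolve_dump_flags` and the `SimpleNamespace` stand-in for `InferenceParameters` are illustrative names, not part of the patch:

from types import SimpleNamespace

def resolve_dump_flags(inference_parameters):
    # Feature vectors are always requested; saliency maps are skipped only for
    # evaluation passes (mirrors the expressions in the diff above).
    dump_features = True
    dump_saliency_map = not inference_parameters.is_evaluation if inference_parameters else True
    return dump_features, dump_saliency_map

print(resolve_dump_flags(None))                                  # (True, True)  - plain inference
print(resolve_dump_flags(SimpleNamespace(is_evaluation=True)))   # (True, False) - validation/evaluation
print(resolve_dump_flags(SimpleNamespace(is_evaluation=False)))  # (True, True)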

def evaluate(self,
output_result_set: ResultSetEntity,
@@ -208,7 +239,7 @@ def _init_test_data_cfg(self, dataset: DatasetEntity):

def _add_predictions_to_dataset(self, prediction_results, dataset, confidence_threshold=0.0):
""" Loop over dataset again to assign predictions. Convert from MMDetection format to OTE format. """
-for dataset_item, (all_results, feature_vector) in zip(dataset, prediction_results):
+for dataset_item, (all_results, feature_vector, saliency_map) in zip(dataset, prediction_results):
width = dataset_item.width
height = dataset_item.height
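The unpacking above assumes `_infer_detector` zips detections, feature vectors, and saliency maps in dataset order; a toy illustration of that per-sample pairing (the arrays below are placeholders, not real model outputs):

import numpy as np

detections = [np.zeros((0, 5)), np.ones((2, 5))]          # stand-ins for output['detections']
feature_vectors = [np.zeros(256), np.zeros(256)]          # stand-ins for output['feature_vectors']
saliency_maps = [np.zeros((14, 14)), np.zeros((14, 14))]  # stand-ins for output['saliency_maps']

prediction_results = zip(detections, feature_vectors, saliency_maps)
dataset = ['item_0', 'item_1']                            # stands in for iterating a DatasetEntity

for dataset_item, (all_results, feature_vector, saliency_map) in zip(dataset, prediction_results):
    print(dataset_item, all_results.shape, feature_vector.shape, saliency_map.shape)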

@@ -226,6 +257,14 @@ def _add_predictions_to_dataset(self, prediction_results, dataset, confidence_th
if feature_vector is not None:
active_score = TensorEntity(name="representation_vector", numpy=feature_vector)
dataset_item.append_metadata_item(active_score)

+if saliency_map is not None:
+    width, height = dataset_item.width, dataset_item.height
+    saliency_map = cv2.resize(saliency_map, (width, height), interpolation=cv2.INTER_NEAREST)
+    saliency_map_media = ResultMediaEntity(name="saliency_map", type="Saliency map",
+                                           annotation_scene=dataset_item.annotation_scene,
+                                           numpy=saliency_map, roi=dataset_item.roi)
+    dataset_item.append_metadata_item(saliency_map_media, model=self._task_environment.model)
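The only post-processing applied before the map is wrapped in `ResultMediaEntity` is the nearest-neighbour resize above; a standalone sketch of that step with made-up sizes:

import cv2
import numpy as np

saliency_map = np.random.randint(0, 255, (14, 14), dtype=np.uint8)  # coarse map from the detector head

# cv2.resize takes (width, height); INTER_NEAREST keeps the blocky activation cells instead of smoothing them.
width, height = 1280, 720
resized = cv2.resize(saliency_map, (width, height), interpolation=cv2.INTER_NEAREST)
assert resized.shape == (height, width)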

def _patch_data_pipeline(self):
base_dir = os.path.abspath(os.path.dirname(self.template_file_path))
@@ -417,20 +456,9 @@ def train(self,

# get prediction on validation set
val_dataset = dataset.get_subset(Subset.VALIDATION)
-self._data_cfg = self._init_test_data_cfg(val_dataset)
-results = self._run_task(
-    'DetectionInferrer',
-    mode='train',
-    dataset=val_dataset,
-    eval=True
-)
+val_preds, val_map = self._infer_detector(val_dataset, InferenceParameters(is_evaluation=True))

preds_val_dataset = val_dataset.with_empty_annotations()
-logger.debug(f'result of run_task {stage_module} module = {results}')
-output = results['outputs']
-val_preds = output['detections']
-val_map = output['metric']
-featuremaps = [None for _ in range(len(val_preds))]
-val_preds = zip(val_preds, featuremaps)
self._add_predictions_to_dataset(val_preds, preds_val_dataset, 0.0)

result_set = ResultSetEntity(