Commit: logger

negvet committed Mar 27, 2023
1 parent d1cf504, commit 8e6c279
Showing 1 changed file with 38 additions and 28 deletions.

otx/cli/tools/explain.py (38 additions, 28 deletions)
@@ -32,6 +32,9 @@
     add_hyper_parameters_sub_parser,
     get_parser_and_hprams_data,
 )
+from otx.mpa.utils.logger import get_logger
+
+logger = get_logger()
 
 ESC_BUTTON = 27
 SUPPORTED_EXPLAIN_ALGORITHMS = ["activationmap", "eigencam", "classwisesaliencymap"]
@@ -88,6 +91,38 @@ def get_args():
     return parser.parse_args(), override_param
 
 
+def _log_prior_to_saving(args, num_images):
+    logger.info("Explain report:")
+    if args.process_saliency_maps:
+        logger.info(
+            "Postprocessing applied. (1) saliency maps resized to the input image resolution "
+            "and (2) color map applied."
+        )
+    else:
+        logger.info("No postprocessing applied. Raw low-resolution saliency maps saved as .tiff format images.")
+
+    if args.explain_all_classes:
+        logger.info(f"Saliency maps generated for each class, per each of {num_images} images.")
+    else:
+        logger.info(
+            "Saliency maps generated ONLY for predicted class(es), if any. "
+            "Use --explain-all-classes flag to generate explanations for all classes."
+        )
+
+
+def _log_after_saving(explain_predicted_classes, explained_image_counter, args, num_images):
+    if explain_predicted_classes and explained_image_counter == 0:
+        logger.info(
+            "No predictions were made for provided model-data pair -> no saliency maps generated. "
+            "Please adjust training pipeline or use different model-data pair."
+        )
+    if explained_image_counter > 0:
+        logger.info(
+            f"Saliency maps saved to {args.save_explanation_to} for {explained_image_counter} "
+            f"out of {num_images} images."
+        )
+
+
 def main():
     """Main function that is used for model explanation."""
 
@@ -144,30 +179,15 @@ def main():
     )
     assert len(explained_dataset) == len(image_files)
 
-    if args.process_saliency_maps:
-        print(
-            "Postprocessing applied. (1) saliency maps resized to the input image resolution "
-            "and (2) color map applied."
-        )
-    else:
-        print("No postprocessing applied. Raw low-resolution saliency maps saved as .tiff format images.")
-
-    if args.explain_all_classes:
-        print(f"Saliency maps generated for each class, per each of {len(image_files)} images.")
-    else:
-        print(
-            "Saliency maps generated ONLY for predicted class, if any. "
-            "Use --explain-all-classes flag to generate explanations for all classes."
-        )
-
+    _log_prior_to_saving(args, len(image_files))
     explained_image_counter = 0
     for explained_data, (_, filename) in zip(explained_dataset, image_files):
         metadata_list = explained_data.get_metadata()
         if len(metadata_list) > 0:
             explained_image_counter += 1
         else:
             if explain_predicted_classes:  # Explain only predictions
-                print(f"No saliency maps generated for {filename} - model predicted nothing.")
+                logger.info(f"No saliency maps generated for {filename} - due to lack of confident predictions.")
         for metadata in metadata_list:
             saliency_data = metadata.data
             fname = f"{Path(Path(filename).name).stem}_{saliency_data.name}".replace(" ", "_")
@@ -179,17 +199,7 @@
                 fname=fname,
                 weight=args.overlay_weight,
             )
-
-    if explain_predicted_classes and explained_image_counter == 0:
-        print(
-            "No predictions were made for provided model-data pair -> no saliency maps generated. "
-            "Please adjust training pipeline or use different model-data pair."
-        )
-    if explained_image_counter > 0:
-        print(
-            f"Saliency maps saved to {args.save_explanation_to} for {explained_image_counter} "
-            f"out of {len(image_files)} images."
-        )
+    _log_after_saving(explain_predicted_classes, explained_image_counter, args, len(image_files))
 
     return dict(retcode=0, template=template.name)
 
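As a sanity check, the new _log_prior_to_saving helper can be exercised outside the CLI with a stub of the parsed arguments. This is a minimal sketch, not part of the commit: it assumes the OTX package is installed so the module imports as otx.cli.tools.explain, and it fills in only the two argparse fields the helper reads.

from argparse import Namespace

from otx.cli.tools.explain import _log_prior_to_saving

# Hypothetical stub of the parsed CLI arguments; only the two flags
# read by the helper are provided.
stub_args = Namespace(process_saliency_maps=False, explain_all_classes=True)

# With these values the helper logs the "no postprocessing" note and the
# per-class generation summary for 10 images.
_log_prior_to_saving(stub_args, num_images=10)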

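The companion helper can be checked the same way. Again a sketch under the same assumptions; the save_explanation_to path below is purely hypothetical, and only the attributes the helper reads are stubbed.

from argparse import Namespace

from otx.cli.tools.explain import _log_after_saving

# Hypothetical stub; the helper reads only args.save_explanation_to.
stub_args = Namespace(save_explanation_to="outputs/explanation")

# Reports that maps for 8 of 10 images were saved to the stub directory.
# With explained_image_counter=0 and explain_predicted_classes=True it would
# instead log the "no predictions were made" message.
_log_after_saving(
    explain_predicted_classes=True,
    explained_image_counter=8,
    args=stub_args,
    num_images=10,
)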