From 3c0d8a216d26a06a5fb3b85f187db8c6ed9f6e35 Mon Sep 17 00:00:00 2001
From: Nicki Skafte Detlefsen
Date: Tue, 2 May 2023 16:28:07 +0200
Subject: [PATCH] Fix docstring in MAP (#1751)

---
 src/torchmetrics/detection/mean_ap.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/src/torchmetrics/detection/mean_ap.py b/src/torchmetrics/detection/mean_ap.py
index 7cfbc5f08bc..c7ba7a5b491 100644
--- a/src/torchmetrics/detection/mean_ap.py
+++ b/src/torchmetrics/detection/mean_ap.py
@@ -150,9 +150,12 @@ def _segm_iou(det: List[Tuple[np.ndarray, np.ndarray]], gt: List[Tuple[np.ndarra
 class MeanAveragePrecision(Metric):
     r"""Compute the `Mean-Average-Precision (mAP) and Mean-Average-Recall (mAR)`_ for object detection predictions.
 
-    Predicted boxes and targets have to be in Pascal VOC format (xmin-top left, ymin-top left, xmax-bottom right,
-    ymax-bottom right). The metric can both compute the mAP and mAR values per class or as an global average over all
-    classes.
+    .. math::
+        \text{mAP} = \frac{1}{n} \sum_{i=1}^{n} AP_i
+
+    where :math:`AP_i` is the average precision for class :math:`i` and :math:`n` is the number of classes. The average
+    precision is defined as the area under the precision-recall curve. If argument `class_metrics` is set to ``True``,
+    the metric will also return the mAP/mAR per class.
 
     As input to ``forward`` and ``update`` the metric accepts the following input:
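
For context, a minimal usage sketch of the behaviour described by the patched docstring: with ``class_metrics=True`` the computed result also includes per-class mAP/mAR. The boxes, scores, and labels below are illustrative toy values, not taken from the patch.

    from torch import tensor
    from torchmetrics.detection.mean_ap import MeanAveragePrecision

    # Toy prediction and target, boxes in (xmin, ymin, xmax, ymax) format.
    preds = [
        {
            "boxes": tensor([[258.0, 41.0, 606.0, 285.0]]),
            "scores": tensor([0.536]),
            "labels": tensor([0]),
        }
    ]
    target = [
        {
            "boxes": tensor([[214.0, 41.0, 562.0, 285.0]]),
            "labels": tensor([0]),
        }
    ]

    # class_metrics=True adds per-class entries to the result dict.
    metric = MeanAveragePrecision(class_metrics=True)
    metric.update(preds, target)
    result = metric.compute()
    print(result["map"], result["map_per_class"])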