refactor dataset evaluation interface (open-mmlab#1209)

ly015 committed Mar 7, 2022
1 parent 91d7da2 commit 4358617
Showing 47 changed files with 1,127 additions and 852 deletions.
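Every dataset touched by this commit follows the same pattern: evaluate() now takes `results` (renamed from `outputs`, with a deprecation shim) and `res_folder` becomes optional, falling back to a temporary directory that the dataset creates and cleans up itself. In outline (a minimal sketch distilled from the diffs below; the helper name `_resolve_res_file` is illustrative, not part of the commit):

import os.path as osp
import tempfile

def _resolve_res_file(res_folder=None):
    # Pattern shared by the refactored evaluate() methods: write
    # result_keypoints.json into the given folder, or into a temporary
    # folder that the caller must clean up after computing the metrics.
    if res_folder is not None:
        return None, osp.join(res_folder, 'result_keypoints.json')
    tmp_folder = tempfile.TemporaryDirectory()
    return tmp_folder, osp.join(tmp_folder.name, 'result_keypoints.json')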
29 changes: 20 additions & 9 deletions demo/MMPose_Tutorial.ipynb
@@ -717,6 +717,7 @@
"import os\n",
"import os.path as osp\n",
"from collections import OrderedDict\n",
"import tempfile\n",
"\n",
"import numpy as np\n",
"\n",
@@ -810,7 +811,7 @@
"\t\tscale = scale * 1.25\n",
"\t\treturn center, scale\n",
"\n",
"\tdef evaluate(self, outputs, res_folder, metric='PCK', **kwargs):\n",
"\tdef evaluate(self, results, res_folder=None, metric='PCK', **kwargs):\n",
"\t\t\"\"\"Evaluate keypoint detection results. The pose prediction results will\n",
"\t\tbe saved in `${res_folder}/result_keypoints.json`.\n",
"\n",
@@ -821,15 +822,17 @@
"\t\theatmap width: W\n",
"\n",
"\t\tArgs:\n",
"\t\toutputs (list(preds, boxes, image_path, output_heatmap))\n",
"\t\tresults (list(preds, boxes, image_path, output_heatmap))\n",
"\t\t\t:preds (np.ndarray[N,K,3]): The first two dimensions are\n",
"\t\t\t\tcoordinates, score is the third dimension of the array.\n",
"\t\t\t:boxes (np.ndarray[N,6]): [center[0], center[1], scale[0]\n",
"\t\t\t\t, scale[1],area, score]\n",
"\t\t\t:image_paths (list[str]): For example, ['Test/source/0.jpg']\n",
"\t\t\t:output_heatmap (np.ndarray[N, K, H, W]): model outputs.\n",
"\n",
"\t\tres_folder (str): Path of directory to save the results.\n",
"\t\tres_folder (str, optional): The folder to save the testing\n",
" results. If not specified, a temp folder will be created.\n",
" Default: None.\n",
"\t\tmetric (str | list[str]): Metric to be performed.\n",
"\t\t\tOptions: 'PCK', 'NME'.\n",
"\n",
@@ -842,14 +845,19 @@
"\t\t\tif metric not in allowed_metrics:\n",
"\t\t\t\traise KeyError(f'metric {metric} is not supported')\n",
"\n",
"\t\tres_file = os.path.join(res_folder, 'result_keypoints.json')\n",
"\t\tif res_folder is not None:\n",
" tmp_folder = None\n",
" res_file = osp.join(res_folder, 'result_keypoints.json')\n",
" else:\n",
" tmp_folder = tempfile.TemporaryDirectory()\n",
" res_file = osp.join(tmp_folder.name, 'result_keypoints.json')\n",
"\n",
"\t\tkpts = []\n",
"\t\tfor output in outputs:\n",
"\t\t\tpreds = output['preds']\n",
"\t\t\tboxes = output['boxes']\n",
"\t\t\timage_paths = output['image_paths']\n",
"\t\t\tbbox_ids = output['bbox_ids']\n",
"\t\tfor result in results:\n",
"\t\t\tpreds = result['preds']\n",
"\t\t\tboxes = result['boxes']\n",
"\t\t\timage_paths = result['image_paths']\n",
"\t\t\tbbox_ids = result['bbox_ids']\n",
"\n",
"\t\t\tbatch_size = len(image_paths)\n",
"\t\t\tfor i in range(batch_size):\n",
@@ -867,6 +875,9 @@
"\t\tinfo_str = self._report_metric(res_file, metrics)\n",
"\t\tname_value = OrderedDict(info_str)\n",
"\n",
"\t\tif tmp_folder is not None:\n",
"\t\t\ttmp_folder.cleanup()\n",
"\n",
"\t\treturn name_value\n",
"\n",
"\tdef _report_metric(self, res_file, metrics, pck_thr=0.3):\n",
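With this notebook change, the tutorial's custom dataset can be evaluated without pre-creating a results directory. A usage sketch (`dataset` and `results` stand for the dataset instance and the collected test outputs from the tutorial; they are placeholders here):

# res_folder, if given, must be an existing directory:
name_value = dataset.evaluate(results, res_folder='results', metric='PCK')

# New in this commit: omit res_folder and a temporary folder is
# created, used for result_keypoints.json, and removed afterwards.
name_value = dataset.evaluate(results, metric='PCK')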
53 changes: 0 additions & 53 deletions mmpose/core/evaluation/eval_hooks.py
@@ -1,5 +1,4 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import tempfile
 import warnings
 
 from mmcv.runner import DistEvalHook as _DistEvalHook
@@ -55,32 +54,6 @@ def __init__(self,
         super().__init__(dataloader, start, interval, by_epoch, save_best,
                          rule, test_fn, greater_keys, less_keys, **eval_kwargs)
 
-    def evaluate(self, runner, results):
-        """Evaluate the results.
-        Args:
-            runner (:obj:`mmcv.Runner`): The underlined training runner.
-            results (list): Output results.
-        """
-        with tempfile.TemporaryDirectory() as tmp_dir:
-            eval_res = self.dataloader.dataset.evaluate(
-                results,
-                res_folder=tmp_dir,
-                logger=runner.logger,
-                **self.eval_kwargs)
-
-        for name, val in eval_res.items():
-            runner.log_buffer.output[name] = val
-        runner.log_buffer.ready = True
-
-        if self.save_best is not None:
-            if self.key_indicator == 'auto':
-                # infer from eval_results
-                self._init_rule(self.rule, list(eval_res.keys())[0])
-            return eval_res[self.key_indicator]
-
-        return None
-
-
 class DistEvalHook(_DistEvalHook):
 
@@ -123,29 +96,3 @@ def __init__(self,
                          rule, test_fn, greater_keys, less_keys,
                          broadcast_bn_buffer, tmpdir, gpu_collect,
                          **eval_kwargs)
-
-    def evaluate(self, runner, results):
-        """Evaluate the results.
-        Args:
-            runner (:obj:`mmcv.Runner`): The underlined training runner.
-            results (list): Output results.
-        """
-        with tempfile.TemporaryDirectory() as tmp_dir:
-            eval_res = self.dataloader.dataset.evaluate(
-                results,
-                res_folder=tmp_dir,
-                logger=runner.logger,
-                **self.eval_kwargs)
-
-        for name, val in eval_res.items():
-            runner.log_buffer.output[name] = val
-        runner.log_buffer.ready = True
-
-        if self.save_best is not None:
-            if self.key_indicator == 'auto':
-                # infer from eval_results
-                self._init_rule(self.rule, list(eval_res.keys())[0])
-            return eval_res[self.key_indicator]
-
-        return None
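Both deleted evaluate() overrides existed only to hand the dataset a temporary res_folder; now that the dataset creates one itself when res_folder is omitted, the inherited mmcv implementation suffices. For orientation, the base-class behavior is approximately the following (a hedged sketch of mmcv 1.x's EvalHook.evaluate, not part of this diff):

def evaluate(self, runner, results):
    # No res_folder is passed; the dataset manages it internally now.
    eval_res = self.dataloader.dataset.evaluate(
        results, logger=runner.logger, **self.eval_kwargs)
    for name, val in eval_res.items():
        runner.log_buffer.output[name] = val
    runner.log_buffer.ready = True
    if self.save_best is not None:
        return eval_res[self.key_indicator]
    return None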
37 changes: 25 additions & 12 deletions mmpose/datasets/datasets/animal/animal_ap10k_dataset.py
@@ -1,11 +1,12 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import os
+import os.path as osp
+import tempfile
 import warnings
 from collections import OrderedDict, defaultdict
 
 import json_tricks as json
 import numpy as np
-from mmcv import Config
+from mmcv import Config, deprecated_api_warning
 from xtcocotools.cocoeval import COCOeval
 
 from ....core.post_processing import oks_nms, soft_oks_nms
@@ -164,7 +165,7 @@ def _load_coco_keypoint_annotation_kernel(self, img_id):

             center, scale = self._xywh2cs(*obj['clean_bbox'][:4])
 
-            image_file = os.path.join(self.img_prefix, self.id2name[img_id])
+            image_file = osp.join(self.img_prefix, self.id2name[img_id])
             rec.append({
                 'image_file': image_file,
                 'center': center,
@@ -187,7 +188,8 @@ def _load_coco_keypoint_annotation_kernel(self, img_id):

         return rec, id2Cat
 
-    def evaluate(self, outputs, res_folder, metric='mAP', **kwargs):
+    @deprecated_api_warning(name_dict=dict(outputs='results'))
+    def evaluate(self, results, res_folder=None, metric='mAP', **kwargs):
         """Evaluate coco keypoint results. The pose prediction results will be
         saved in ``${res_folder}/result_keypoints.json``.
@@ -198,7 +200,8 @@ def evaluate(self, outputs, res_folder, metric='mAP', **kwargs):
         - heatmap width: W
         Args:
-            outputs (list[dict]): Outputs containing the following items.
+            results (list[dict]): Testing results containing the following
+                items:
             - preds (np.ndarray[N,K,3]): The first two dimensions are \
                 coordinates, score is the third dimension of the array.
@@ -208,7 +211,9 @@ def evaluate(self, outputs, res_folder, metric='mAP', **kwargs):
                 /000000393226.jpg']
             - heatmap (np.ndarray[N, K, H, W]): model output heatmap
             - bbox_id (list(int)).
-            res_folder (str): Path of directory to save the results.
+            res_folder (str, optional): The folder to save the testing
+                results. If not specified, a temp folder will be created.
+                Default: None.
             metric (str | list[str]): Metric to be performed. Defaults: 'mAP'.
         Returns:
@@ -220,15 +225,20 @@ def evaluate(self, outputs, res_folder, metric='mAP', **kwargs):
             if metric not in allowed_metrics:
                 raise KeyError(f'metric {metric} is not supported')
 
-        res_file = os.path.join(res_folder, 'result_keypoints.json')
+        if res_folder is not None:
+            tmp_folder = None
+            res_file = osp.join(res_folder, 'result_keypoints.json')
+        else:
+            tmp_folder = tempfile.TemporaryDirectory()
+            res_file = osp.join(tmp_folder.name, 'result_keypoints.json')
 
         kpts = defaultdict(list)
 
-        for output in outputs:
-            preds = output['preds']
-            boxes = output['boxes']
-            image_paths = output['image_paths']
-            bbox_ids = output['bbox_ids']
+        for result in results:
+            preds = result['preds']
+            boxes = result['boxes']
+            image_paths = result['image_paths']
+            bbox_ids = result['bbox_ids']
 
             batch_size = len(image_paths)
             for i in range(batch_size):
@@ -279,6 +289,9 @@ def evaluate(self, outputs, res_folder, metric='mAP', **kwargs):
         info_str = self._do_python_keypoint_eval(res_file)
         name_value = OrderedDict(info_str)
 
+        if tmp_folder is not None:
+            tmp_folder.cleanup()
+
         return name_value
 
     def _write_coco_keypoint_results(self, keypoints, res_file):
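The @deprecated_api_warning decorator from mmcv keeps the old keyword working: a call that still passes outputs= is forwarded to results= and emits a deprecation warning. Illustration (dataset and results are placeholders):

# Both calls reach the same implementation; the first additionally
# warns that `outputs` is deprecated in favor of `results`.
dataset.evaluate(outputs=results, metric='mAP')  # deprecated spelling
dataset.evaluate(results, metric='mAP')          # preferred spelling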
37 changes: 25 additions & 12 deletions mmpose/datasets/datasets/animal/animal_atrw_dataset.py
@@ -1,11 +1,12 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-import os
+import os.path as osp
+import tempfile
 import warnings
 from collections import OrderedDict, defaultdict
 
 import json_tricks as json
 import numpy as np
-from mmcv import Config
+from mmcv import Config, deprecated_api_warning
 from xtcocotools.cocoeval import COCOeval
 
 from ....core.post_processing import oks_nms, soft_oks_nms
@@ -157,7 +158,7 @@ def _load_coco_keypoint_annotation_kernel(self, img_id):

             center, scale = self._xywh2cs(*obj['clean_bbox'][:4], padding=1.0)
 
-            image_file = os.path.join(self.img_prefix, self.id2name[img_id])
+            image_file = osp.join(self.img_prefix, self.id2name[img_id])
             rec.append({
                 'image_file': image_file,
                 'center': center,
@@ -174,7 +175,8 @@ def _load_coco_keypoint_annotation_kernel(self, img_id):

         return rec
 
-    def evaluate(self, outputs, res_folder, metric='mAP', **kwargs):
+    @deprecated_api_warning(name_dict=dict(outputs='results'))
+    def evaluate(self, results, res_folder=None, metric='mAP', **kwargs):
         """Evaluate coco keypoint results. The pose prediction results will be
         saved in ``${res_folder}/result_keypoints.json``.
@@ -185,7 +187,8 @@ def evaluate(self, outputs, res_folder, metric='mAP', **kwargs):
         - heatmap width: W
         Args:
-            outputs (list[dict]): Outputs containing the following items.
+            results (list[dict]): Testing results containing the following
+                items:
             - preds (np.ndarray[N,K,3]): The first two dimensions are \
                 coordinates, score is the third dimension of the array.
@@ -195,7 +198,9 @@ def evaluate(self, outputs, res_folder, metric='mAP', **kwargs):
                 /000000393226.jpg']
             - heatmap (np.ndarray[N, K, H, W]): model output heatmap
             - bbox_id (list(int)).
-            res_folder (str): Path of directory to save the results.
+            res_folder (str, optional): The folder to save the testing
+                results. If not specified, a temp folder will be created.
+                Default: None.
             metric (str | list[str]): Metric to be performed. Defaults: 'mAP'.
         Returns:
@@ -207,15 +212,20 @@ def evaluate(self, outputs, res_folder, metric='mAP', **kwargs):
             if metric not in allowed_metrics:
                 raise KeyError(f'metric {metric} is not supported')
 
-        res_file = os.path.join(res_folder, 'result_keypoints.json')
+        if res_folder is not None:
+            tmp_folder = None
+            res_file = osp.join(res_folder, 'result_keypoints.json')
+        else:
+            tmp_folder = tempfile.TemporaryDirectory()
+            res_file = osp.join(tmp_folder.name, 'result_keypoints.json')
 
         kpts = defaultdict(list)
 
-        for output in outputs:
-            preds = output['preds']
-            boxes = output['boxes']
-            image_paths = output['image_paths']
-            bbox_ids = output['bbox_ids']
+        for result in results:
+            preds = result['preds']
+            boxes = result['boxes']
+            image_paths = result['image_paths']
+            bbox_ids = result['bbox_ids']
 
             batch_size = len(image_paths)
             for i in range(batch_size):
@@ -264,6 +274,9 @@ def evaluate(self, outputs, res_folder, metric='mAP', **kwargs):
         info_str = self._do_python_keypoint_eval(res_file)
         name_value = OrderedDict(info_str)
 
+        if tmp_folder is not None:
+            tmp_folder.cleanup()
+
         return name_value
 
     def _write_coco_keypoint_results(self, keypoints, res_file):
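A note on the lifetime handling used in both datasets above: the TemporaryDirectory handle is kept in tmp_folder and cleanup() is called only after the metrics have been computed from res_file, since the folder must survive most of the method and exists only when res_folder is None. An equivalent alternative using contextlib (a sketch of a different design, not what the commit does):

import contextlib
import os.path as osp
import tempfile

def evaluate_sketch(results, res_folder=None, metric='mAP'):
    # ExitStack removes the temp dir, if one was created, on return.
    with contextlib.ExitStack() as stack:
        if res_folder is None:
            res_folder = stack.enter_context(tempfile.TemporaryDirectory())
        res_file = osp.join(res_folder, 'result_keypoints.json')
        # ... write keypoints to res_file, compute and return metrics ...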
(Diffs for the remaining 43 changed files are not shown.)
