Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix grouping GT vs prediction by not assuming only predictions have scores #14

Open
wants to merge 9 commits into
base: master
Choose a base branch
from
33 changes: 27 additions & 6 deletions coco_explorer.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import argparse
import json
import os
import re

Expand All @@ -12,10 +13,28 @@


@st.cache(allow_output_mutation=True)
def get_inspector(coco_train, coco_predictions, images_path, eval_type, iou_min, iou_max):
coco = COCO(coco_train)
coco_dt = coco.loadRes(coco_predictions)
inspector = CoCoInspector(coco, coco_dt, base_path=images_path,
def get_inspector(coco_train, coco_predictions, images_path, eval_type,
iou_min, iou_max, filter_categories):
coco_gt = COCO(coco_train)
if coco_predictions is None:
coco_dt = coco_gt
else:
coco = json.load(open(coco_predictions))
if isinstance(coco, dict) and 'annotations' in coco:
coco = coco['annotations']
coco_dt = coco_gt.loadRes(coco)
if filter_categories:
filter_catids = [cat['id'] for cat in coco_gt.dataset['categories']
if cat['name'] in filter_categories.split(',')]
for ann in coco_gt.anns.values():
if ann['category_id'] in filter_catids:
coco_gt.dataset['annotations'].remove(ann)
coco_gt.createIndex()
for ann in coco_dt.anns.values():
if ann['category_id'] in filter_catids:
coco_dt.dataset['annotations'].remove(ann)
coco_dt.createIndex()
inspector = CoCoInspector(coco_gt, coco_dt, base_path=images_path,
iou_type=eval_type, iou_min=iou_min, iou_max=iou_max)
inspector.evaluate()
inspector.calculate_stats()
Expand All @@ -33,7 +52,7 @@ def app(args):
'CoCo scores'
])
inspector = get_inspector(args.coco_train, args.coco_predictions, args.images_path,
args.eval_type, ioumin, ioumax)
args.eval_type, ioumin, ioumax, args.filter_categories)
if topbox == 'inspect predictions visually':

st.sidebar.subheader('Inspect predictions')
Expand Down Expand Up @@ -192,7 +211,7 @@ def app(args):
parser = argparse.ArgumentParser()
parser.add_argument("--coco_train", type=str, required=True, metavar="PATH/TO/COCO.json",
help="COCO dataset to inspect")
parser.add_argument("--coco_predictions", type=str, required=True, metavar="PATH/TO/COCO.json",
parser.add_argument("--coco_predictions", type=str, default=None, metavar="PATH/TO/COCO.json",
help="COCO annotations to compare to")
parser.add_argument("--images_path", type=str, default=os.getcwd(), metavar="PATH/TO/IMAGES/",
help="Directory path to prepend to file_name paths in COCO")
Expand All @@ -202,6 +221,8 @@ def app(args):
help="Initial minimum IoU (overlap) (what constitutes a 'match')")
parser.add_argument("--iou_max", type=float, default=0.95,
help="Initial maximum IoU (overlap) (what constitutes a 'match')")
parser.add_argument("--filter_categories", type=str, default="", metavar="COMMA-SEPD-LIST",
help="Strip annotations for these categories after loading")
args = parser.parse_args()
if args.images_path[-1] != '/':
args.images_path += '/'
Expand Down
21 changes: 10 additions & 11 deletions cocoinspector.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ def calculate_stats(self):
all_anns = self.coco_gt.loadAnns(self.coco_gt.getAnnIds())
dfannot = pd.DataFrame.from_records(all_anns)[['area', 'category_id', 'bbox']]

dfannot['ann_ar'] = dfannot.bbox.apply(lambda x: x[2] / x[3])
dfannot['ann_ar'] = dfannot.bbox.apply(lambda x: x[2] / x[3] if x[2] * x[3] else -1)
dfannot['category_name'] = dfannot.category_id.apply(lambda x: self.coco_gt.cats[x]['name'])

self.annot_df = dfannot
Expand Down Expand Up @@ -151,21 +151,20 @@ def get_detection_matches(self, image_id):
dtmatches = []
return list(set(gtmatches)), list(set(dtmatches))

def organize_annotations(self, annotations, gtmatches, dtmatches, is_gt=True):
    """Tag each annotation with a human-readable label and its match type.

    Parameters
    ----------
    annotations : list of dict
        COCO-style annotations (each with 'id' and 'category_id').
    gtmatches : collection of int
        Ids of detections that matched some ground-truth annotation.
    dtmatches : collection of int
        Ids of ground-truth annotations that were matched by a detection.
    is_gt : bool
        True when `annotations` are ground truth, False when detections.
        (Previously this was inferred from the presence of a 'score' key,
        which breaks when ground-truth annotations also carry scores.)

    Returns
    -------
    list of dict
        The same annotation dicts (mutated in place), each extended with:
        'label' — category name looked up from the ground-truth index;
        'type'  — 'gt'/'fn' for ground truth (matched / missed),
                  'tp'/'fp' for detections (matched / spurious).
    """
    collect = []
    for ann in annotations:
        ann['label'] = self.coco_gt.cats[ann['category_id']]['name']
        if is_gt:
            # Ground truth: matched by a detection -> 'gt', otherwise a miss ('fn').
            ann['type'] = 'gt' if ann['id'] in dtmatches else 'fn'
        else:
            # Detection: matched a ground truth -> 'tp', otherwise spurious ('fp').
            ann['type'] = 'tp' if ann['id'] in gtmatches else 'fp'
        collect.append(ann)
    return collect

Expand All @@ -186,8 +185,8 @@ def visualize_image(self, image_id,
dt_annotations = self._get_detections(self.coco_dt, image_id,
cat_ids=[self.cat2id[cat] for cat in only_categories or []])
gtmatches, dtmatches = self.get_detection_matches(image_id)
annotations = annotations + dt_annotations
annotations = self.organize_annotations(annotations, gtmatches, dtmatches)
annotations = (self.organize_annotations(annotations, gtmatches, dtmatches, True) +
self.organize_annotations(dt_annotations, gtmatches, dtmatches, False))

image = Image.open(self._imageid2path(image_id))
# cannot work with 16/32 bit or float images due to Pillow#3011 Pillow#3159 Pillow#3838
Expand Down
8 changes: 4 additions & 4 deletions pycoco.py
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,7 @@ def computeIoU(self, imgId, catId):
dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
if len(gt) == 0 and len(dt) == 0:
return []
inds = np.argsort([-d['score'] for d in dt], kind='mergesort')
inds = np.argsort([-d.get('score', 0) for d in dt], kind='mergesort')
dt = [dt[i] for i in inds]
if len(dt) > p.maxDets[-1]:
dt = dt[0:p.maxDets[-1]]
Expand All @@ -194,7 +194,7 @@ def computeOks(self, imgId, catId):
# dimension here should be Nxm
gts = self._gts[imgId, catId]
dts = self._dts[imgId, catId]
inds = np.argsort([-d['score'] for d in dts], kind='mergesort')
inds = np.argsort([-d.get('score', 0) for d in dts], kind='mergesort')
dts = [dts[i] for i in inds]
if len(dts) > p.maxDets[-1]:
dts = dts[0:p.maxDets[-1]]
Expand Down Expand Up @@ -261,7 +261,7 @@ def evaluateImg(self, imgId, catId, aRng, maxDet):
# sort dt highest score first, sort gt ignore last
gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
gt = [gt[i] for i in gtind]
dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
dtind = np.argsort([-d.get('score', 0) for d in dt], kind='mergesort')
dt = [dt[i] for i in dtind[0:maxDet]]
iscrowd = [int(o['iscrowd']) for o in gt]
# load computed ious
Expand Down Expand Up @@ -312,7 +312,7 @@ def evaluateImg(self, imgId, catId, aRng, maxDet):
'gtIds': [g['id'] for g in gt],
'dtMatches': dtm,
'gtMatches': gtm,
'dtScores': [d['score'] for d in dt],
'dtScores': [d.get('score', 0) for d in dt],
'gtIgnore': gtIg,
'dtIgnore': dtIg,
}
Expand Down