From a4d6b35d42fae3e105f2d9124548a0a8a0068edd Mon Sep 17 00:00:00 2001 From: Rodrigo Berriel Date: Fri, 19 Apr 2019 22:06:03 +0200 Subject: [PATCH] Add a switch for POST_NMS per batch/image during training --- maskrcnn_benchmark/config/defaults.py | 3 +++ maskrcnn_benchmark/modeling/rpn/inference.py | 10 +++++++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/maskrcnn_benchmark/config/defaults.py b/maskrcnn_benchmark/config/defaults.py index 4e2fe1acc..ddc2c4e21 100644 --- a/maskrcnn_benchmark/config/defaults.py +++ b/maskrcnn_benchmark/config/defaults.py @@ -165,6 +165,9 @@ # all FPN levels _C.MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN = 2000 _C.MODEL.RPN.FPN_POST_NMS_TOP_N_TEST = 2000 +# Apply the post NMS per batch (default) or per image during training +# (default is True to be consistent with Detectron, see Issue #672) +_C.MODEL.RPN.FPN_POST_NMS_PER_BATCH = True # Custom rpn head, empty to use default conv or separable conv _C.MODEL.RPN.RPN_HEAD = "SingleConvRPNHead" diff --git a/maskrcnn_benchmark/modeling/rpn/inference.py b/maskrcnn_benchmark/modeling/rpn/inference.py index 556082645..9fd23c5bc 100644 --- a/maskrcnn_benchmark/modeling/rpn/inference.py +++ b/maskrcnn_benchmark/modeling/rpn/inference.py @@ -24,6 +24,7 @@ def __init__( min_size, box_coder=None, fpn_post_nms_top_n=None, + fpn_post_nms_per_batch=True, ): """ Arguments: @@ -47,6 +48,7 @@ def __init__( if fpn_post_nms_top_n is None: fpn_post_nms_top_n = post_nms_top_n self.fpn_post_nms_top_n = fpn_post_nms_top_n + self.fpn_post_nms_per_batch = fpn_post_nms_per_batch def add_gt_proposals(self, proposals, targets): """ @@ -154,9 +156,9 @@ def select_over_all_levels(self, boxlists): # different behavior during training and during testing: # during training, post_nms_top_n is over *all* the proposals combined, while # during testing, it is over the proposals for each image - # TODO resolve this difference and make it consistent. It should be per image, - # and not per batch - if self.training: + # NOTE: it should be per image, and not per batch. However, to be consistent + # with Detectron, the default is per batch (see Issue #672) + if self.training and self.fpn_post_nms_per_batch: objectness = torch.cat( [boxlist.get_field("objectness") for boxlist in boxlists], dim=0 ) @@ -189,6 +191,7 @@ def make_rpn_postprocessor(config, rpn_box_coder, is_train): if not is_train: pre_nms_top_n = config.MODEL.RPN.PRE_NMS_TOP_N_TEST post_nms_top_n = config.MODEL.RPN.POST_NMS_TOP_N_TEST + fpn_post_nms_per_batch = config.MODEL.RPN.FPN_POST_NMS_PER_BATCH nms_thresh = config.MODEL.RPN.NMS_THRESH min_size = config.MODEL.RPN.MIN_SIZE box_selector = RPNPostProcessor( @@ -198,5 +201,6 @@ def make_rpn_postprocessor(config, rpn_box_coder, is_train): min_size=min_size, box_coder=rpn_box_coder, fpn_post_nms_top_n=fpn_post_nms_top_n, + fpn_post_nms_per_batch=fpn_post_nms_per_batch, ) return box_selector