From f93b369282657c4a338d6cc947e7ebb213a20c1a Mon Sep 17 00:00:00 2001 From: dongdk Date: Sun, 25 Nov 2018 22:12:13 +0800 Subject: [PATCH 01/13] make pixel indexes 0-based for bounding box in pascal voc dataset --- maskrcnn_benchmark/data/datasets/voc.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/maskrcnn_benchmark/data/datasets/voc.py b/maskrcnn_benchmark/data/datasets/voc.py index 6b5776419..459985bd1 100644 --- a/maskrcnn_benchmark/data/datasets/voc.py +++ b/maskrcnn_benchmark/data/datasets/voc.py @@ -88,22 +88,24 @@ def _preprocess_annotation(self, target): boxes = [] gt_classes = [] difficult_boxes = [] + TO_REMOVE = 1 + for obj in target.iter("object"): difficult = int(obj.find("difficult").text) == 1 if not self.keep_difficult and difficult: continue name = obj.find("name").text.lower().strip() bb = obj.find("bndbox") + # Make pixel indexes 0-based + # Refer to "https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/pascal_voc.py#L208-L211" + box = [ + bb.find("xmin").text, + bb.find("ymin").text, + bb.find("xmax").text, + bb.find("ymax").text, + ] bndbox = tuple( - map( - int, - [ - bb.find("xmin").text, - bb.find("ymin").text, - bb.find("xmax").text, - bb.find("ymax").text, - ], - ) + map(lambda x: x - TO_REMOVE, list(map(int, box))) ) boxes.append(bndbox) From a46bfb97cdcea0a91334400859491628a0002a73 Mon Sep 17 00:00:00 2001 From: dongdk Date: Wed, 5 Dec 2018 10:56:17 +0800 Subject: [PATCH 02/13] replacing all instances of torch.distributed.deprecated with torch.distributed --- maskrcnn_benchmark/engine/inference.py | 4 ++-- maskrcnn_benchmark/engine/trainer.py | 2 +- maskrcnn_benchmark/utils/comm.py | 28 +++++++++++++------------- tools/test_net.py | 2 +- tools/train_net.py | 4 ++-- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/maskrcnn_benchmark/engine/inference.py b/maskrcnn_benchmark/engine/inference.py index fbe0409f2..74d742d26 100644 --- a/maskrcnn_benchmark/engine/inference.py +++ b/maskrcnn_benchmark/engine/inference.py @@ -65,8 +65,8 @@ def inference( # convert to a torch.device for efficiency device = torch.device(device) num_devices = ( - torch.distributed.deprecated.get_world_size() - if torch.distributed.deprecated.is_initialized() + torch.distributed.get_world_size() + if torch.distributed.is_initialized() else 1 ) logger = logging.getLogger("maskrcnn_benchmark.inference") diff --git a/maskrcnn_benchmark/engine/trainer.py b/maskrcnn_benchmark/engine/trainer.py index 3616947af..57d0f76cb 100644 --- a/maskrcnn_benchmark/engine/trainer.py +++ b/maskrcnn_benchmark/engine/trainer.py @@ -4,7 +4,7 @@ import time import torch -from torch.distributed import deprecated as dist +import torch.distributed as dist from maskrcnn_benchmark.utils.comm import get_world_size from maskrcnn_benchmark.utils.metric_logger import MetricLogger diff --git a/maskrcnn_benchmark/utils/comm.py b/maskrcnn_benchmark/utils/comm.py index 48deb466a..8208f6c1e 100644 --- a/maskrcnn_benchmark/utils/comm.py +++ b/maskrcnn_benchmark/utils/comm.py @@ -13,21 +13,21 @@ def get_world_size(): - if not torch.distributed.deprecated.is_initialized(): + if not torch.distributed.is_initialized(): return 1 - return torch.distributed.deprecated.get_world_size() + return torch.distributed.get_world_size() def get_rank(): - if not torch.distributed.deprecated.is_initialized(): + if not torch.distributed.is_initialized(): return 0 - return torch.distributed.deprecated.get_rank() + return torch.distributed.get_rank() def is_main_process(): - 
if not torch.distributed.deprecated.is_initialized(): + if not torch.distributed.is_initialized(): return True - return torch.distributed.deprecated.get_rank() == 0 + return torch.distributed.get_rank() == 0 def synchronize(): @@ -35,10 +35,10 @@ def synchronize(): Helper function to synchronize between multiple processes when using distributed training """ - if not torch.distributed.deprecated.is_initialized(): + if not torch.distributed.is_initialized(): return - world_size = torch.distributed.deprecated.get_world_size() - rank = torch.distributed.deprecated.get_rank() + world_size = torch.distributed.get_world_size() + rank = torch.distributed.get_rank() if world_size == 1: return @@ -47,7 +47,7 @@ def _send_and_wait(r): tensor = torch.tensor(0, device="cuda") else: tensor = torch.tensor(1, device="cuda") - torch.distributed.deprecated.broadcast(tensor, r) + torch.distributed.broadcast(tensor, r) while tensor.item() == 1: time.sleep(1) @@ -103,11 +103,11 @@ def scatter_gather(data): # each process will then serialize the data to the folder defined by # the main process, and then the main process reads all of the serialized # files and returns them in a list - if not torch.distributed.deprecated.is_initialized(): + if not torch.distributed.is_initialized(): return [data] synchronize() # get rank of the current process - rank = torch.distributed.deprecated.get_rank() + rank = torch.distributed.get_rank() # the data to communicate should be small data_to_communicate = torch.empty(256, dtype=torch.uint8, device="cuda") @@ -119,7 +119,7 @@ def scatter_gather(data): synchronize() # the main process (rank=0) communicates the data to all processes - torch.distributed.deprecated.broadcast(data_to_communicate, 0) + torch.distributed.broadcast(data_to_communicate, 0) # get the data that was communicated tmp_dir = _decode(data_to_communicate) @@ -135,7 +135,7 @@ def scatter_gather(data): # only the master process returns the data if rank == 0: data_list = [] - world_size = torch.distributed.deprecated.get_world_size() + world_size = torch.distributed.get_world_size() for r in range(world_size): file_path = os.path.join(tmp_dir, file_template.format(r)) d = torch.load(file_path) diff --git a/tools/test_net.py b/tools/test_net.py index 8ccb76b12..abd35e6cb 100644 --- a/tools/test_net.py +++ b/tools/test_net.py @@ -41,7 +41,7 @@ def main(): if distributed: torch.cuda.set_device(args.local_rank) - torch.distributed.deprecated.init_process_group( + torch.distributed.init_process_group( backend="nccl", init_method="env://" ) diff --git a/tools/train_net.py b/tools/train_net.py index 5191a309f..b2a3799b6 100644 --- a/tools/train_net.py +++ b/tools/train_net.py @@ -35,7 +35,7 @@ def train(cfg, local_rank, distributed): scheduler = make_lr_scheduler(cfg, optimizer) if distributed: - model = torch.nn.parallel.deprecated.DistributedDataParallel( + model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[local_rank], output_device=local_rank, # this should be removed if we update BatchNorm stats broadcast_buffers=False, @@ -136,7 +136,7 @@ def main(): if args.distributed: torch.cuda.set_device(args.local_rank) - torch.distributed.deprecated.init_process_group( + torch.distributed.init_process_group( backend="nccl", init_method="env://" ) From 7bbf46f7da66136350d47efba0457179e6f1804b Mon Sep 17 00:00:00 2001 From: dongdk Date: Wed, 5 Dec 2018 11:06:04 +0800 Subject: [PATCH 03/13] replacing all instances of torch.distributed.deprecated with torch.distributed --- 
maskrcnn_benchmark/data/samplers/distributed.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/maskrcnn_benchmark/data/samplers/distributed.py b/maskrcnn_benchmark/data/samplers/distributed.py index 6b8b3353b..5de06e3d6 100644 --- a/maskrcnn_benchmark/data/samplers/distributed.py +++ b/maskrcnn_benchmark/data/samplers/distributed.py @@ -1,10 +1,9 @@ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -# Code is copy-pasted exactly as in torch.utils.data.distributed, -# with a modification in the import to use the deprecated backend +# Code is copy-pasted exactly as in torch.utils.data.distributed. # FIXME remove this once c10d fixes the bug it has import math import torch -import torch.distributed.deprecated as dist +import torch.distributed as dist from torch.utils.data.sampler import Sampler From baba31f4c1c49fbe928008854ba4fdc57b1cc1a7 Mon Sep 17 00:00:00 2001 From: dongdk Date: Tue, 15 Jan 2019 21:38:12 +0800 Subject: [PATCH 04/13] add GroupNorm --- .../e2e_faster_rcnn_R_50_FPN_1x_gn.yaml | 48 +++++++ ...e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml | 48 +++++++ .../e2e_mask_rcnn_R_50_FPN_1x_gn.yaml | 59 +++++++++ ...e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml | 59 +++++++++ ...cratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml | 49 +++++++ ...e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml | 49 +++++++ .../scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml | 60 +++++++++ ...e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml | 62 +++++++++ maskrcnn_benchmark/config/defaults.py | 32 +++++ maskrcnn_benchmark/config/paths_catalog.py | 2 + maskrcnn_benchmark/layers/__init__.py | 7 +- maskrcnn_benchmark/layers/group_norm.py | 92 +++++++++++++ .../modeling/backbone/backbone.py | 1 + maskrcnn_benchmark/modeling/backbone/fpn.py | 34 +++-- .../modeling/backbone/resnet.py | 123 +++++++++++++++++- maskrcnn_benchmark/modeling/make_layers.py | 54 ++++++++ .../box_head/roi_box_feature_extractors.py | 77 +++++++++-- .../mask_head/roi_mask_feature_extractors.py | 12 +- maskrcnn_benchmark/utils/c2_model_loading.py | 12 ++ 19 files changed, 856 insertions(+), 24 deletions(-) create mode 100644 configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_1x_gn.yaml create mode 100644 configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml create mode 100644 configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_1x_gn.yaml create mode 100644 configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml create mode 100644 configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml create mode 100644 configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml create mode 100644 configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml create mode 100644 configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml create mode 100644 maskrcnn_benchmark/layers/group_norm.py create mode 100644 maskrcnn_benchmark/modeling/make_layers.py diff --git a/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_1x_gn.yaml b/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_1x_gn.yaml new file mode 100644 index 000000000..3efd0bf3c --- /dev/null +++ b/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_1x_gn.yaml @@ -0,0 +1,48 @@ +INPUT: + MIN_SIZE_TRAIN: 800 + MAX_SIZE_TRAIN: 1333 + MIN_SIZE_TEST: 800 + MAX_SIZE_TEST: 1333 +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50-GN" + BACKBONE: + CONV_BODY: "R-50-FPN" + OUT_CHANNELS: 256 + RESNETS: # use GN for backbone + TRANS_FUNC: "BottleneckWithGN" + STEM_FUNC: "StemWithGN" + FPN: + USE_GN: True # use GN for FPN + 
RPN: + USE_FPN: True + ANCHOR_STRIDE: (4, 8, 16, 32, 64) + PRE_NMS_TOP_N_TRAIN: 2000 + PRE_NMS_TOP_N_TEST: 1000 + POST_NMS_TOP_N_TEST: 1000 + FPN_POST_NMS_TOP_N_TEST: 1000 + ROI_HEADS: + USE_FPN: True + BATCH_SIZE_PER_IMAGE: 512 + POSITIVE_FRACTION: 0.25 + ROI_BOX_HEAD: + USE_GN: True # use GN for bbox head + POOLER_RESOLUTION: 7 + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + POOLER_SAMPLING_RATIO: 2 + FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor" + PREDICTOR: "FPNPredictor" +DATASETS: + TRAIN: ("coco_2014_train", "coco_2014_valminusminival") + TEST: ("coco_2014_minival",) +DATALOADER: + SIZE_DIVISIBILITY: 32 +SOLVER: + # Assume 4 gpus + BASE_LR: 0.01 + WEIGHT_DECAY: 0.0001 + STEPS: (120000, 180000) + MAX_ITER: 180000 + IMS_PER_BATCH: 8 +TEST: + IMS_PER_BATCH: 8 diff --git a/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml b/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml new file mode 100644 index 000000000..ac6771a5d --- /dev/null +++ b/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml @@ -0,0 +1,48 @@ +INPUT: + MIN_SIZE_TRAIN: 800 + MAX_SIZE_TRAIN: 1333 + MIN_SIZE_TEST: 800 + MAX_SIZE_TEST: 1333 +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50-GN" + BACKBONE: + CONV_BODY: "R-50-FPN" + OUT_CHANNELS: 256 + RESNETS: # use GN for backbone + TRANS_FUNC: "BottleneckWithGN" + STEM_FUNC: "StemWithGN" + FPN: + USE_GN: True # use GN for FPN + RPN: + USE_FPN: True + ANCHOR_STRIDE: (4, 8, 16, 32, 64) + PRE_NMS_TOP_N_TRAIN: 2000 + PRE_NMS_TOP_N_TEST: 1000 + POST_NMS_TOP_N_TEST: 1000 + FPN_POST_NMS_TOP_N_TEST: 1000 + ROI_HEADS: + USE_FPN: True + BATCH_SIZE_PER_IMAGE: 512 + POSITIVE_FRACTION: 0.25 + ROI_BOX_HEAD: + USE_GN: True # use GN for bbox head + POOLER_RESOLUTION: 7 + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + POOLER_SAMPLING_RATIO: 2 + FEATURE_EXTRACTOR: "FPNXconv1fcFeatureExtractor" + PREDICTOR: "FPNPredictor" +DATASETS: + TRAIN: ("coco_2014_train", "coco_2014_valminusminival") + TEST: ("coco_2014_minival",) +DATALOADER: + SIZE_DIVISIBILITY: 32 +SOLVER: + # Assume 4 gpus + BASE_LR: 0.01 + WEIGHT_DECAY: 0.0001 + STEPS: (120000, 180000) + MAX_ITER: 180000 + IMS_PER_BATCH: 8 +TEST: + IMS_PER_BATCH: 8 \ No newline at end of file diff --git a/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_1x_gn.yaml b/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_1x_gn.yaml new file mode 100644 index 000000000..46d3e7a5b --- /dev/null +++ b/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_1x_gn.yaml @@ -0,0 +1,59 @@ +INPUT: + MIN_SIZE_TRAIN: 800 + MAX_SIZE_TRAIN: 1333 + MIN_SIZE_TEST: 800 + MAX_SIZE_TEST: 1333 +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50-GN" + BACKBONE: + CONV_BODY: "R-50-FPN" + OUT_CHANNELS: 256 + RESNETS: # use GN for backbone + TRANS_FUNC: "BottleneckWithGN" + STEM_FUNC: "StemWithGN" + FPN: + USE_GN: True # use GN for FPN + RPN: + USE_FPN: True + ANCHOR_STRIDE: (4, 8, 16, 32, 64) + PRE_NMS_TOP_N_TRAIN: 2000 + PRE_NMS_TOP_N_TEST: 1000 + POST_NMS_TOP_N_TEST: 1000 + FPN_POST_NMS_TOP_N_TEST: 1000 + ROI_HEADS: + USE_FPN: True + BATCH_SIZE_PER_IMAGE: 512 + POSITIVE_FRACTION: 0.25 + ROI_BOX_HEAD: + USE_GN: True # use GN for bbox head + POOLER_RESOLUTION: 7 + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + POOLER_SAMPLING_RATIO: 2 + FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor" + PREDICTOR: "FPNPredictor" + ROI_MASK_HEAD: + USE_GN: True # use GN for mask head + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + CONV_LAYERS: (256, 256, 256, 256) + 
FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor" + PREDICTOR: "MaskRCNNC4Predictor" + POOLER_RESOLUTION: 14 + POOLER_SAMPLING_RATIO: 2 + RESOLUTION: 28 + SHARE_BOX_FEATURE_EXTRACTOR: False + MASK_ON: True +DATASETS: + TRAIN: ("coco_2014_train", "coco_2014_valminusminival") + TEST: ("coco_2014_minival",) +DATALOADER: + SIZE_DIVISIBILITY: 32 +SOLVER: + # Assume 4 gpus + BASE_LR: 0.01 + WEIGHT_DECAY: 0.0001 + STEPS: (120000, 180000) + MAX_ITER: 180000 + IMS_PER_BATCH: 8 +TEST: + IMS_PER_BATCH: 8 \ No newline at end of file diff --git a/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml b/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml new file mode 100644 index 000000000..2d2b9b629 --- /dev/null +++ b/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml @@ -0,0 +1,59 @@ +INPUT: + MIN_SIZE_TRAIN: 800 + MAX_SIZE_TRAIN: 1333 + MIN_SIZE_TEST: 800 + MAX_SIZE_TEST: 1333 +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50-GN" + BACKBONE: + CONV_BODY: "R-50-FPN" + OUT_CHANNELS: 256 + RESNETS: # use GN for backbone + TRANS_FUNC: "BottleneckWithGN" + STEM_FUNC: "StemWithGN" + FPN: + USE_GN: True # use GN for FPN + RPN: + USE_FPN: True + ANCHOR_STRIDE: (4, 8, 16, 32, 64) + PRE_NMS_TOP_N_TRAIN: 2000 + PRE_NMS_TOP_N_TEST: 1000 + POST_NMS_TOP_N_TEST: 1000 + FPN_POST_NMS_TOP_N_TEST: 1000 + ROI_HEADS: + USE_FPN: True + BATCH_SIZE_PER_IMAGE: 512 + POSITIVE_FRACTION: 0.25 + ROI_BOX_HEAD: + USE_GN: True # use GN for bbox head + POOLER_RESOLUTION: 7 + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + POOLER_SAMPLING_RATIO: 2 + FEATURE_EXTRACTOR: "FPNXconv1fcFeatureExtractor" + PREDICTOR: "FPNPredictor" + ROI_MASK_HEAD: + USE_GN: True # use GN for mask head + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + CONV_LAYERS: (256, 256, 256, 256) + FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor" + PREDICTOR: "MaskRCNNC4Predictor" + POOLER_RESOLUTION: 14 + POOLER_SAMPLING_RATIO: 2 + RESOLUTION: 28 + SHARE_BOX_FEATURE_EXTRACTOR: False + MASK_ON: True +DATASETS: + TRAIN: ("coco_2014_train", "coco_2014_valminusminival") + TEST: ("coco_2014_minival",) +DATALOADER: + SIZE_DIVISIBILITY: 32 +SOLVER: + # Assume 4 gpus + BASE_LR: 0.01 + WEIGHT_DECAY: 0.0001 + STEPS: (120000, 180000) + MAX_ITER: 180000 + IMS_PER_BATCH: 8 +TEST: + IMS_PER_BATCH: 8 \ No newline at end of file diff --git a/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml b/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml new file mode 100644 index 000000000..c900f4f7c --- /dev/null +++ b/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml @@ -0,0 +1,49 @@ +INPUT: + MIN_SIZE_TRAIN: 800 + MAX_SIZE_TRAIN: 1333 + MIN_SIZE_TEST: 800 + MAX_SIZE_TEST: 1333 +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + WEIGHT: "" # no pretrained model + BACKBONE: + CONV_BODY: "R-50-FPN" + OUT_CHANNELS: 256 + FREEZE_CONV_BODY_AT: -1 # finetune all layers + RESNETS: # use GN for backbone + TRANS_FUNC: "BottleneckWithGN" + STEM_FUNC: "StemWithGN" + FPN: + USE_GN: True # use GN for FPN + RPN: + USE_FPN: True + ANCHOR_STRIDE: (4, 8, 16, 32, 64) + PRE_NMS_TOP_N_TRAIN: 2000 + PRE_NMS_TOP_N_TEST: 1000 + POST_NMS_TOP_N_TEST: 1000 + FPN_POST_NMS_TOP_N_TEST: 1000 + ROI_HEADS: + USE_FPN: True + BATCH_SIZE_PER_IMAGE: 512 + POSITIVE_FRACTION: 0.25 + ROI_BOX_HEAD: + USE_GN: True # use GN for bbox head + POOLER_RESOLUTION: 7 + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + POOLER_SAMPLING_RATIO: 2 + FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor" + PREDICTOR: "FPNPredictor" 
+DATASETS: + TRAIN: ("coco_2014_train", "coco_2014_valminusminival") + TEST: ("coco_2014_minival",) +DATALOADER: + SIZE_DIVISIBILITY: 32 +SOLVER: + # Assume 4 gpus + BASE_LR: 0.01 + WEIGHT_DECAY: 0.0001 + STEPS: (420000, 500000) # 3x lr schedule + MAX_ITER: 540000 + IMS_PER_BATCH: 8 +TEST: + IMS_PER_BATCH: 8 \ No newline at end of file diff --git a/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml b/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml new file mode 100644 index 000000000..010796fc7 --- /dev/null +++ b/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml @@ -0,0 +1,49 @@ +INPUT: + MIN_SIZE_TRAIN: 800 + MAX_SIZE_TRAIN: 1333 + MIN_SIZE_TEST: 800 + MAX_SIZE_TEST: 1333 +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + WEIGHT: "" # no pretrained model + BACKBONE: + CONV_BODY: "R-50-FPN" + OUT_CHANNELS: 256 + FREEZE_CONV_BODY_AT: -1 # finetune all layers + RESNETS: # use GN for backbone + TRANS_FUNC: "BottleneckWithGN" + STEM_FUNC: "StemWithGN" + FPN: + USE_GN: True # use GN for FPN + RPN: + USE_FPN: True + ANCHOR_STRIDE: (4, 8, 16, 32, 64) + PRE_NMS_TOP_N_TRAIN: 2000 + PRE_NMS_TOP_N_TEST: 1000 + POST_NMS_TOP_N_TEST: 1000 + FPN_POST_NMS_TOP_N_TEST: 1000 + ROI_HEADS: + USE_FPN: True + BATCH_SIZE_PER_IMAGE: 512 + POSITIVE_FRACTION: 0.25 + ROI_BOX_HEAD: + USE_GN: True # use GN for bbox head + POOLER_RESOLUTION: 7 + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + POOLER_SAMPLING_RATIO: 2 + FEATURE_EXTRACTOR: "FPNXconv1fcFeatureExtractor" + PREDICTOR: "FPNPredictor" +DATASETS: + TRAIN: ("coco_2014_train", "coco_2014_valminusminival") + TEST: ("coco_2014_minival",) +DATALOADER: + SIZE_DIVISIBILITY: 32 +SOLVER: + # Assume 4 gpus + BASE_LR: 0.01 + WEIGHT_DECAY: 0.0001 + STEPS: (420000, 500000) # 3x lr schedule + MAX_ITER: 540000 + IMS_PER_BATCH: 8 +TEST: + IMS_PER_BATCH: 8 \ No newline at end of file diff --git a/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml b/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml new file mode 100644 index 000000000..077c197f4 --- /dev/null +++ b/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml @@ -0,0 +1,60 @@ +INPUT: + MIN_SIZE_TRAIN: 800 + MAX_SIZE_TRAIN: 1280 + MIN_SIZE_TEST: 800 + MAX_SIZE_TEST: 1400 +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + WEIGHT: "" # no pretrained model + BACKBONE: + CONV_BODY: "R-50-FPN" + OUT_CHANNELS: 224 # reduce memory + FREEZE_CONV_BODY_AT: -1 # finetune all layers + RESNETS: # use GN for backbone + TRANS_FUNC: "BottleneckWithGN" + STEM_FUNC: "StemWithGN" + FPN: + USE_GN: True # use GN for FPN + RPN: + USE_FPN: True + ANCHOR_STRIDE: (4, 8, 16, 32, 64) + PRE_NMS_TOP_N_TRAIN: 2000 + PRE_NMS_TOP_N_TEST: 1000 + POST_NMS_TOP_N_TEST: 1000 + FPN_POST_NMS_TOP_N_TEST: 1000 + ROI_HEADS: + USE_FPN: True + BATCH_SIZE_PER_IMAGE: 320 + POSITIVE_FRACTION: 0.25 + ROI_BOX_HEAD: + USE_GN: True # use GN for bbox head + POOLER_RESOLUTION: 7 + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + POOLER_SAMPLING_RATIO: 2 + FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor" + PREDICTOR: "FPNPredictor" + ROI_MASK_HEAD: + USE_GN: True # use GN for mask head + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + CONV_LAYERS: (224, 224, 224, 256) + FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor" + PREDICTOR: "MaskRCNNC4Predictor" + POOLER_RESOLUTION: 14 + POOLER_SAMPLING_RATIO: 2 + RESOLUTION: 28 + SHARE_BOX_FEATURE_EXTRACTOR: False + MASK_ON: True +DATASETS: + TRAIN: ("coco_2014_train", "coco_2014_valminusminival") + TEST: ("coco_2014_minival",) 
+DATALOADER: + SIZE_DIVISIBILITY: 32 +SOLVER: + # Assume 4 gpus + BASE_LR: 0.01 + WEIGHT_DECAY: 0.0001 + STEPS: (420000, 500000) # 3x lr schedule + MAX_ITER: 540000 + IMS_PER_BATCH: 8 +TEST: + IMS_PER_BATCH: 8 diff --git a/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml b/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml new file mode 100644 index 000000000..3352f7d02 --- /dev/null +++ b/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml @@ -0,0 +1,62 @@ +INPUT: + MIN_SIZE_TRAIN: 800 + MAX_SIZE_TRAIN: 1280 + MIN_SIZE_TEST: 800 + MAX_SIZE_TEST: 1400 +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + WEIGHT: "" # no pretrained model + BACKBONE: + CONV_BODY: "R-50-FPN" + OUT_CHANNELS: 224 # reduce memory + FREEZE_CONV_BODY_AT: -1 # finetune all layers + RESNETS: # use GN for backbone + TRANS_FUNC: "BottleneckWithGN" + STEM_FUNC: "StemWithGN" + FPN: + USE_GN: True # use GN for FPN + RPN: + USE_FPN: True + ANCHOR_STRIDE: (4, 8, 16, 32, 64) + PRE_NMS_TOP_N_TRAIN: 2000 + PRE_NMS_TOP_N_TEST: 1000 + POST_NMS_TOP_N_TEST: 1000 + FPN_POST_NMS_TOP_N_TEST: 1000 + ROI_HEADS: + USE_FPN: True + BATCH_SIZE_PER_IMAGE: 320 + POSITIVE_FRACTION: 0.25 + ROI_BOX_HEAD: + USE_GN: True # use GN for bbox head + POOLER_RESOLUTION: 7 + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + POOLER_SAMPLING_RATIO: 2 + CONV_HEAD_DIM: 224 + NUM_STACKED_CONVS: 4 + FEATURE_EXTRACTOR: "FPNXconv1fcFeatureExtractor" + PREDICTOR: "FPNPredictor" + ROI_MASK_HEAD: + USE_GN: True # use GN for mask head + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + CONV_LAYERS: (224, 224, 224, 256) + FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor" + PREDICTOR: "MaskRCNNC4Predictor" + POOLER_RESOLUTION: 14 + POOLER_SAMPLING_RATIO: 2 + RESOLUTION: 28 + SHARE_BOX_FEATURE_EXTRACTOR: False + MASK_ON: True +DATASETS: + TRAIN: ("coco_2014_train", "coco_2014_valminusminival") + TEST: ("coco_2014_minival",) +DATALOADER: + SIZE_DIVISIBILITY: 32 +SOLVER: + # Assume 4 gpus + BASE_LR: 0.01 + WEIGHT_DECAY: 0.0001 + STEPS: (420000, 500000) # 3x lr schedule + MAX_ITER: 540000 + IMS_PER_BATCH: 8 +TEST: + IMS_PER_BATCH: 8 \ No newline at end of file diff --git a/maskrcnn_benchmark/config/defaults.py b/maskrcnn_benchmark/config/defaults.py index ea8c4a773..27b80d3e7 100644 --- a/maskrcnn_benchmark/config/defaults.py +++ b/maskrcnn_benchmark/config/defaults.py @@ -75,6 +75,19 @@ # are not batched with portrait images. 
_C.DATALOADER.ASPECT_RATIO_GROUPING = True + +# ---------------------------------------------------------------------------- # +# Group Norm options +# ---------------------------------------------------------------------------- # +_C.GROUP_NORM = CN() +# Number of dimensions per group in GroupNorm (-1 if using NUM_GROUPS) +_C.GROUP_NORM.DIM_PER_GP = -1 +# Number of groups in GroupNorm (-1 if using DIM_PER_GP) +_C.GROUP_NORM.NUM_GROUPS = 32 +# GroupNorm's small constant in the denominator +_C.GROUP_NORM.EPSILON = 1e-5 + + # ---------------------------------------------------------------------------- # # Backbone options # ---------------------------------------------------------------------------- # @@ -89,6 +102,15 @@ # Add StopGrad at a specified stage so the bottom layers are frozen _C.MODEL.BACKBONE.FREEZE_CONV_BODY_AT = 2 _C.MODEL.BACKBONE.OUT_CHANNELS = 256 * 4 +# GN for backbone +_C.MODEL.BACKBONE.USE_GN = False + + +# ---------------------------------------------------------------------------- # +# FPN options +# ---------------------------------------------------------------------------- # +_C.MODEL.FPN = CN() +_C.MODEL.FPN.USE_GN = False # ---------------------------------------------------------------------------- # @@ -182,6 +204,12 @@ _C.MODEL.ROI_BOX_HEAD.NUM_CLASSES = 81 # Hidden layer dimension when using an MLP for the RoI box head _C.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM = 1024 +# GN +_C.MODEL.ROI_BOX_HEAD.USE_GN = False +# Dilation +_C.MODEL.ROI_BOX_HEAD.DILATION = 1 +_C.MODEL.ROI_BOX_HEAD.CONV_HEAD_DIM = 256 +_C.MODEL.ROI_BOX_HEAD.NUM_STACKED_CONVS = 4 _C.MODEL.ROI_MASK_HEAD = CN() @@ -197,6 +225,10 @@ # Whether or not resize and translate masks to the input image. _C.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS = False _C.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS_THRESHOLD = 0.5 +# Dilation +_C.MODEL.ROI_MASK_HEAD.DILATION = 1 +# GN +_C.MODEL.ROI_MASK_HEAD.USE_GN = False # ---------------------------------------------------------------------------- # # ResNe[X]t options (ResNets = {ResNet, ResNeXt} diff --git a/maskrcnn_benchmark/config/paths_catalog.py b/maskrcnn_benchmark/config/paths_catalog.py index 9dad3227d..a15e37d47 100644 --- a/maskrcnn_benchmark/config/paths_catalog.py +++ b/maskrcnn_benchmark/config/paths_catalog.py @@ -113,7 +113,9 @@ class ModelCatalog(object): S3_C2_DETECTRON_URL = "https://dl.fbaipublicfiles.com/detectron" C2_IMAGENET_MODELS = { "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl", + "MSRA/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl", "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl", + "MSRA/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl", "FAIR/20171220/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl", } diff --git a/maskrcnn_benchmark/layers/__init__.py b/maskrcnn_benchmark/layers/__init__.py index 0b7f77c8b..1eb43324d 100644 --- a/maskrcnn_benchmark/layers/__init__.py +++ b/maskrcnn_benchmark/layers/__init__.py @@ -11,5 +11,10 @@ from .roi_pool import ROIPool from .roi_pool import roi_pool from .smooth_l1_loss import smooth_l1_loss +from .group_norm import GroupNorm, gn_layer_from_cfg -__all__ = ["nms", "roi_align", "ROIAlign", "roi_pool", "ROIPool", "smooth_l1_loss", "Conv2d", "ConvTranspose2d", "interpolate", "FrozenBatchNorm2d"] +__all__ = ["nms", "roi_align", "ROIAlign", "roi_pool", "ROIPool", + "smooth_l1_loss", "Conv2d", "ConvTranspose2d", "interpolate", + "FrozenBatchNorm2d", + "GroupNorm", "gn_layer_from_cfg", + ] diff --git a/maskrcnn_benchmark/layers/group_norm.py b/maskrcnn_benchmark/layers/group_norm.py new file mode 
100644 index 000000000..fe90f287f --- /dev/null +++ b/maskrcnn_benchmark/layers/group_norm.py @@ -0,0 +1,92 @@ +""" +Group Normalization Layer from PANet +url: https://github.com/ShuLiu1993/PANet +""" + +import torch +import torch.nn as nn +from maskrcnn_benchmark.config import cfg + + +class GroupNorm(nn.Module): + + def __init__(self, num_groups, num_channels, eps=1e-5, affine=True): + super().__init__() + self.num_groups = num_groups + self.num_channels = num_channels + self.eps = eps + self.affine = affine + if self.affine: + self.weight = nn.Parameter(torch.Tensor(num_channels)) + self.bias = nn.Parameter(torch.Tensor(num_channels)) + else: + self.register_parameter('weight', None) + self.register_parameter('bias', None) + self.reset_parameters() + + def reset_parameters(self): + if self.affine: + self.weight.data.fill_(1) + self.bias.data.zero_() + + def forward(self, x): + return group_norm( + x, self.num_groups, self.weight, self.bias, self.eps + ) + + def extra_repr(self): + return '{num_groups}, {num_channels}, eps={eps}, ' \ + 'affine={affine}'.format(**self.__dict__) + + +def group_norm(x, num_groups, weight=None, bias=None, eps=1e-5): + input_shape = x.shape + ndim = len(input_shape) + N, C = input_shape[:2] + G = num_groups + + assert C % G == 0, "input channel dimension must divisible by number of groups" + + x = x.view(N, G, -1) + mean = x.mean(-1, keepdim=True) + var = x.var(-1, keepdim=True) + x = (x - mean) / (var + eps).sqrt() + x = x.view(input_shape) + + view_shape = (1, -1) + (1,) * (ndim - 2) + if weight is not None: + return x * weight.view(view_shape) + bias.view(view_shape) + + return x + + +def get_group_gn(dim, dim_per_gp, num_groups): + """get number of groups used by GroupNorm, based on number of channels.""" + assert dim_per_gp == -1 or num_groups == -1, \ + "GroupNorm: can only specify G or C/G." 
+ + if dim_per_gp > 0: + assert dim % dim_per_gp == 0, \ + "dim: {}, dim_per_gp: {}".format(dim, dim_per_gp) + group_gn = dim // dim_per_gp + else: + assert dim % num_groups == 0, \ + "dim: {}, num_groups: {}".format(dim, num_groups) + group_gn = num_groups + + return group_gn + + +def gn_layer_from_cfg(out_channels, affine=True, divisor=1): + out_channels = out_channels // divisor + dim_per_gp = cfg.GROUP_NORM.DIM_PER_GP // divisor + num_groups = cfg.GROUP_NORM.NUM_GROUPS // divisor + eps = cfg.GROUP_NORM.EPSILON # default: 1e-5 + return GroupNorm( + get_group_gn(out_channels, dim_per_gp, num_groups), + out_channels, + eps, + affine + ) + + diff --git a/maskrcnn_benchmark/modeling/backbone/backbone.py b/maskrcnn_benchmark/modeling/backbone/backbone.py index 7a4fea9ae..6e9c263fd 100644 --- a/maskrcnn_benchmark/modeling/backbone/backbone.py +++ b/maskrcnn_benchmark/modeling/backbone/backbone.py @@ -31,6 +31,7 @@ def build_resnet_fpn_backbone(cfg): ], out_channels=out_channels, top_blocks=fpn_module.LastLevelMaxPool(), + use_gn=cfg.MODEL.FPN.USE_GN ) model = nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)])) return model diff --git a/maskrcnn_benchmark/modeling/backbone/fpn.py b/maskrcnn_benchmark/modeling/backbone/fpn.py index c9ee8c674..66f1e8171 100644 --- a/maskrcnn_benchmark/modeling/backbone/fpn.py +++ b/maskrcnn_benchmark/modeling/backbone/fpn.py @@ -2,6 +2,7 @@ import torch import torch.nn.functional as F from torch import nn +from maskrcnn_benchmark.layers import gn_layer_from_cfg class FPN(nn.Module): @@ -11,7 +12,10 @@ class FPN(nn.Module): order, and must be consecutive """ - def __init__(self, in_channels_list, out_channels, top_blocks=None): + def __init__( + self, in_channels_list, out_channels, + top_blocks=None, use_gn=False + ): """ Arguments: in_channels_list (list[int]): number of channels for each feature map that @@ -27,13 +31,27 @@ def __init__(self, in_channels_list, out_channels, top_blocks=None): for idx, in_channels in enumerate(in_channels_list, 1): inner_block = "fpn_inner{}".format(idx) layer_block = "fpn_layer{}".format(idx) - inner_block_module = nn.Conv2d(in_channels, out_channels, 1) - layer_block_module = nn.Conv2d(out_channels, out_channels, 3, 1, 1) - for module in [inner_block_module, layer_block_module]: - # Caffe2 implementation uses XavierFill, which in fact - # corresponds to kaiming_uniform_ in PyTorch - nn.init.kaiming_uniform_(module.weight, a=1) - nn.init.constant_(module.bias, 0) + if use_gn: + inner_block_module = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1, bias=False), + gn_layer_from_cfg(out_channels) + ) + layer_block_module = nn.Sequential( + nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False), + gn_layer_from_cfg(out_channels) + ) + for modules in [inner_block_module, layer_block_module]: + for l in modules.modules(): + if isinstance(l, nn.Conv2d): + nn.init.kaiming_uniform_(l.weight, a=1) + else: + inner_block_module = nn.Conv2d(in_channels, out_channels, 1) + layer_block_module = nn.Conv2d(out_channels, out_channels, 3, 1, 1) + for module in [inner_block_module, layer_block_module]: + # Caffe2 implementation uses XavierFill, which in fact + # corresponds to kaiming_uniform_ in PyTorch + nn.init.kaiming_uniform_(module.weight, a=1) + nn.init.constant_(module.bias, 0) self.add_module(inner_block, inner_block_module) self.add_module(layer_block, layer_block_module) self.inner_blocks.append(inner_block) diff --git a/maskrcnn_benchmark/modeling/backbone/resnet.py b/maskrcnn_benchmark/modeling/backbone/resnet.py 
index 5a703cfb7..6a30419f0 100644 --- a/maskrcnn_benchmark/modeling/backbone/resnet.py +++ b/maskrcnn_benchmark/modeling/backbone/resnet.py @@ -7,6 +7,12 @@ "BottleneckWithFixedBatchNorm", "ResNet50StagesTo4", ) +OR: + model = ResNet( + "StemWithGN", + "BottleneckWithGN", + "ResNet50StagesTo4", + ) Custom implementations may be written in user code and hooked in via the `register_*` functions. """ @@ -16,6 +22,7 @@ import torch.nn.functional as F from torch import nn +from maskrcnn_benchmark.layers import gn_layer_from_cfg from maskrcnn_benchmark.layers import FrozenBatchNorm2d from maskrcnn_benchmark.layers import Conv2d from maskrcnn_benchmark.utils.registry import Registry @@ -289,13 +296,125 @@ def forward(self, x): x = F.relu_(x) x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) return x + return x + + +class BottleneckWithGN(nn.Module): + def __init__( + self, + in_channels, + bottleneck_channels, + out_channels, + num_groups=1, + stride_in_1x1=True, + stride=1, + dilation=1 + ): + super(BottleneckWithGN, self).__init__() + + self.downsample = None + if in_channels != out_channels: + down_stride = stride if dilation == 1 else 1 + self.downsample = nn.Sequential( + Conv2d( + in_channels, out_channels, + kernel_size=1, stride=down_stride, bias=False + ), + gn_layer_from_cfg(out_channels) + ) + + if dilation > 1: + stride = 1 # reset to be 1 + + # The original MSRA ResNet models have stride in the first 1x1 conv + # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have + # stride in the 3x3 conv + stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) + + self.conv1 = Conv2d( + in_channels, + bottleneck_channels, + kernel_size=1, + stride=stride_1x1, + bias=False, + ) + self.gn1 = gn_layer_from_cfg(bottleneck_channels) + # TODO: specify init for the above + + self.conv2 = Conv2d( + bottleneck_channels, + bottleneck_channels, + kernel_size=3, + stride=stride_3x3, + padding=dilation, # dilation * (kernel_size - 1) // 2, + bias=False, + groups=num_groups, + dilation=dilation + ) + self.gn2 = gn_layer_from_cfg(bottleneck_channels) + + self.conv3 = Conv2d( + bottleneck_channels, out_channels, kernel_size=1, bias=False + ) + self.gn3 = gn_layer_from_cfg(out_channels) + + # for l in [self.conv1, self.conv2, self.conv3]: + # nn.init.kaiming_uniform_(l.weight, a=1) + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.gn1(out) + out = F.relu_(out) + + out = self.conv2(out) + out = self.gn2(out) + out = F.relu_(out) + + out0 = self.conv3(out) + out = self.gn3(out0) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = F.relu_(out) + + return out + + +class StemWithGN(nn.Module): + def __init__(self, cfg): + super(StemWithGN, self).__init__() + + out_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS + + self.conv1 = Conv2d( + 3, out_channels, kernel_size=7, stride=2, padding=3, bias=False + ) + self.gn1 = gn_layer_from_cfg(out_channels) + + # for l in [self.conv1,]: + # nn.init.kaiming_uniform_(l.weight, a=1) + + def forward(self, x): + x = self.conv1(x) + x = self.gn1(x) + x = F.relu_(x) + x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) + return x _TRANSFORMATION_MODULES = Registry({ - "BottleneckWithFixedBatchNorm": BottleneckWithFixedBatchNorm + "BottleneckWithFixedBatchNorm": BottleneckWithFixedBatchNorm, + "BottleneckWithGN": BottleneckWithGN, }) -_STEM_MODULES = Registry({"StemWithFixedBatchNorm": StemWithFixedBatchNorm}) +_STEM_MODULES = Registry({ + "StemWithFixedBatchNorm": 
StemWithFixedBatchNorm, + "StemWithGN": StemWithGN, +}) _STAGE_SPECS = Registry({ "R-50-C4": ResNet50StagesTo4, diff --git a/maskrcnn_benchmark/modeling/make_layers.py b/maskrcnn_benchmark/modeling/make_layers.py new file mode 100644 index 000000000..4cebe9063 --- /dev/null +++ b/maskrcnn_benchmark/modeling/make_layers.py @@ -0,0 +1,54 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +""" +Miscellaneous utility functions +""" + +import torch +from torch import nn +from torch.nn import functional as F +from maskrcnn_benchmark.layers import Conv2d +from maskrcnn_benchmark.layers import gn_layer_from_cfg +from maskrcnn_benchmark.modeling.poolers import Pooler +from maskrcnn_benchmark.modeling.backbone import resnet + + +def make_conv3x3( + in_chs, out_chs, dilation=1, stride=1, use_gn=False, kaiming_init=True +): + conv = Conv2d( + in_chs, + out_chs, + kernel_size=3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False if use_gn else True + ) + if kaiming_init: + nn.init.kaiming_normal_( + conv.weight, mode="fan_out", nonlinearity="relu" + ) + else: + torch.nn.init.normal_(conv.weight, std=0.01) + if not use_gn: + nn.init.constant_(conv.bias, 0) + if use_gn: + return nn.Sequential( + conv, gn_layer_from_cfg(out_chs), nn.ReLU(inplace=True) + ) + return nn.Sequential(conv, nn.ReLU(inplace=True)) + + +def make_fc(dim_in, hidden_dim, use_gn): + ''' + Caffe2 implementation uses XavierFill, which in fact + corresponds to kaiming_uniform_ in PyTorch + ''' + if use_gn: + fc = nn.Linear(dim_in, hidden_dim, bias=False) + nn.init.kaiming_uniform_(fc.weight, a=1) + return nn.Sequential(fc, gn_layer_from_cfg(hidden_dim)) + fc = nn.Linear(dim_in, hidden_dim) + nn.init.kaiming_uniform_(fc.weight, a=1) + nn.init.constant_(fc.bias, 0) + return fc diff --git a/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py b/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py index 91e8acc44..d79f5cc0a 100644 --- a/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py +++ b/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py @@ -1,10 +1,13 @@ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
+import torch from torch import nn from torch.nn import functional as F from maskrcnn_benchmark.modeling import registry from maskrcnn_benchmark.modeling.backbone import resnet from maskrcnn_benchmark.modeling.poolers import Pooler +from maskrcnn_benchmark.layers import gn_layer_from_cfg +from maskrcnn_benchmark.modeling.make_layers import make_fc @registry.ROI_BOX_FEATURE_EXTRACTORS.register("ResNet50Conv5ROIFeatureExtractor") @@ -60,15 +63,10 @@ def __init__(self, cfg): ) input_size = cfg.MODEL.BACKBONE.OUT_CHANNELS * resolution ** 2 representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM + use_gn = cfg.MODEL.ROI_BOX_HEAD.USE_GN self.pooler = pooler - self.fc6 = nn.Linear(input_size, representation_size) - self.fc7 = nn.Linear(representation_size, representation_size) - - for l in [self.fc6, self.fc7]: - # Caffe2 implementation uses XavierFill, which in fact - # corresponds to kaiming_uniform_ in PyTorch - nn.init.kaiming_uniform_(l.weight, a=1) - nn.init.constant_(l.bias, 0) + self.fc6 = make_fc(input_size, representation_size, use_gn) + self.fc7 = make_fc(representation_size, representation_size, use_gn) def forward(self, x, proposals): x = self.pooler(x, proposals) @@ -80,6 +78,69 @@ def forward(self, x, proposals): return x +@registry.ROI_BOX_FEATURE_EXTRACTORS.register("FPNXconv1fcFeatureExtractor") +class FPNXconv1fcFeatureExtractor(nn.Module): + """ + Heads for FPN for classification + """ + + def __init__(self, cfg): + super(FPNXconv1fcFeatureExtractor, self).__init__() + + resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION + scales = cfg.MODEL.ROI_BOX_HEAD.POOLER_SCALES + sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO + pooler = Pooler( + output_size=(resolution, resolution), + scales=scales, + sampling_ratio=sampling_ratio, + ) + self.pooler = pooler + + use_gn = cfg.MODEL.ROI_BOX_HEAD.USE_GN + in_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS + conv_head_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_HEAD_DIM + num_stacked_convs = cfg.MODEL.ROI_BOX_HEAD.NUM_STACKED_CONVS + dilation = cfg.MODEL.ROI_BOX_HEAD.DILATION + + xconvs = [] + for ix in range(num_stacked_convs): + xconvs.append( + nn.Conv2d( + in_channels, + conv_head_dim, + kernel_size=3, + stride=1, + padding=dilation, + dilation=dilation, + bias=False if use_gn else True + ) + ) + in_channels = conv_head_dim + if use_gn: + xconvs.append(gn_layer_from_cfg(in_channels)) + xconvs.append(nn.ReLU(inplace=True)) + + self.add_module("xconvs", nn.Sequential(*xconvs)) + for modules in [self.xconvs,]: + for l in modules.modules(): + if isinstance(l, nn.Conv2d): + torch.nn.init.normal_(l.weight, std=0.01) + if not use_gn: + torch.nn.init.constant_(l.bias, 0) + + input_size = conv_head_dim * resolution ** 2 + representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM + self.fc6 = make_fc(input_size, representation_size, use_gn) + + def forward(self, x, proposals): + x = self.pooler(x, proposals) + x = self.xconvs(x) + x = x.view(x.size(0), -1) + x = F.relu(self.fc6(x)) + return x + + def make_roi_box_feature_extractor(cfg): func = registry.ROI_BOX_FEATURE_EXTRACTORS[ cfg.MODEL.ROI_BOX_HEAD.FEATURE_EXTRACTOR diff --git a/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_feature_extractors.py b/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_feature_extractors.py index 66f2c2665..6a2d7e8b0 100644 --- a/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_feature_extractors.py +++ b/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_feature_extractors.py @@ -5,6 +5,8 @@ from 
..box_head.roi_box_feature_extractors import ResNet50Conv5ROIFeatureExtractor from maskrcnn_benchmark.modeling.poolers import Pooler from maskrcnn_benchmark.layers import Conv2d +from maskrcnn_benchmark.modeling.make_layers import make_conv3x3 + class MaskRCNNFPNFeatureExtractor(nn.Module): @@ -32,17 +34,17 @@ def __init__(self, cfg): input_size = cfg.MODEL.BACKBONE.OUT_CHANNELS self.pooler = pooler + use_gn = cfg.MODEL.ROI_MASK_HEAD.USE_GN layers = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS + dilation = cfg.MODEL.ROI_MASK_HEAD.DILATION next_feature = input_size self.blocks = [] for layer_idx, layer_features in enumerate(layers, 1): layer_name = "mask_fcn{}".format(layer_idx) - module = Conv2d(next_feature, layer_features, 3, stride=1, padding=1) - # Caffe2 implementation uses MSRAFill, which in fact - # corresponds to kaiming_normal_ in PyTorch - nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu") - nn.init.constant_(module.bias, 0) + module = make_conv3x3(next_feature, layer_features, + dilation=dilation, stride=1, use_gn=use_gn + ) self.add_module(layer_name, module) next_feature = layer_features self.blocks.append(layer_name) diff --git a/maskrcnn_benchmark/utils/c2_model_loading.py b/maskrcnn_benchmark/utils/c2_model_loading.py index 41464902b..d7dfa0fa2 100644 --- a/maskrcnn_benchmark/utils/c2_model_loading.py +++ b/maskrcnn_benchmark/utils/c2_model_loading.py @@ -47,6 +47,18 @@ def _rename_basic_resnet_weights(layer_keys): layer_keys = [k.replace(".branch1.", ".downsample.0.") for k in layer_keys] layer_keys = [k.replace(".branch1_bn.", ".downsample.1.") for k in layer_keys] + # GroupNorm + layer_keys = [k.replace("conv1.gn.s", "gn1.weight") for k in layer_keys] + layer_keys = [k.replace("conv1.gn.bias", "gn1.bias") for k in layer_keys] + layer_keys = [k.replace("conv2.gn.s", "gn2.weight") for k in layer_keys] + layer_keys = [k.replace("conv2.gn.bias", "gn2.bias") for k in layer_keys] + layer_keys = [k.replace("conv3.gn.s", "gn3.weight") for k in layer_keys] + layer_keys = [k.replace("conv3.gn.bias", "gn3.bias") for k in layer_keys] + layer_keys = [k.replace("downsample.0.gn.s", "downsample.1.weight") \ + for k in layer_keys] + layer_keys = [k.replace("downsample.0.gn.bias", "downsample.1.bias") \ + for k in layer_keys] + return layer_keys def _rename_fpn_weights(layer_keys, stage_names): From 4877e36b9b1241f98890ac141e8e4400f25dbea4 Mon Sep 17 00:00:00 2001 From: dongdk Date: Wed, 16 Jan 2019 15:48:05 +0800 Subject: [PATCH 05/13] add GroupNorm -- sort out yaml files --- ...2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml | 2 ++ .../e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml | 2 ++ ...scratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml | 6 +++--- ...2e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml | 8 +++++--- .../scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml | 16 ++++++++-------- ..._e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml | 18 +++++++++--------- maskrcnn_benchmark/modeling/backbone/resnet.py | 2 ++ 7 files changed, 31 insertions(+), 23 deletions(-) diff --git a/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml b/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml index ac6771a5d..a609de9d8 100644 --- a/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml +++ b/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml @@ -30,6 +30,8 @@ MODEL: POOLER_RESOLUTION: 7 POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) POOLER_SAMPLING_RATIO: 2 + CONV_HEAD_DIM: 256 + NUM_STACKED_CONVS: 4 FEATURE_EXTRACTOR: "FPNXconv1fcFeatureExtractor" 
PREDICTOR: "FPNPredictor" DATASETS: diff --git a/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml b/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml index 2d2b9b629..451ec216e 100644 --- a/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml +++ b/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml @@ -30,6 +30,8 @@ MODEL: POOLER_RESOLUTION: 7 POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) POOLER_SAMPLING_RATIO: 2 + CONV_HEAD_DIM: 256 + NUM_STACKED_CONVS: 4 FEATURE_EXTRACTOR: "FPNXconv1fcFeatureExtractor" PREDICTOR: "FPNPredictor" ROI_MASK_HEAD: diff --git a/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml b/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml index c900f4f7c..25a838407 100644 --- a/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml +++ b/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml @@ -9,7 +9,7 @@ MODEL: BACKBONE: CONV_BODY: "R-50-FPN" OUT_CHANNELS: 256 - FREEZE_CONV_BODY_AT: -1 # finetune all layers + FREEZE_CONV_BODY_AT: 0 # finetune all layers RESNETS: # use GN for backbone TRANS_FUNC: "BottleneckWithGN" STEM_FUNC: "StemWithGN" @@ -39,10 +39,10 @@ DATASETS: DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: - # Assume 4 gpus + # Assume 8 gpus BASE_LR: 0.01 WEIGHT_DECAY: 0.0001 - STEPS: (420000, 500000) # 3x lr schedule + STEPS: (420000, 500000) MAX_ITER: 540000 IMS_PER_BATCH: 8 TEST: diff --git a/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml b/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml index 010796fc7..32b8b4d25 100644 --- a/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml +++ b/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml @@ -9,7 +9,7 @@ MODEL: BACKBONE: CONV_BODY: "R-50-FPN" OUT_CHANNELS: 256 - FREEZE_CONV_BODY_AT: -1 # finetune all layers + FREEZE_CONV_BODY_AT: 0 # finetune all layers RESNETS: # use GN for backbone TRANS_FUNC: "BottleneckWithGN" STEM_FUNC: "StemWithGN" @@ -31,6 +31,8 @@ MODEL: POOLER_RESOLUTION: 7 POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) POOLER_SAMPLING_RATIO: 2 + CONV_HEAD_DIM: 256 + NUM_STACKED_CONVS: 4 FEATURE_EXTRACTOR: "FPNXconv1fcFeatureExtractor" PREDICTOR: "FPNPredictor" DATASETS: @@ -39,10 +41,10 @@ DATASETS: DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: - # Assume 4 gpus + # Assume 8 gpus BASE_LR: 0.01 WEIGHT_DECAY: 0.0001 - STEPS: (420000, 500000) # 3x lr schedule + STEPS: (420000, 500000) MAX_ITER: 540000 IMS_PER_BATCH: 8 TEST: diff --git a/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml b/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml index 077c197f4..5606391c7 100644 --- a/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml +++ b/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml @@ -1,15 +1,15 @@ INPUT: MIN_SIZE_TRAIN: 800 - MAX_SIZE_TRAIN: 1280 + MAX_SIZE_TRAIN: 1333 MIN_SIZE_TEST: 800 - MAX_SIZE_TEST: 1400 + MAX_SIZE_TEST: 1333 MODEL: META_ARCHITECTURE: "GeneralizedRCNN" WEIGHT: "" # no pretrained model BACKBONE: CONV_BODY: "R-50-FPN" - OUT_CHANNELS: 224 # reduce memory - FREEZE_CONV_BODY_AT: -1 # finetune all layers + OUT_CHANNELS: 256 + FREEZE_CONV_BODY_AT: 0 # finetune all layers RESNETS: # use GN for backbone TRANS_FUNC: "BottleneckWithGN" STEM_FUNC: "StemWithGN" @@ -24,7 +24,7 @@ MODEL: FPN_POST_NMS_TOP_N_TEST: 1000 ROI_HEADS: USE_FPN: True - BATCH_SIZE_PER_IMAGE: 320 + BATCH_SIZE_PER_IMAGE: 512 POSITIVE_FRACTION: 0.25 ROI_BOX_HEAD: USE_GN: True # use GN for 
bbox head @@ -36,7 +36,7 @@ MODEL: ROI_MASK_HEAD: USE_GN: True # use GN for mask head POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) - CONV_LAYERS: (224, 224, 224, 256) + CONV_LAYERS: (256, 256, 256, 256) FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor" PREDICTOR: "MaskRCNNC4Predictor" POOLER_RESOLUTION: 14 @@ -50,10 +50,10 @@ DATASETS: DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: - # Assume 4 gpus + # Assume 8 gpus BASE_LR: 0.01 WEIGHT_DECAY: 0.0001 - STEPS: (420000, 500000) # 3x lr schedule + STEPS: (420000, 500000) MAX_ITER: 540000 IMS_PER_BATCH: 8 TEST: diff --git a/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml b/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml index 3352f7d02..ffbc66b96 100644 --- a/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml +++ b/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml @@ -1,15 +1,15 @@ INPUT: MIN_SIZE_TRAIN: 800 - MAX_SIZE_TRAIN: 1280 + MAX_SIZE_TRAIN: 1333 MIN_SIZE_TEST: 800 - MAX_SIZE_TEST: 1400 + MAX_SIZE_TEST: 1333 MODEL: META_ARCHITECTURE: "GeneralizedRCNN" WEIGHT: "" # no pretrained model BACKBONE: CONV_BODY: "R-50-FPN" - OUT_CHANNELS: 224 # reduce memory - FREEZE_CONV_BODY_AT: -1 # finetune all layers + OUT_CHANNELS: 256 + FREEZE_CONV_BODY_AT: 0 # finetune all layers RESNETS: # use GN for backbone TRANS_FUNC: "BottleneckWithGN" STEM_FUNC: "StemWithGN" @@ -24,21 +24,21 @@ MODEL: FPN_POST_NMS_TOP_N_TEST: 1000 ROI_HEADS: USE_FPN: True - BATCH_SIZE_PER_IMAGE: 320 + BATCH_SIZE_PER_IMAGE: 512 POSITIVE_FRACTION: 0.25 ROI_BOX_HEAD: USE_GN: True # use GN for bbox head POOLER_RESOLUTION: 7 POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) POOLER_SAMPLING_RATIO: 2 - CONV_HEAD_DIM: 224 + CONV_HEAD_DIM: 256 NUM_STACKED_CONVS: 4 FEATURE_EXTRACTOR: "FPNXconv1fcFeatureExtractor" PREDICTOR: "FPNPredictor" ROI_MASK_HEAD: USE_GN: True # use GN for mask head POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) - CONV_LAYERS: (224, 224, 224, 256) + CONV_LAYERS: (256, 256, 256, 256) FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor" PREDICTOR: "MaskRCNNC4Predictor" POOLER_RESOLUTION: 14 @@ -52,10 +52,10 @@ DATASETS: DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: - # Assume 4 gpus + # Assume 8 gpus BASE_LR: 0.01 WEIGHT_DECAY: 0.0001 - STEPS: (420000, 500000) # 3x lr schedule + STEPS: (420000, 500000) MAX_ITER: 540000 IMS_PER_BATCH: 8 TEST: diff --git a/maskrcnn_benchmark/modeling/backbone/resnet.py b/maskrcnn_benchmark/modeling/backbone/resnet.py index 6a30419f0..59606e9a9 100644 --- a/maskrcnn_benchmark/modeling/backbone/resnet.py +++ b/maskrcnn_benchmark/modeling/backbone/resnet.py @@ -111,6 +111,8 @@ def __init__(self, cfg): self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT) def _freeze_backbone(self, freeze_at): + if freeze_at < 0: + return for stage_index in range(freeze_at): if stage_index == 0: m = self.stem # stage 0 is the stem From d4ae039ebb0a6ad81407c9112ee66188dc9f7cbe Mon Sep 17 00:00:00 2001 From: dongdk Date: Fri, 18 Jan 2019 13:33:37 +0800 Subject: [PATCH 06/13] use torch.nn.GroupNorm instead, replace 'use_gn' with 'conv_block' and use 'BaseStem'&'Bottleneck' to simply codes --- ...cratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml | 8 +- ...e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml | 8 +- .../scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml | 8 +- ...e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml | 8 +- maskrcnn_benchmark/config/defaults.py | 24 +-- maskrcnn_benchmark/layers/__init__.py | 2 - maskrcnn_benchmark/layers/group_norm.py | 92 --------- .../modeling/backbone/backbone.py | 7 
+- maskrcnn_benchmark/modeling/backbone/fpn.py | 27 +-- .../modeling/backbone/resnet.py | 180 ++++++++---------- maskrcnn_benchmark/modeling/make_layers.py | 100 +++++++++- .../box_head/roi_box_feature_extractors.py | 4 +- maskrcnn_benchmark/utils/c2_model_loading.py | 17 +- 13 files changed, 215 insertions(+), 270 deletions(-) delete mode 100644 maskrcnn_benchmark/layers/group_norm.py diff --git a/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml b/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml index 25a838407..a13049f58 100644 --- a/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml +++ b/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml @@ -40,10 +40,10 @@ DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: # Assume 8 gpus - BASE_LR: 0.01 + BASE_LR: 0.02 WEIGHT_DECAY: 0.0001 - STEPS: (420000, 500000) - MAX_ITER: 540000 - IMS_PER_BATCH: 8 + STEPS: (210000, 250000) + MAX_ITER: 270000 + IMS_PER_BATCH: 16 TEST: IMS_PER_BATCH: 8 \ No newline at end of file diff --git a/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml b/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml index 32b8b4d25..d04feddf5 100644 --- a/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml +++ b/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml @@ -42,10 +42,10 @@ DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: # Assume 8 gpus - BASE_LR: 0.01 + BASE_LR: 0.02 WEIGHT_DECAY: 0.0001 - STEPS: (420000, 500000) - MAX_ITER: 540000 - IMS_PER_BATCH: 8 + STEPS: (210000, 250000) + MAX_ITER: 270000 + IMS_PER_BATCH: 16 TEST: IMS_PER_BATCH: 8 \ No newline at end of file diff --git a/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml b/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml index 5606391c7..c8eae5e0d 100644 --- a/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml +++ b/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml @@ -51,10 +51,10 @@ DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: # Assume 8 gpus - BASE_LR: 0.01 + BASE_LR: 0.02 WEIGHT_DECAY: 0.0001 - STEPS: (420000, 500000) - MAX_ITER: 540000 - IMS_PER_BATCH: 8 + STEPS: (210000, 250000) + MAX_ITER: 270000 + IMS_PER_BATCH: 16 TEST: IMS_PER_BATCH: 8 diff --git a/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml b/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml index ffbc66b96..db8233790 100644 --- a/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml +++ b/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml @@ -53,10 +53,10 @@ DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: # Assume 8 gpus - BASE_LR: 0.01 + BASE_LR: 0.02 WEIGHT_DECAY: 0.0001 - STEPS: (420000, 500000) - MAX_ITER: 540000 - IMS_PER_BATCH: 8 + STEPS: (210000, 250000) + MAX_ITER: 270000 + IMS_PER_BATCH: 16 TEST: IMS_PER_BATCH: 8 \ No newline at end of file diff --git a/maskrcnn_benchmark/config/defaults.py b/maskrcnn_benchmark/config/defaults.py index 27b80d3e7..4503812fd 100644 --- a/maskrcnn_benchmark/config/defaults.py +++ b/maskrcnn_benchmark/config/defaults.py @@ -76,18 +76,6 @@ _C.DATALOADER.ASPECT_RATIO_GROUPING = True -# ---------------------------------------------------------------------------- # -# Group Norm options -# ---------------------------------------------------------------------------- # -_C.GROUP_NORM = CN() -# Number of dimensions per group in GroupNorm (-1 if using NUM_GROUPS) -_C.GROUP_NORM.DIM_PER_GP = -1 -# Number of groups in 
GroupNorm (-1 if using DIM_PER_GP) -_C.GROUP_NORM.NUM_GROUPS = 32 -# GroupNorm's small constant in the denominator -_C.GROUP_NORM.EPSILON = 1e-5 - - # ---------------------------------------------------------------------------- # # Backbone options # ---------------------------------------------------------------------------- # @@ -113,6 +101,18 @@ _C.MODEL.FPN.USE_GN = False +# ---------------------------------------------------------------------------- # +# Group Norm options +# ---------------------------------------------------------------------------- # +_C.MODEL.GROUP_NORM = CN() +# Number of dimensions per group in GroupNorm (-1 if using NUM_GROUPS) +_C.MODEL.GROUP_NORM.DIM_PER_GP = -1 +# Number of groups in GroupNorm (-1 if using DIM_PER_GP) +_C.MODEL.GROUP_NORM.NUM_GROUPS = 32 +# GroupNorm's small constant in the denominator +_C.MODEL.GROUP_NORM.EPSILON = 1e-5 + + # ---------------------------------------------------------------------------- # # RPN options # ---------------------------------------------------------------------------- # diff --git a/maskrcnn_benchmark/layers/__init__.py b/maskrcnn_benchmark/layers/__init__.py index 1eb43324d..eb21200f2 100644 --- a/maskrcnn_benchmark/layers/__init__.py +++ b/maskrcnn_benchmark/layers/__init__.py @@ -11,10 +11,8 @@ from .roi_pool import ROIPool from .roi_pool import roi_pool from .smooth_l1_loss import smooth_l1_loss -from .group_norm import GroupNorm, gn_layer_from_cfg __all__ = ["nms", "roi_align", "ROIAlign", "roi_pool", "ROIPool", "smooth_l1_loss", "Conv2d", "ConvTranspose2d", "interpolate", "FrozenBatchNorm2d", - "GroupNorm", "gn_layer_from_cfg", ] diff --git a/maskrcnn_benchmark/layers/group_norm.py b/maskrcnn_benchmark/layers/group_norm.py deleted file mode 100644 index fe90f287f..000000000 --- a/maskrcnn_benchmark/layers/group_norm.py +++ /dev/null @@ -1,92 +0,0 @@ -""" -Group Normalization Layer from PANet -url: https://github.com/ShuLiu1993/PANet -""" - -import torch -import torch.nn as nn -from maskrcnn_benchmark.config import cfg - - -class GroupNorm(nn.Module): - - def __init__(self, num_groups, num_channels, eps=1e-5, affine=True): - super().__init__() - self.num_groups = num_groups - self.num_channels = num_channels - self.eps = eps - self.affine = affine - if self.affine: - self.weight = nn.Parameter(torch.Tensor(num_channels)) - self.bias = nn.Parameter(torch.Tensor(num_channels)) - else: - self.register_parameter('weight', None) - self.register_parameter('bias', None) - self.reset_parameters() - - def reset_parameters(self): - if self.affine: - self.weight.data.fill_(1) - self.bias.data.zero_() - - def forward(self, x): - return group_norm( - x, self.num_groups, self.weight, self.bias, self.eps - ) - - def extra_repr(self): - return '{num_groups}, {num_channels}, eps={eps}, ' \ - 'affine={affine}'.format(**self.__dict__) - - -def group_norm(x, num_groups, weight=None, bias=None, eps=1e-5): - input_shape = x.shape - ndim = len(input_shape) - N, C = input_shape[:2] - G = num_groups - - assert C % G == 0, "input channel dimension must divisible by number of groups" - - x = x.view(N, G, -1) - mean = x.mean(-1, keepdim=True) - var = x.var(-1, keepdim=True) - x = (x - mean) / (var + eps).sqrt() - x = x.view(input_shape) - - view_shape = (1, -1) + (1,) * (ndim - 2) - if weight is not None: - return x * weight.view(view_shape) + bias.view(view_shape) - - return x - - -def get_group_gn(dim, dim_per_gp, num_groups): - """get number of groups used by GroupNorm, based on number of channels.""" - assert dim_per_gp == -1 or 
num_groups == -1, \ - "GroupNorm: can only specify G or C/G." - - if dim_per_gp > 0: - assert dim % dim_per_gp == 0, \ - "dim: {}, dim_per_gp: {}".format(dim, dim_per_gp) - group_gn = dim // dim_per_gp - else: - assert dim % num_groups == 0, \ - "dim: {}, num_groups: {}".format(dim, num_groups) - group_gn = num_groups - - return group_gn - - -def gn_layer_from_cfg(out_channels, affine=True, divisor=1): - out_channels = out_channels // divisor - dim_per_gp = cfg.GROUP_NORM.DIM_PER_GP // divisor - num_groups = cfg.GROUP_NORM.NUM_GROUPS // divisor - eps = cfg.GROUP_NORM.EPSILON # default: 1e-5 - return GroupNorm( - get_group_gn(out_channels, dim_per_gp, num_groups), - out_channels, - eps, - affine - ) - - diff --git a/maskrcnn_benchmark/modeling/backbone/backbone.py b/maskrcnn_benchmark/modeling/backbone/backbone.py index 6e9c263fd..6b080b19b 100644 --- a/maskrcnn_benchmark/modeling/backbone/backbone.py +++ b/maskrcnn_benchmark/modeling/backbone/backbone.py @@ -4,12 +4,15 @@ from torch import nn from maskrcnn_benchmark.modeling import registry - +from maskrcnn_benchmark.modeling.make_layers import conv_with_kaiming_uniform from . import fpn as fpn_module from . import resnet @registry.BACKBONES.register("R-50-C4") +@registry.BACKBONES.register("R-50-C5") +@registry.BACKBONES.register("R-101-C4") +@registry.BACKBONES.register("R-101-C5") def build_resnet_backbone(cfg): body = resnet.ResNet(cfg) model = nn.Sequential(OrderedDict([("body", body)])) @@ -30,8 +33,8 @@ def build_resnet_fpn_backbone(cfg): in_channels_stage2 * 8, ], out_channels=out_channels, + conv_block=conv_with_kaiming_uniform(cfg.MODEL.FPN.USE_GN), top_blocks=fpn_module.LastLevelMaxPool(), - use_gn=cfg.MODEL.FPN.USE_GN ) model = nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)])) return model diff --git a/maskrcnn_benchmark/modeling/backbone/fpn.py b/maskrcnn_benchmark/modeling/backbone/fpn.py index 66f1e8171..da970ac08 100644 --- a/maskrcnn_benchmark/modeling/backbone/fpn.py +++ b/maskrcnn_benchmark/modeling/backbone/fpn.py @@ -2,7 +2,6 @@ import torch import torch.nn.functional as F from torch import nn -from maskrcnn_benchmark.layers import gn_layer_from_cfg class FPN(nn.Module): @@ -13,8 +12,7 @@ class FPN(nn.Module): """ def __init__( - self, in_channels_list, out_channels, - top_blocks=None, use_gn=False + self, in_channels_list, out_channels, conv_block, top_blocks=None ): """ Arguments: @@ -31,27 +29,8 @@ def __init__( for idx, in_channels in enumerate(in_channels_list, 1): inner_block = "fpn_inner{}".format(idx) layer_block = "fpn_layer{}".format(idx) - if use_gn: - inner_block_module = nn.Sequential( - nn.Conv2d(in_channels, out_channels, 1, bias=False), - gn_layer_from_cfg(out_channels) - ) - layer_block_module = nn.Sequential( - nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False), - gn_layer_from_cfg(out_channels) - ) - for modules in [inner_block_module, layer_block_module]: - for l in modules.modules(): - if isinstance(l, nn.Conv2d): - nn.init.kaiming_uniform_(l.weight, a=1) - else: - inner_block_module = nn.Conv2d(in_channels, out_channels, 1) - layer_block_module = nn.Conv2d(out_channels, out_channels, 3, 1, 1) - for module in [inner_block_module, layer_block_module]: - # Caffe2 implementation uses XavierFill, which in fact - # corresponds to kaiming_uniform_ in PyTorch - nn.init.kaiming_uniform_(module.weight, a=1) - nn.init.constant_(module.bias, 0) + inner_block_module = conv_block(in_channels, out_channels, 1) + layer_block_module = conv_block(out_channels, out_channels, 3, 1) 
self.add_module(inner_block, inner_block_module) self.add_module(layer_block, layer_block_module) self.inner_blocks.append(inner_block) diff --git a/maskrcnn_benchmark/modeling/backbone/resnet.py b/maskrcnn_benchmark/modeling/backbone/resnet.py index 59606e9a9..1e2834681 100644 --- a/maskrcnn_benchmark/modeling/backbone/resnet.py +++ b/maskrcnn_benchmark/modeling/backbone/resnet.py @@ -22,9 +22,9 @@ import torch.nn.functional as F from torch import nn -from maskrcnn_benchmark.layers import gn_layer_from_cfg from maskrcnn_benchmark.layers import FrozenBatchNorm2d from maskrcnn_benchmark.layers import Conv2d +from maskrcnn_benchmark.modeling.make_layers import group_norm from maskrcnn_benchmark.utils.registry import Registry @@ -51,6 +51,16 @@ StageSpec(index=i, block_count=c, return_features=r) for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 6, True)) ) +# ResNet-101 (including all stages) +ResNet101StagesTo5 = tuple( + StageSpec(index=i, block_count=c, return_features=r) + for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 23, False), (4, 3, True)) +) +# ResNet-101 up to stage 4 (excludes stage 5) +ResNet101StagesTo4 = tuple( + StageSpec(index=i, block_count=c, return_features=r) + for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 23, True)) +) # ResNet-50-FPN (including all stages) ResNet50FPNStagesTo5 = tuple( StageSpec(index=i, block_count=c, return_features=r) @@ -141,6 +151,7 @@ def __init__( stride_in_1x1=True, stride_init=None, res2_out_channels=256, + dilation=1 ): super(ResNetHead, self).__init__() @@ -167,6 +178,7 @@ def __init__( num_groups, stride_in_1x1, first_stride=stride, + dilation=dilation ) stride = None self.add_module(name, module) @@ -187,6 +199,7 @@ def _make_stage( num_groups, stride_in_1x1, first_stride, + dilation=1 ): blocks = [] stride = first_stride @@ -199,6 +212,7 @@ def _make_stage( num_groups, stride_in_1x1, stride, + dilation=dilation ) ) stride = 1 @@ -206,27 +220,34 @@ def _make_stage( return nn.Sequential(*blocks) -class BottleneckWithFixedBatchNorm(nn.Module): +class Bottleneck(nn.Module): def __init__( self, in_channels, bottleneck_channels, out_channels, - num_groups=1, - stride_in_1x1=True, - stride=1, + num_groups, + stride_in_1x1, + stride, + dilation, + norm_func ): - super(BottleneckWithFixedBatchNorm, self).__init__() + super(Bottleneck, self).__init__() self.downsample = None if in_channels != out_channels: + down_stride = stride if dilation == 1 else 1 self.downsample = nn.Sequential( Conv2d( - in_channels, out_channels, kernel_size=1, stride=stride, bias=False + in_channels, out_channels, + kernel_size=1, stride=down_stride, bias=False ), - FrozenBatchNorm2d(out_channels), + norm_func(out_channels), ) + if dilation > 1: + stride = 1 # reset to be 1 + # The original MSRA ResNet models have stride in the first 1x1 conv # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have # stride in the 3x3 conv @@ -239,7 +260,7 @@ def __init__( stride=stride_1x1, bias=False, ) - self.bn1 = FrozenBatchNorm2d(bottleneck_channels) + self.bn1 = norm_func(bottleneck_channels) # TODO: specify init for the above self.conv2 = Conv2d( @@ -247,16 +268,17 @@ def __init__( bottleneck_channels, kernel_size=3, stride=stride_3x3, - padding=1, + padding=dilation, # dilation * (kernel_size - 1) // 2, bias=False, groups=num_groups, + dilation=dilation ) - self.bn2 = FrozenBatchNorm2d(bottleneck_channels) + self.bn2 = norm_func(bottleneck_channels) self.conv3 = Conv2d( bottleneck_channels, out_channels, kernel_size=1, bias=False ) - self.bn3 = 
FrozenBatchNorm2d(out_channels) + self.bn3 = norm_func(out_channels) def forward(self, x): identity = x @@ -281,16 +303,16 @@ def forward(self, x): return out -class StemWithFixedBatchNorm(nn.Module): - def __init__(self, cfg): - super(StemWithFixedBatchNorm, self).__init__() +class BaseStem(nn.Module): + def __init__(self, cfg, norm_func): + super(BaseStem, self).__init__() out_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS self.conv1 = Conv2d( 3, out_channels, kernel_size=7, stride=2, padding=3, bias=False ) - self.bn1 = FrozenBatchNorm2d(out_channels) + self.bn1 = norm_func(out_channels) def forward(self, x): x = self.conv1(x) @@ -298,10 +320,9 @@ def forward(self, x): x = F.relu_(x) x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) return x - return x -class BottleneckWithGN(nn.Module): +class BottleneckWithFixedBatchNorm(Bottleneck): def __init__( self, in_channels, @@ -312,100 +333,51 @@ def __init__( stride=1, dilation=1 ): - super(BottleneckWithGN, self).__init__() - - self.downsample = None - if in_channels != out_channels: - down_stride = stride if dilation == 1 else 1 - self.downsample = nn.Sequential( - Conv2d( - in_channels, out_channels, - kernel_size=1, stride=down_stride, bias=False - ), - gn_layer_from_cfg(out_channels) - ) - - if dilation > 1: - stride = 1 # reset to be 1 - - # The original MSRA ResNet models have stride in the first 1x1 conv - # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have - # stride in the 3x3 conv - stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) - - self.conv1 = Conv2d( - in_channels, - bottleneck_channels, - kernel_size=1, - stride=stride_1x1, - bias=False, + super(BottleneckWithFixedBatchNorm, self).__init__( + in_channels=in_channels, + bottleneck_channels=bottleneck_channels, + out_channels=out_channels, + num_groups=num_groups, + stride_in_1x1=stride_in_1x1, + stride=stride, + dilation=dilation, + norm_func=FrozenBatchNorm2d ) - self.gn1 = gn_layer_from_cfg(bottleneck_channels) - # TODO: specify init for the above - self.conv2 = Conv2d( - bottleneck_channels, - bottleneck_channels, - kernel_size=3, - stride=stride_3x3, - padding=dilation, # dilation * (kernel_size - 1) // 2, - bias=False, - groups=num_groups, - dilation=dilation - ) - self.gn2 = gn_layer_from_cfg(bottleneck_channels) - self.conv3 = Conv2d( - bottleneck_channels, out_channels, kernel_size=1, bias=False - ) - self.gn3 = gn_layer_from_cfg(out_channels) - - # for l in [self.conv1, self.conv2, self.conv3]: - # nn.init.kaiming_uniform_(l.weight, a=1) - - def forward(self, x): - identity = x - - out = self.conv1(x) - out = self.gn1(out) - out = F.relu_(out) - - out = self.conv2(out) - out = self.gn2(out) - out = F.relu_(out) - - out0 = self.conv3(out) - out = self.gn3(out0) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = F.relu_(out) - - return out - - -class StemWithGN(nn.Module): +class StemWithFixedBatchNorm(BaseStem): def __init__(self, cfg): - super(StemWithGN, self).__init__() + super(StemWithFixedBatchNorm, self).__init__( + cfg, norm_func=FrozenBatchNorm2d + ) - out_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS - self.conv1 = Conv2d( - 3, out_channels, kernel_size=7, stride=2, padding=3, bias=False +class BottleneckWithGN(Bottleneck): + def __init__( + self, + in_channels, + bottleneck_channels, + out_channels, + num_groups=1, + stride_in_1x1=True, + stride=1, + dilation=1 + ): + super(BottleneckWithGN, self).__init__( + in_channels=in_channels, + 
bottleneck_channels=bottleneck_channels, + out_channels=out_channels, + num_groups=num_groups, + stride_in_1x1=stride_in_1x1, + stride=stride, + dilation=dilation, + norm_func=group_norm ) - self.gn1 = gn_layer_from_cfg(out_channels) - # for l in [self.conv1,]: - # nn.init.kaiming_uniform_(l.weight, a=1) - def forward(self, x): - x = self.conv1(x) - x = self.gn1(x) - x = F.relu_(x) - x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) - return x +class StemWithGN(BaseStem): + def __init__(self, cfg): + super(StemWithGN, self).__init__(cfg, norm_func=group_norm) _TRANSFORMATION_MODULES = Registry({ @@ -421,6 +393,8 @@ def forward(self, x): _STAGE_SPECS = Registry({ "R-50-C4": ResNet50StagesTo4, "R-50-C5": ResNet50StagesTo5, + "R-101-C4": ResNet101StagesTo4, + "R-101-C5": ResNet101StagesTo5, "R-50-FPN": ResNet50FPNStagesTo5, "R-101-FPN": ResNet101FPNStagesTo5, }) diff --git a/maskrcnn_benchmark/modeling/make_layers.py b/maskrcnn_benchmark/modeling/make_layers.py index 4cebe9063..9ea5d0ca4 100644 --- a/maskrcnn_benchmark/modeling/make_layers.py +++ b/maskrcnn_benchmark/modeling/make_layers.py @@ -6,18 +6,56 @@ import torch from torch import nn from torch.nn import functional as F +from maskrcnn_benchmark.config import cfg from maskrcnn_benchmark.layers import Conv2d -from maskrcnn_benchmark.layers import gn_layer_from_cfg from maskrcnn_benchmark.modeling.poolers import Pooler -from maskrcnn_benchmark.modeling.backbone import resnet + + +def get_group_gn(dim, dim_per_gp, num_groups): + """get number of groups used by GroupNorm, based on number of channels.""" + assert dim_per_gp == -1 or num_groups == -1, \ + "GroupNorm: can only specify G or C/G." + + if dim_per_gp > 0: + assert dim % dim_per_gp == 0, \ + "dim: {}, dim_per_gp: {}".format(dim, dim_per_gp) + group_gn = dim // dim_per_gp + else: + assert dim % num_groups == 0, \ + "dim: {}, num_groups: {}".format(dim, num_groups) + group_gn = num_groups + + return group_gn + + +def _group_norm(): + def gn_layer_from_cfg(out_channels, affine=True, divisor=1): + out_channels = out_channels // divisor + dim_per_gp = cfg.MODEL.GROUP_NORM.DIM_PER_GP // divisor + num_groups = cfg.MODEL.GROUP_NORM.NUM_GROUPS // divisor + eps = cfg.MODEL.GROUP_NORM.EPSILON # default: 1e-5 + return torch.nn.GroupNorm( + get_group_gn(out_channels, dim_per_gp, num_groups), + out_channels, + eps, + affine + ) + return gn_layer_from_cfg +group_norm = _group_norm() def make_conv3x3( - in_chs, out_chs, dilation=1, stride=1, use_gn=False, kaiming_init=True + in_channels, + out_channels, + dilation=1, + stride=1, + use_gn=False, + use_relu=False, + kaiming_init=True ): conv = Conv2d( - in_chs, - out_chs, + in_channels, + out_channels, kernel_size=3, stride=stride, padding=dilation, @@ -33,10 +71,18 @@ def make_conv3x3( if not use_gn: nn.init.constant_(conv.bias, 0) if use_gn: - return nn.Sequential( - conv, gn_layer_from_cfg(out_chs), nn.ReLU(inplace=True) - ) - return nn.Sequential(conv, nn.ReLU(inplace=True)) + if use_relu: + return nn.Sequential( + conv, group_norm(out_channels), nn.ReLU(inplace=True) + ) + else: + return nn.Sequential( + conv, group_norm(out_channels) + ) + if use_relu: + return nn.Sequential(conv, nn.ReLU(inplace=True)) + else: + return conv def make_fc(dim_in, hidden_dim, use_gn): @@ -47,8 +93,42 @@ def make_fc(dim_in, hidden_dim, use_gn): if use_gn: fc = nn.Linear(dim_in, hidden_dim, bias=False) nn.init.kaiming_uniform_(fc.weight, a=1) - return nn.Sequential(fc, gn_layer_from_cfg(hidden_dim)) + return nn.Sequential(fc, group_norm(hidden_dim)) fc = 
nn.Linear(dim_in, hidden_dim) nn.init.kaiming_uniform_(fc.weight, a=1) nn.init.constant_(fc.bias, 0) return fc + + +def conv_with_kaiming_uniform(use_gn=False): + def make_conv( + in_channels, out_channels, kernel_size, stride=1, dilation=1, use_relu=False + ): + module = Conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=dilation * (kernel_size - 1) // 2, + dilation=dilation, + bias=False if use_gn else True + ) + # Caffe2 implementation uses XavierFill, which in fact + # corresponds to kaiming_uniform_ in PyTorch + nn.init.kaiming_uniform_(module.weight, a=1) + if not use_gn: + nn.init.constant_(module.bias, 0) + if use_gn: + if use_relu: + return nn.Sequential( + module, group_norm(out_channels), nn.ReLU(inplace=True) + ) + else: + return nn.Sequential( + module, group_norm(out_channels) + ) + if use_relu: + return nn.Sequential(module, nn.ReLU(inplace=True)) + else: + return module + return make_conv diff --git a/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py b/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py index d79f5cc0a..d56d7a01f 100644 --- a/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py +++ b/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py @@ -6,7 +6,7 @@ from maskrcnn_benchmark.modeling import registry from maskrcnn_benchmark.modeling.backbone import resnet from maskrcnn_benchmark.modeling.poolers import Pooler -from maskrcnn_benchmark.layers import gn_layer_from_cfg +from maskrcnn_benchmark.modeling.make_layers import group_norm from maskrcnn_benchmark.modeling.make_layers import make_fc @@ -118,7 +118,7 @@ def __init__(self, cfg): ) in_channels = conv_head_dim if use_gn: - xconvs.append(gn_layer_from_cfg(in_channels)) + xconvs.append(group_norm(in_channels)) xconvs.append(nn.ReLU(inplace=True)) self.add_module("xconvs", nn.Sequential(*xconvs)) diff --git a/maskrcnn_benchmark/utils/c2_model_loading.py b/maskrcnn_benchmark/utils/c2_model_loading.py index d7dfa0fa2..1781407f3 100644 --- a/maskrcnn_benchmark/utils/c2_model_loading.py +++ b/maskrcnn_benchmark/utils/c2_model_loading.py @@ -48,12 +48,12 @@ def _rename_basic_resnet_weights(layer_keys): layer_keys = [k.replace(".branch1_bn.", ".downsample.1.") for k in layer_keys] # GroupNorm - layer_keys = [k.replace("conv1.gn.s", "gn1.weight") for k in layer_keys] - layer_keys = [k.replace("conv1.gn.bias", "gn1.bias") for k in layer_keys] - layer_keys = [k.replace("conv2.gn.s", "gn2.weight") for k in layer_keys] - layer_keys = [k.replace("conv2.gn.bias", "gn2.bias") for k in layer_keys] - layer_keys = [k.replace("conv3.gn.s", "gn3.weight") for k in layer_keys] - layer_keys = [k.replace("conv3.gn.bias", "gn3.bias") for k in layer_keys] + layer_keys = [k.replace("conv1.gn.s", "bn1.weight") for k in layer_keys] + layer_keys = [k.replace("conv1.gn.bias", "bn1.bias") for k in layer_keys] + layer_keys = [k.replace("conv2.gn.s", "bn2.weight") for k in layer_keys] + layer_keys = [k.replace("conv2.gn.bias", "bn2.bias") for k in layer_keys] + layer_keys = [k.replace("conv3.gn.s", "bn3.weight") for k in layer_keys] + layer_keys = [k.replace("conv3.gn.bias", "bn3.bias") for k in layer_keys] layer_keys = [k.replace("downsample.0.gn.s", "downsample.1.weight") \ for k in layer_keys] layer_keys = [k.replace("downsample.0.gn.bias", "downsample.1.bias") \ @@ -152,12 +152,15 @@ def _load_c2_pickled_weights(file_path): @C2_FORMAT_LOADER.register("R-50-C4") +@C2_FORMAT_LOADER.register("R-50-C5") 
+@C2_FORMAT_LOADER.register("R-101-C4") +@C2_FORMAT_LOADER.register("R-101-C5") @C2_FORMAT_LOADER.register("R-50-FPN") @C2_FORMAT_LOADER.register("R-101-FPN") def load_resnet_c2_format(cfg, f): state_dict = _load_c2_pickled_weights(f) conv_body = cfg.MODEL.BACKBONE.CONV_BODY - arch = conv_body.replace("-C4", "").replace("-FPN", "") + arch = conv_body.replace("-C4", "").replace("-C5", "").replace("-FPN", "") stages = _C2_STAGE_NAMES[arch] state_dict = _rename_weights_for_resnet(state_dict, stages) return dict(model=state_dict) From 333864df3953bd0afdd39afc9933c97452656ff3 Mon Sep 17 00:00:00 2001 From: dongdk Date: Fri, 18 Jan 2019 21:28:43 +0800 Subject: [PATCH 07/13] modification on 'group_norm' and 'conv_with_kaiming_uniform' function --- maskrcnn_benchmark/config/defaults.py | 1 + .../modeling/backbone/backbone.py | 4 ++- .../modeling/backbone/resnet.py | 2 +- maskrcnn_benchmark/modeling/make_layers.py | 29 +++++++++---------- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/maskrcnn_benchmark/config/defaults.py b/maskrcnn_benchmark/config/defaults.py index 4503812fd..5ed3fa35c 100644 --- a/maskrcnn_benchmark/config/defaults.py +++ b/maskrcnn_benchmark/config/defaults.py @@ -99,6 +99,7 @@ # ---------------------------------------------------------------------------- # _C.MODEL.FPN = CN() _C.MODEL.FPN.USE_GN = False +_C.MODEL.FPN.USE_RELU = False # ---------------------------------------------------------------------------- # diff --git a/maskrcnn_benchmark/modeling/backbone/backbone.py b/maskrcnn_benchmark/modeling/backbone/backbone.py index 6b080b19b..c9dc392de 100644 --- a/maskrcnn_benchmark/modeling/backbone/backbone.py +++ b/maskrcnn_benchmark/modeling/backbone/backbone.py @@ -33,7 +33,9 @@ def build_resnet_fpn_backbone(cfg): in_channels_stage2 * 8, ], out_channels=out_channels, - conv_block=conv_with_kaiming_uniform(cfg.MODEL.FPN.USE_GN), + conv_block=conv_with_kaiming_uniform( + cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU + ), top_blocks=fpn_module.LastLevelMaxPool(), ) model = nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)])) diff --git a/maskrcnn_benchmark/modeling/backbone/resnet.py b/maskrcnn_benchmark/modeling/backbone/resnet.py index 1e2834681..dd6073e9d 100644 --- a/maskrcnn_benchmark/modeling/backbone/resnet.py +++ b/maskrcnn_benchmark/modeling/backbone/resnet.py @@ -268,7 +268,7 @@ def __init__( bottleneck_channels, kernel_size=3, stride=stride_3x3, - padding=dilation, # dilation * (kernel_size - 1) // 2, + padding=dilation, bias=False, groups=num_groups, dilation=dilation diff --git a/maskrcnn_benchmark/modeling/make_layers.py b/maskrcnn_benchmark/modeling/make_layers.py index 9ea5d0ca4..b0e914b9a 100644 --- a/maskrcnn_benchmark/modeling/make_layers.py +++ b/maskrcnn_benchmark/modeling/make_layers.py @@ -28,20 +28,17 @@ def get_group_gn(dim, dim_per_gp, num_groups): return group_gn -def _group_norm(): - def gn_layer_from_cfg(out_channels, affine=True, divisor=1): - out_channels = out_channels // divisor - dim_per_gp = cfg.MODEL.GROUP_NORM.DIM_PER_GP // divisor - num_groups = cfg.MODEL.GROUP_NORM.NUM_GROUPS // divisor - eps = cfg.MODEL.GROUP_NORM.EPSILON # default: 1e-5 - return torch.nn.GroupNorm( - get_group_gn(out_channels, dim_per_gp, num_groups), - out_channels, - eps, - affine - ) - return gn_layer_from_cfg -group_norm = _group_norm() +def group_norm(out_channels, affine=True, divisor=1): + out_channels = out_channels // divisor + dim_per_gp = cfg.MODEL.GROUP_NORM.DIM_PER_GP // divisor + num_groups = cfg.MODEL.GROUP_NORM.NUM_GROUPS // 
divisor + eps = cfg.MODEL.GROUP_NORM.EPSILON # default: 1e-5 + return torch.nn.GroupNorm( + get_group_gn(out_channels, dim_per_gp, num_groups), + out_channels, + eps, + affine + ) def make_conv3x3( @@ -100,9 +97,9 @@ def make_fc(dim_in, hidden_dim, use_gn): return fc -def conv_with_kaiming_uniform(use_gn=False): +def conv_with_kaiming_uniform(use_gn=False, use_relu=False): def make_conv( - in_channels, out_channels, kernel_size, stride=1, dilation=1, use_relu=False + in_channels, out_channels, kernel_size, stride=1, dilation=1 ): module = Conv2d( in_channels, From 58da4d58dcb506aa867ee3f7b4d9736b3463f819 Mon Sep 17 00:00:00 2001 From: dongdk Date: Mon, 21 Jan 2019 11:00:26 +0800 Subject: [PATCH 08/13] modification on yaml files in configs/gn_baselines/ and reduce the amount of indentation and code duplication --- .../e2e_faster_rcnn_R_50_FPN_1x_gn.yaml | 10 ++--- ...e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml | 10 ++--- .../e2e_mask_rcnn_R_50_FPN_1x_gn.yaml | 10 ++--- ...e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml | 10 ++--- maskrcnn_benchmark/modeling/make_layers.py | 41 ++++++++----------- 5 files changed, 36 insertions(+), 45 deletions(-) diff --git a/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_1x_gn.yaml b/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_1x_gn.yaml index 3efd0bf3c..e8f3ca7f5 100644 --- a/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_1x_gn.yaml +++ b/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_1x_gn.yaml @@ -38,11 +38,11 @@ DATASETS: DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: - # Assume 4 gpus - BASE_LR: 0.01 + # Assume 8 gpus + BASE_LR: 0.02 WEIGHT_DECAY: 0.0001 - STEPS: (120000, 180000) - MAX_ITER: 180000 - IMS_PER_BATCH: 8 + STEPS: (60000, 80000) + MAX_ITER: 90000 + IMS_PER_BATCH: 16 TEST: IMS_PER_BATCH: 8 diff --git a/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml b/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml index a609de9d8..d25b43430 100644 --- a/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml +++ b/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml @@ -40,11 +40,11 @@ DATASETS: DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: - # Assume 4 gpus - BASE_LR: 0.01 + # Assume 8 gpus + BASE_LR: 0.02 WEIGHT_DECAY: 0.0001 - STEPS: (120000, 180000) - MAX_ITER: 180000 - IMS_PER_BATCH: 8 + STEPS: (60000, 80000) + MAX_ITER: 90000 + IMS_PER_BATCH: 16 TEST: IMS_PER_BATCH: 8 \ No newline at end of file diff --git a/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_1x_gn.yaml b/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_1x_gn.yaml index 46d3e7a5b..e59c8b9a4 100644 --- a/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_1x_gn.yaml +++ b/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_1x_gn.yaml @@ -49,11 +49,11 @@ DATASETS: DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: - # Assume 4 gpus - BASE_LR: 0.01 + # Assume 8 gpus + BASE_LR: 0.02 WEIGHT_DECAY: 0.0001 - STEPS: (120000, 180000) - MAX_ITER: 180000 - IMS_PER_BATCH: 8 + STEPS: (60000, 80000) + MAX_ITER: 90000 + IMS_PER_BATCH: 16 TEST: IMS_PER_BATCH: 8 \ No newline at end of file diff --git a/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml b/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml index 451ec216e..f3bba2f58 100644 --- a/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml +++ b/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml @@ -51,11 +51,11 @@ DATASETS: DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: - # Assume 4 gpus - BASE_LR: 0.01 + # Assume 8 gpus + BASE_LR: 0.02 WEIGHT_DECAY: 0.0001 - STEPS: (120000, 180000) - MAX_ITER: 
180000 - IMS_PER_BATCH: 8 + STEPS: (60000, 80000) + MAX_ITER: 90000 + IMS_PER_BATCH: 16 TEST: IMS_PER_BATCH: 8 \ No newline at end of file diff --git a/maskrcnn_benchmark/modeling/make_layers.py b/maskrcnn_benchmark/modeling/make_layers.py index b0e914b9a..e77311faf 100644 --- a/maskrcnn_benchmark/modeling/make_layers.py +++ b/maskrcnn_benchmark/modeling/make_layers.py @@ -67,19 +67,14 @@ def make_conv3x3( torch.nn.init.normal_(conv.weight, std=0.01) if not use_gn: nn.init.constant_(conv.bias, 0) + module = [conv,] if use_gn: - if use_relu: - return nn.Sequential( - conv, group_norm(out_channels), nn.ReLU(inplace=True) - ) - else: - return nn.Sequential( - conv, group_norm(out_channels) - ) + module.append(group_norm(out_channels)) if use_relu: - return nn.Sequential(conv, nn.ReLU(inplace=True)) - else: - return conv + module.append(nn.ReLU(inplace=True)) + if len(module) > 1: + return nn.Sequential(*module) + return conv def make_fc(dim_in, hidden_dim, use_gn): @@ -101,7 +96,7 @@ def conv_with_kaiming_uniform(use_gn=False, use_relu=False): def make_conv( in_channels, out_channels, kernel_size, stride=1, dilation=1 ): - module = Conv2d( + conv = Conv2d( in_channels, out_channels, kernel_size=kernel_size, @@ -112,20 +107,16 @@ def make_conv( ) # Caffe2 implementation uses XavierFill, which in fact # corresponds to kaiming_uniform_ in PyTorch - nn.init.kaiming_uniform_(module.weight, a=1) + nn.init.kaiming_uniform_(conv.weight, a=1) if not use_gn: - nn.init.constant_(module.bias, 0) + nn.init.constant_(conv.bias, 0) + module = [conv,] if use_gn: - if use_relu: - return nn.Sequential( - module, group_norm(out_channels), nn.ReLU(inplace=True) - ) - else: - return nn.Sequential( - module, group_norm(out_channels) - ) + module.append(group_norm(out_channels)) if use_relu: - return nn.Sequential(module, nn.ReLU(inplace=True)) - else: - return module + module.append(nn.ReLU(inplace=True)) + if len(module) > 1: + return nn.Sequential(*module) + return conv + return make_conv From 9808a21f98f9d400dc487d094a8c647381688327 Mon Sep 17 00:00:00 2001 From: dongdk Date: Wed, 23 Jan 2019 21:24:14 +0800 Subject: [PATCH 09/13] use 'kaiming_uniform' to initialize resnet, disable gn after fc layer, and add dilation into ResNetHead --- maskrcnn_benchmark/modeling/backbone/resnet.py | 10 ++++++++++ maskrcnn_benchmark/modeling/make_layers.py | 2 +- .../roi_heads/box_head/roi_box_feature_extractors.py | 3 ++- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/maskrcnn_benchmark/modeling/backbone/resnet.py b/maskrcnn_benchmark/modeling/backbone/resnet.py index dd6073e9d..eed1cdb3f 100644 --- a/maskrcnn_benchmark/modeling/backbone/resnet.py +++ b/maskrcnn_benchmark/modeling/backbone/resnet.py @@ -244,6 +244,10 @@ def __init__( ), norm_func(out_channels), ) + for modules in [self.downsample,]: + for l in modules.modules(): + if isinstance(l, Conv2d): + nn.init.kaiming_uniform_(l.weight, a=1) if dilation > 1: stride = 1 # reset to be 1 @@ -280,6 +284,9 @@ def __init__( ) self.bn3 = norm_func(out_channels) + for l in [self.conv1, self.conv2, self.conv3,]: + nn.init.kaiming_uniform_(l.weight, a=1) + def forward(self, x): identity = x @@ -314,6 +321,9 @@ def __init__(self, cfg, norm_func): ) self.bn1 = norm_func(out_channels) + for l in [self.conv1,]: + nn.init.kaiming_uniform_(l.weight, a=1) + def forward(self, x): x = self.conv1(x) x = self.bn1(x) diff --git a/maskrcnn_benchmark/modeling/make_layers.py b/maskrcnn_benchmark/modeling/make_layers.py index e77311faf..74e56b0e2 100644 --- 
a/maskrcnn_benchmark/modeling/make_layers.py +++ b/maskrcnn_benchmark/modeling/make_layers.py @@ -77,7 +77,7 @@ def make_conv3x3( return conv -def make_fc(dim_in, hidden_dim, use_gn): +def make_fc(dim_in, hidden_dim, use_gn=False): ''' Caffe2 implementation uses XavierFill, which in fact corresponds to kaiming_uniform_ in PyTorch diff --git a/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py b/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py index d56d7a01f..3dea50c24 100644 --- a/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py +++ b/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py @@ -33,6 +33,7 @@ def __init__(self, config): stride_in_1x1=config.MODEL.RESNETS.STRIDE_IN_1X1, stride_init=None, res2_out_channels=config.MODEL.RESNETS.RES2_OUT_CHANNELS, + dilation=config.MODEL.RESNETS.RES5_DILATION ) self.pooler = pooler @@ -131,7 +132,7 @@ def __init__(self, cfg): input_size = conv_head_dim * resolution ** 2 representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM - self.fc6 = make_fc(input_size, representation_size, use_gn) + self.fc6 = make_fc(input_size, representation_size, use_gn=False) def forward(self, x, proposals): x = self.pooler(x, proposals) From 4de348855e2f06682275564d9692a70b5f9ca75b Mon Sep 17 00:00:00 2001 From: dongdk Date: Mon, 28 Jan 2019 22:34:07 +0800 Subject: [PATCH 10/13] agnostic-regression for bbox --- maskrcnn_benchmark/config/defaults.py | 1 + .../modeling/roi_heads/box_head/inference.py | 19 +++++++++++++-- .../modeling/roi_heads/box_head/loss.py | 24 ++++++++++++++++--- .../roi_heads/box_head/roi_box_predictors.py | 6 +++-- 4 files changed, 43 insertions(+), 7 deletions(-) diff --git a/maskrcnn_benchmark/config/defaults.py b/maskrcnn_benchmark/config/defaults.py index 5ed3fa35c..95080d38f 100644 --- a/maskrcnn_benchmark/config/defaults.py +++ b/maskrcnn_benchmark/config/defaults.py @@ -25,6 +25,7 @@ _C.MODEL.MASK_ON = False _C.MODEL.DEVICE = "cuda" _C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN" +_C.MODEL.CLS_AGNOSTIC_BBOX_REG = False # If the WEIGHT starts with a catalog://, like :R-50, the code will look for # the path in paths_catalog. 
Else, it will use it as the specified absolute diff --git a/maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py b/maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py index 196892550..a04635673 100644 --- a/maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py +++ b/maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py @@ -17,7 +17,12 @@ class PostProcessor(nn.Module): """ def __init__( - self, score_thresh=0.05, nms=0.5, detections_per_img=100, box_coder=None + self, + score_thresh=0.05, + nms=0.5, + detections_per_img=100, + box_coder=None, + cls_agnostic_bbox_reg=False ): """ Arguments: @@ -33,6 +38,7 @@ def __init__( if box_coder is None: box_coder = BoxCoder(weights=(10., 10., 5., 5.)) self.box_coder = box_coder + self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg def forward(self, x, boxes): """ @@ -54,9 +60,13 @@ def forward(self, x, boxes): boxes_per_image = [len(box) for box in boxes] concat_boxes = torch.cat([a.bbox for a in boxes], dim=0) + if self.cls_agnostic_bbox_reg: + box_regression = box_regression[:, -4:] proposals = self.box_coder.decode( box_regression.view(sum(boxes_per_image), -1), concat_boxes ) + if self.cls_agnostic_bbox_reg: + proposals = proposals.repeat(1, class_prob.shape[1]) num_classes = class_prob.shape[1] @@ -145,8 +155,13 @@ def make_roi_box_post_processor(cfg): score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH nms_thresh = cfg.MODEL.ROI_HEADS.NMS detections_per_img = cfg.MODEL.ROI_HEADS.DETECTIONS_PER_IMG + cls_agnostic_bbox_reg = cfg.MODEL.CLS_AGNOSTIC_BBOX_REG postprocessor = PostProcessor( - score_thresh, nms_thresh, detections_per_img, box_coder + score_thresh, + nms_thresh, + detections_per_img, + box_coder, + cls_agnostic_bbox_reg ) return postprocessor diff --git a/maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py b/maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py index 2c21f6cdb..9f2771d02 100644 --- a/maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py +++ b/maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py @@ -18,7 +18,13 @@ class FastRCNNLossComputation(object): Also supports FPN """ - def __init__(self, proposal_matcher, fg_bg_sampler, box_coder): + def __init__( + self, + proposal_matcher, + fg_bg_sampler, + box_coder, + cls_agnostic_bbox_reg=False + ): """ Arguments: proposal_matcher (Matcher) @@ -28,6 +34,7 @@ def __init__(self, proposal_matcher, fg_bg_sampler, box_coder): self.proposal_matcher = proposal_matcher self.fg_bg_sampler = fg_bg_sampler self.box_coder = box_coder + self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg def match_targets_to_proposals(self, proposal, target): match_quality_matrix = boxlist_iou(target, proposal) @@ -143,7 +150,11 @@ def __call__(self, class_logits, box_regression): # advanced indexing sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1) labels_pos = labels[sampled_pos_inds_subset] - map_inds = 4 * labels_pos[:, None] + torch.tensor([0, 1, 2, 3], device=device) + if self.cls_agnostic_bbox_reg: + map_inds = torch.tensor([4, 5, 6, 7], device=device) + else: + map_inds = 4 * labels_pos[:, None] + torch.tensor( + [0, 1, 2, 3], device=device) box_loss = smooth_l1_loss( box_regression[sampled_pos_inds_subset[:, None], map_inds], @@ -170,6 +181,13 @@ def make_roi_box_loss_evaluator(cfg): cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION ) - loss_evaluator = FastRCNNLossComputation(matcher, fg_bg_sampler, box_coder) + cls_agnostic_bbox_reg = cfg.MODEL.CLS_AGNOSTIC_BBOX_REG + + loss_evaluator = FastRCNNLossComputation( + matcher, 
+ fg_bg_sampler, + box_coder, + cls_agnostic_bbox_reg + ) return loss_evaluator diff --git a/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py b/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py index e05fcbb1d..740767850 100644 --- a/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py +++ b/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py @@ -14,7 +14,8 @@ def __init__(self, config, pretrained=None): num_classes = config.MODEL.ROI_BOX_HEAD.NUM_CLASSES self.avgpool = nn.AvgPool2d(kernel_size=7, stride=7) self.cls_score = nn.Linear(num_inputs, num_classes) - self.bbox_pred = nn.Linear(num_inputs, num_classes * 4) + num_bbox_reg_classes = 2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes + self.bbox_pred = nn.Linear(num_inputs, num_bbox_reg_classes * 4) nn.init.normal_(self.cls_score.weight, mean=0, std=0.01) nn.init.constant_(self.cls_score.bias, 0) @@ -37,7 +38,8 @@ def __init__(self, cfg): representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM self.cls_score = nn.Linear(representation_size, num_classes) - self.bbox_pred = nn.Linear(representation_size, num_classes * 4) + num_bbox_reg_classes = 2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes + self.bbox_pred = nn.Linear(representation_size, num_bbox_reg_classes * 4) nn.init.normal_(self.cls_score.weight, std=0.01) nn.init.normal_(self.bbox_pred.weight, std=0.001) From 483fca81ba676e8c6576518252e139c3443dd656 Mon Sep 17 00:00:00 2001 From: dongdk Date: Tue, 19 Feb 2019 22:39:57 +0800 Subject: [PATCH 11/13] please set 'STRIDE_IN_1X1' to be 'False' when backbone use GN --- configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_1x_gn.yaml | 1 + .../gn_baselines/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml | 3 ++- configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_1x_gn.yaml | 3 ++- .../gn_baselines/e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml | 3 ++- .../gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml | 3 ++- .../scratch_e2e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml | 3 ++- configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml | 1 + .../scratch_e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml | 3 ++- 8 files changed, 14 insertions(+), 6 deletions(-) diff --git a/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_1x_gn.yaml b/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_1x_gn.yaml index e8f3ca7f5..4e99eacb1 100644 --- a/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_1x_gn.yaml +++ b/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_1x_gn.yaml @@ -10,6 +10,7 @@ MODEL: CONV_BODY: "R-50-FPN" OUT_CHANNELS: 256 RESNETS: # use GN for backbone + STRIDE_IN_1X1: False TRANS_FUNC: "BottleneckWithGN" STEM_FUNC: "StemWithGN" FPN: diff --git a/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml b/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml index d25b43430..4a55c00f0 100644 --- a/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml +++ b/configs/gn_baselines/e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml @@ -10,6 +10,7 @@ MODEL: CONV_BODY: "R-50-FPN" OUT_CHANNELS: 256 RESNETS: # use GN for backbone + STRIDE_IN_1X1: False TRANS_FUNC: "BottleneckWithGN" STEM_FUNC: "StemWithGN" FPN: @@ -47,4 +48,4 @@ SOLVER: MAX_ITER: 90000 IMS_PER_BATCH: 16 TEST: - IMS_PER_BATCH: 8 \ No newline at end of file + IMS_PER_BATCH: 8 diff --git a/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_1x_gn.yaml b/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_1x_gn.yaml index e59c8b9a4..f0b488ac4 100644 --- a/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_1x_gn.yaml +++ 
b/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_1x_gn.yaml @@ -10,6 +10,7 @@ MODEL: CONV_BODY: "R-50-FPN" OUT_CHANNELS: 256 RESNETS: # use GN for backbone + STRIDE_IN_1X1: False TRANS_FUNC: "BottleneckWithGN" STEM_FUNC: "StemWithGN" FPN: @@ -56,4 +57,4 @@ SOLVER: MAX_ITER: 90000 IMS_PER_BATCH: 16 TEST: - IMS_PER_BATCH: 8 \ No newline at end of file + IMS_PER_BATCH: 8 diff --git a/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml b/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml index f3bba2f58..fc4b6de00 100644 --- a/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml +++ b/configs/gn_baselines/e2e_mask_rcnn_R_50_FPN_Xconv1fc_1x_gn.yaml @@ -10,6 +10,7 @@ MODEL: CONV_BODY: "R-50-FPN" OUT_CHANNELS: 256 RESNETS: # use GN for backbone + STRIDE_IN_1X1: False TRANS_FUNC: "BottleneckWithGN" STEM_FUNC: "StemWithGN" FPN: @@ -58,4 +59,4 @@ SOLVER: MAX_ITER: 90000 IMS_PER_BATCH: 16 TEST: - IMS_PER_BATCH: 8 \ No newline at end of file + IMS_PER_BATCH: 8 diff --git a/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml b/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml index a13049f58..16be1eb90 100644 --- a/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml +++ b/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_3x_gn.yaml @@ -11,6 +11,7 @@ MODEL: OUT_CHANNELS: 256 FREEZE_CONV_BODY_AT: 0 # finetune all layers RESNETS: # use GN for backbone + STRIDE_IN_1X1: False TRANS_FUNC: "BottleneckWithGN" STEM_FUNC: "StemWithGN" FPN: @@ -46,4 +47,4 @@ SOLVER: MAX_ITER: 270000 IMS_PER_BATCH: 16 TEST: - IMS_PER_BATCH: 8 \ No newline at end of file + IMS_PER_BATCH: 8 diff --git a/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml b/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml index d04feddf5..b6f68424e 100644 --- a/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml +++ b/configs/gn_baselines/scratch_e2e_faster_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml @@ -11,6 +11,7 @@ MODEL: OUT_CHANNELS: 256 FREEZE_CONV_BODY_AT: 0 # finetune all layers RESNETS: # use GN for backbone + STRIDE_IN_1X1: False TRANS_FUNC: "BottleneckWithGN" STEM_FUNC: "StemWithGN" FPN: @@ -48,4 +49,4 @@ SOLVER: MAX_ITER: 270000 IMS_PER_BATCH: 16 TEST: - IMS_PER_BATCH: 8 \ No newline at end of file + IMS_PER_BATCH: 8 diff --git a/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml b/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml index c8eae5e0d..edc53b443 100644 --- a/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml +++ b/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_3x_gn.yaml @@ -11,6 +11,7 @@ MODEL: OUT_CHANNELS: 256 FREEZE_CONV_BODY_AT: 0 # finetune all layers RESNETS: # use GN for backbone + STRIDE_IN_1X1: False TRANS_FUNC: "BottleneckWithGN" STEM_FUNC: "StemWithGN" FPN: diff --git a/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml b/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml index db8233790..3384770b0 100644 --- a/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml +++ b/configs/gn_baselines/scratch_e2e_mask_rcnn_R_50_FPN_Xconv1fc_3x_gn.yaml @@ -11,6 +11,7 @@ MODEL: OUT_CHANNELS: 256 FREEZE_CONV_BODY_AT: 0 # finetune all layers RESNETS: # use GN for backbone + STRIDE_IN_1X1: False TRANS_FUNC: "BottleneckWithGN" STEM_FUNC: "StemWithGN" FPN: @@ -59,4 +60,4 @@ SOLVER: MAX_ITER: 270000 IMS_PER_BATCH: 16 TEST: - IMS_PER_BATCH: 8 \ No newline at end of file + IMS_PER_BATCH: 8 From 
d2d55f9cca42c2a0a756ab1d1b3ab452ffcd9fb0 Mon Sep 17 00:00:00 2001 From: dongdk Date: Tue, 26 Feb 2019 22:21:02 +0800 Subject: [PATCH 12/13] add README.md for GN --- configs/gn_baselines/README.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 configs/gn_baselines/README.md diff --git a/configs/gn_baselines/README.md b/configs/gn_baselines/README.md new file mode 100644 index 000000000..2d799d76c --- /dev/null +++ b/configs/gn_baselines/README.md @@ -0,0 +1,13 @@ +### Group Normalization +1 [Group Normalization](https://arxiv.org/abs/1803.08494) +2 [Rethinking ImageNet Pre-training](https://arxiv.org/abs/1811.08883) +3 [official code](https://github.com/facebookresearch/Detectron/blob/master/projects/GN/README.md) + + +### Performance +| case | Type | lr schd | im/gpu | bbox AP | mask AP | +|----------------------------|:------------:|:---------:|:-------:|:-------:|:-------:| +| R-50-FPN, GN (paper) | finetune | 2x | 2 | 40.3 | 35.7 | +| R-50-FPN, GN (implement) | finetune | 2x | 2 | 40.2 | 36.0 | +| R-50-FPN, GN (paper) | from scratch | 3x | 2 | 39.5 | 35.2 | +| R-50-FPN, GN (implement) | from scratch | 3x | 2 | 38.9 | 35.1 | From eee83fccc96105694cd5593b551611b846b3fccc Mon Sep 17 00:00:00 2001 From: dongdk Date: Fri, 19 Apr 2019 22:20:00 +0800 Subject: [PATCH 13/13] add dcn from mmdetection --- .gitignore | 1 + configs/dcn/README.md | 31 + .../e2e_faster_rcnn_dconv_R_50_FPN_1x.yaml | 44 + .../e2e_faster_rcnn_mdconv_R_50_FPN_1x.yaml | 44 + .../dcn/e2e_mask_rcnn_dconv_R_50_FPN_1x.yaml | 54 ++ .../dcn/e2e_mask_rcnn_mdconv_R_50_FPN_1x.yaml | 53 ++ maskrcnn_benchmark/config/defaults.py | 4 + .../csrc/cuda/deform_conv_cuda.cu | 691 ++++++++++++++ .../csrc/cuda/deform_conv_kernel_cuda.cu | 874 ++++++++++++++++++ .../csrc/cuda/deform_pool_cuda.cu | 87 ++ .../csrc/cuda/deform_pool_kernel_cuda.cu | 365 ++++++++ maskrcnn_benchmark/csrc/cuda/vision.h | 53 ++ maskrcnn_benchmark/csrc/deform_conv.h | 191 ++++ maskrcnn_benchmark/csrc/deform_pool.h | 70 ++ maskrcnn_benchmark/csrc/vision.cpp | 12 +- maskrcnn_benchmark/layers/__init__.py | 34 +- maskrcnn_benchmark/layers/dcn/__init__.py | 3 + .../layers/dcn/deform_conv_func.py | 259 ++++++ .../layers/dcn/deform_conv_module.py | 177 ++++ .../layers/dcn/deform_pool_func.py | 93 ++ .../layers/dcn/deform_pool_module.py | 150 +++ maskrcnn_benchmark/layers/misc.py | 84 ++ .../modeling/backbone/resnet.py | 73 +- maskrcnn_benchmark/utils/c2_model_loading.py | 31 + 24 files changed, 3453 insertions(+), 25 deletions(-) create mode 100644 configs/dcn/README.md create mode 100644 configs/dcn/e2e_faster_rcnn_dconv_R_50_FPN_1x.yaml create mode 100644 configs/dcn/e2e_faster_rcnn_mdconv_R_50_FPN_1x.yaml create mode 100644 configs/dcn/e2e_mask_rcnn_dconv_R_50_FPN_1x.yaml create mode 100644 configs/dcn/e2e_mask_rcnn_mdconv_R_50_FPN_1x.yaml create mode 100644 maskrcnn_benchmark/csrc/cuda/deform_conv_cuda.cu create mode 100644 maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.cu create mode 100644 maskrcnn_benchmark/csrc/cuda/deform_pool_cuda.cu create mode 100644 maskrcnn_benchmark/csrc/cuda/deform_pool_kernel_cuda.cu create mode 100644 maskrcnn_benchmark/csrc/deform_conv.h create mode 100644 maskrcnn_benchmark/csrc/deform_pool.h create mode 100644 maskrcnn_benchmark/layers/dcn/__init__.py create mode 100644 maskrcnn_benchmark/layers/dcn/deform_conv_func.py create mode 100644 maskrcnn_benchmark/layers/dcn/deform_conv_module.py create mode 100644 maskrcnn_benchmark/layers/dcn/deform_pool_func.py create mode 100644 
maskrcnn_benchmark/layers/dcn/deform_pool_module.py diff --git a/.gitignore b/.gitignore index 223e87a19..4124b2bf1 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,4 @@ dist/ # project dirs /datasets /models +/output diff --git a/configs/dcn/README.md b/configs/dcn/README.md new file mode 100644 index 000000000..2c3a0784e --- /dev/null +++ b/configs/dcn/README.md @@ -0,0 +1,31 @@ +### Reference +1 [Deformable ConvNets v2: More Deformable, Better Results](https://arxiv.org/pdf/1811.11168.pdf) +2 third-party: [mmdetection](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn) + +### Performance +| case | bbox AP | mask AP | +|----------------------------:|--------:|:-------:| +| R-50-FPN-dcn (implement) | 39.8 | - | +| R-50-FPN-dcn (mmdetection) | 40.0 | - | +| R-50-FPN-mdcn (implement) | 40.0 | - | +| R-50-FPN-mdcn (mmdetection) | 40.3 | - | +| R-50-FPN-dcn (implement) | 40.8 | 36.8 | +| R-50-FPN-dcn (mmdetection) | 41.1 | 37.2 | +| R-50-FPN-dcn (implement) | 40.7 | 36.7 | +| R-50-FPN-dcn (mmdetection) | 41.4 | 37.4 | + + +### Note +see [dcn-v2](https://github.com/open-mmlab/mmdetection/blob/master/MODEL_ZOO.md#deformable-convolution-v2) in `mmdetection` for more details. + + +### Usage +add these three lines +``` +MODEL: + RESNETS: + # corresponding to C2,C3,C4,C5 + STAGE_WITH_DCN: (False, True, True, True) + WITH_MODULATED_DCN: True + DEFORMABLE_GROUPS: 1 +``` \ No newline at end of file diff --git a/configs/dcn/e2e_faster_rcnn_dconv_R_50_FPN_1x.yaml b/configs/dcn/e2e_faster_rcnn_dconv_R_50_FPN_1x.yaml new file mode 100644 index 000000000..25c037592 --- /dev/null +++ b/configs/dcn/e2e_faster_rcnn_dconv_R_50_FPN_1x.yaml @@ -0,0 +1,44 @@ +INPUT: + MIN_SIZE_TRAIN: (800,) + MAX_SIZE_TRAIN: 1333 + MIN_SIZE_TEST: 800 + MAX_SIZE_TEST: 1333 +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50" + BACKBONE: + CONV_BODY: "R-50-FPN" + RESNETS: + BACKBONE_OUT_CHANNELS: 256 + STAGE_WITH_DCN: (False, True, True, True) + WITH_MODULATED_DCN: False + DEFORMABLE_GROUPS: 1 + RPN: + USE_FPN: True + ANCHOR_STRIDE: (4, 8, 16, 32, 64) + PRE_NMS_TOP_N_TRAIN: 2000 + PRE_NMS_TOP_N_TEST: 1000 + POST_NMS_TOP_N_TEST: 1000 + FPN_POST_NMS_TOP_N_TEST: 1000 + ROI_HEADS: + USE_FPN: True + ROI_BOX_HEAD: + POOLER_RESOLUTION: 7 + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + POOLER_SAMPLING_RATIO: 2 + FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor" + PREDICTOR: "FPNPredictor" +DATASETS: + TRAIN: ("coco_2014_train", "coco_2014_valminusminival") + TEST: ("coco_2014_minival",) +DATALOADER: + SIZE_DIVISIBILITY: 32 +SOLVER: + # Assume 8 gpus + BASE_LR: 0.02 + WEIGHT_DECAY: 0.0001 + STEPS: (60000, 80000) + MAX_ITER: 90000 + IMS_PER_BATCH: 16 +TEST: + IMS_PER_BATCH: 8 diff --git a/configs/dcn/e2e_faster_rcnn_mdconv_R_50_FPN_1x.yaml b/configs/dcn/e2e_faster_rcnn_mdconv_R_50_FPN_1x.yaml new file mode 100644 index 000000000..6bc04212c --- /dev/null +++ b/configs/dcn/e2e_faster_rcnn_mdconv_R_50_FPN_1x.yaml @@ -0,0 +1,44 @@ +INPUT: + MIN_SIZE_TRAIN: (800,) + MAX_SIZE_TRAIN: 1333 + MIN_SIZE_TEST: 800 + MAX_SIZE_TEST: 1333 +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50" + BACKBONE: + CONV_BODY: "R-50-FPN" + RESNETS: + BACKBONE_OUT_CHANNELS: 256 + STAGE_WITH_DCN: (False, True, True, True) + WITH_MODULATED_DCN: True + DEFORMABLE_GROUPS: 1 + RPN: + USE_FPN: True + ANCHOR_STRIDE: (4, 8, 16, 32, 64) + PRE_NMS_TOP_N_TRAIN: 2000 + PRE_NMS_TOP_N_TEST: 1000 + POST_NMS_TOP_N_TEST: 1000 + FPN_POST_NMS_TOP_N_TEST: 1000 + ROI_HEADS: + 
USE_FPN: True + ROI_BOX_HEAD: + POOLER_RESOLUTION: 7 + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + POOLER_SAMPLING_RATIO: 2 + FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor" + PREDICTOR: "FPNPredictor" +DATASETS: + TRAIN: ("coco_2014_train", "coco_2014_valminusminival") + TEST: ("coco_2014_minival",) +DATALOADER: + SIZE_DIVISIBILITY: 32 +SOLVER: + # Assume 8 gpus + BASE_LR: 0.02 + WEIGHT_DECAY: 0.0001 + STEPS: (60000, 80000) + MAX_ITER: 90000 + IMS_PER_BATCH: 16 +TEST: + IMS_PER_BATCH: 8 diff --git a/configs/dcn/e2e_mask_rcnn_dconv_R_50_FPN_1x.yaml b/configs/dcn/e2e_mask_rcnn_dconv_R_50_FPN_1x.yaml new file mode 100644 index 000000000..5cffe5c57 --- /dev/null +++ b/configs/dcn/e2e_mask_rcnn_dconv_R_50_FPN_1x.yaml @@ -0,0 +1,54 @@ +INPUT: + MIN_SIZE_TRAIN: (800,) + MAX_SIZE_TRAIN: 1333 + MIN_SIZE_TEST: 800 + MAX_SIZE_TEST: 1333 +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50" + BACKBONE: + CONV_BODY: "R-50-FPN" + RESNETS: + BACKBONE_OUT_CHANNELS: 256 + STAGE_WITH_DCN: (False, True, True, True) + WITH_MODULATED_DCN: False + DEFORMABLE_GROUPS: 1 + RPN: + USE_FPN: True + ANCHOR_STRIDE: (4, 8, 16, 32, 64) + PRE_NMS_TOP_N_TRAIN: 2000 + PRE_NMS_TOP_N_TEST: 1000 + POST_NMS_TOP_N_TEST: 1000 + FPN_POST_NMS_TOP_N_TEST: 1000 + ROI_HEADS: + USE_FPN: True + ROI_BOX_HEAD: + POOLER_RESOLUTION: 7 + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + POOLER_SAMPLING_RATIO: 2 + FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor" + PREDICTOR: "FPNPredictor" + ROI_MASK_HEAD: + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor" + PREDICTOR: "MaskRCNNC4Predictor" + POOLER_RESOLUTION: 14 + POOLER_SAMPLING_RATIO: 2 + RESOLUTION: 28 + SHARE_BOX_FEATURE_EXTRACTOR: False + MASK_ON: True +DATASETS: + TRAIN: ("coco_2014_train", "coco_2014_valminusminival") + TEST: ("coco_2014_minival",) +DATALOADER: + SIZE_DIVISIBILITY: 32 +SOLVER: + # Assume 8 gpus + BASE_LR: 0.02 + WEIGHT_DECAY: 0.0001 + STEPS: (60000, 80000) + MAX_ITER: 90000 + IMS_PER_BATCH: 16 +TEST: + IMS_PER_BATCH: 8 + diff --git a/configs/dcn/e2e_mask_rcnn_mdconv_R_50_FPN_1x.yaml b/configs/dcn/e2e_mask_rcnn_mdconv_R_50_FPN_1x.yaml new file mode 100644 index 000000000..9921adfa5 --- /dev/null +++ b/configs/dcn/e2e_mask_rcnn_mdconv_R_50_FPN_1x.yaml @@ -0,0 +1,53 @@ +INPUT: + MIN_SIZE_TRAIN: (800,) + MAX_SIZE_TRAIN: 1333 + MIN_SIZE_TEST: 800 + MAX_SIZE_TEST: 1333 +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50" + BACKBONE: + CONV_BODY: "R-50-FPN" + RESNETS: + BACKBONE_OUT_CHANNELS: 256 + STAGE_WITH_DCN: (False, True, True, True) + WITH_MODULATED_DCN: True + DEFORMABLE_GROUPS: 1 + RPN: + USE_FPN: True + ANCHOR_STRIDE: (4, 8, 16, 32, 64) + PRE_NMS_TOP_N_TRAIN: 2000 + PRE_NMS_TOP_N_TEST: 1000 + POST_NMS_TOP_N_TEST: 1000 + FPN_POST_NMS_TOP_N_TEST: 1000 + ROI_HEADS: + USE_FPN: True + ROI_BOX_HEAD: + POOLER_RESOLUTION: 7 + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + POOLER_SAMPLING_RATIO: 2 + FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor" + PREDICTOR: "FPNPredictor" + ROI_MASK_HEAD: + POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) + FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor" + PREDICTOR: "MaskRCNNC4Predictor" + POOLER_RESOLUTION: 14 + POOLER_SAMPLING_RATIO: 2 + RESOLUTION: 28 + SHARE_BOX_FEATURE_EXTRACTOR: False + MASK_ON: True +DATASETS: + TRAIN: ("coco_2014_train", "coco_2014_valminusminival") + TEST: ("coco_2014_minival",) +DATALOADER: + SIZE_DIVISIBILITY: 32 +SOLVER: + # Assume 8 gpus + BASE_LR: 0.02 + WEIGHT_DECAY: 0.0001 + 
STEPS: (60000, 80000) + MAX_ITER: 90000 + IMS_PER_BATCH: 16 +TEST: + IMS_PER_BATCH: 8 diff --git a/maskrcnn_benchmark/config/defaults.py b/maskrcnn_benchmark/config/defaults.py index fc750fd4f..23a599ef7 100644 --- a/maskrcnn_benchmark/config/defaults.py +++ b/maskrcnn_benchmark/config/defaults.py @@ -274,6 +274,10 @@ _C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256 _C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64 +_C.MODEL.RESNETS.STAGE_WITH_DCN = (False, False, False, False) +_C.MODEL.RESNETS.WITH_MODULATED_DCN = False +_C.MODEL.RESNETS.DEFORMABLE_GROUPS = 1 + # ---------------------------------------------------------------------------- # # RetinaNet Options (Follow the Detectron version) diff --git a/maskrcnn_benchmark/csrc/cuda/deform_conv_cuda.cu b/maskrcnn_benchmark/csrc/cuda/deform_conv_cuda.cu new file mode 100644 index 000000000..74f7d3399 --- /dev/null +++ b/maskrcnn_benchmark/csrc/cuda/deform_conv_cuda.cu @@ -0,0 +1,691 @@ +// modify from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda.c + +#include +#include + +#include +#include + +#include +#include +#include + + +void deformable_im2col(const at::Tensor data_im, const at::Tensor data_offset, + const int channels, const int height, const int width, + const int ksize_h, const int ksize_w, const int pad_h, + const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int parallel_imgs, const int deformable_group, + at::Tensor data_col); + +void deformable_col2im(const at::Tensor data_col, const at::Tensor data_offset, + const int channels, const int height, const int width, + const int ksize_h, const int ksize_w, const int pad_h, + const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int parallel_imgs, const int deformable_group, + at::Tensor grad_im); + +void deformable_col2im_coord( + const at::Tensor data_col, const at::Tensor data_im, + const at::Tensor data_offset, const int channels, const int height, + const int width, const int ksize_h, const int ksize_w, const int pad_h, + const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int parallel_imgs, + const int deformable_group, at::Tensor grad_offset); + +void modulated_deformable_im2col_cuda( + const at::Tensor data_im, const at::Tensor data_offset, + const at::Tensor data_mask, const int batch_size, const int channels, + const int height_im, const int width_im, const int height_col, + const int width_col, const int kernel_h, const int kenerl_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int deformable_group, + at::Tensor data_col); + +void modulated_deformable_col2im_cuda( + const at::Tensor data_col, const at::Tensor data_offset, + const at::Tensor data_mask, const int batch_size, const int channels, + const int height_im, const int width_im, const int height_col, + const int width_col, const int kernel_h, const int kenerl_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int deformable_group, + at::Tensor grad_im); + +void modulated_deformable_col2im_coord_cuda( + const at::Tensor data_col, const at::Tensor data_im, + const at::Tensor data_offset, const at::Tensor data_mask, + const int batch_size, const int channels, const int height_im, + const int width_im, const int height_col, const int 
width_col, + const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, const int dilation_h, + const int dilation_w, const int deformable_group, at::Tensor grad_offset, + at::Tensor grad_mask); + +void shape_check(at::Tensor input, at::Tensor offset, at::Tensor *gradOutput, + at::Tensor weight, int kH, int kW, int dH, int dW, int padH, + int padW, int dilationH, int dilationW, int group, + int deformable_group) +{ + AT_CHECK(weight.ndimension() == 4, + "4D weight tensor (nOutputPlane,nInputPlane,kH,kW) expected, " + "but got: %s", + weight.ndimension()); + + AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + AT_CHECK(kW > 0 && kH > 0, + "kernel size should be greater than zero, but got kH: %d kW: %d", kH, + kW); + + AT_CHECK((weight.size(2) == kH && weight.size(3) == kW), + "kernel size should be consistent with weight, ", + "but got kH: %d kW: %d weight.size(2): %d, weight.size(3): %d", kH, + kW, weight.size(2), weight.size(3)); + + AT_CHECK(dW > 0 && dH > 0, + "stride should be greater than zero, but got dH: %d dW: %d", dH, dW); + + AT_CHECK( + dilationW > 0 && dilationH > 0, + "dilation should be greater than 0, but got dilationH: %d dilationW: %d", + dilationH, dilationW); + + int ndim = input.ndimension(); + int dimf = 0; + int dimh = 1; + int dimw = 2; + + if (ndim == 4) { + dimf++; + dimh++; + dimw++; + } + + AT_CHECK(ndim == 3 || ndim == 4, "3D or 4D input tensor expected but got: %s", + ndim); + + long nInputPlane = weight.size(1) * group; + long inputHeight = input.size(dimh); + long inputWidth = input.size(dimw); + long nOutputPlane = weight.size(0); + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + + AT_CHECK(nInputPlane % deformable_group == 0, + "input channels must divide deformable group size"); + + if (outputWidth < 1 || outputHeight < 1) + AT_ERROR( + "Given input size: (%ld x %ld x %ld). " + "Calculated output size: (%ld x %ld x %ld). 
Output size is too small", + nInputPlane, inputHeight, inputWidth, nOutputPlane, outputHeight, + outputWidth); + + AT_CHECK(input.size(1) == nInputPlane, + "invalid number of input planes, expected: %d, but got: %d", + nInputPlane, input.size(1)); + + AT_CHECK((inputHeight >= kH && inputWidth >= kW), + "input image is smaller than kernel"); + + AT_CHECK((offset.size(2) == outputHeight && offset.size(3) == outputWidth), + "invalid spatial size of offset, expected height: %d width: %d, but " + "got height: %d width: %d", + outputHeight, outputWidth, offset.size(2), offset.size(3)); + + AT_CHECK((offset.size(1) == deformable_group * 2 * kH * kW), + "invalid number of channels of offset"); + + if (gradOutput != NULL) { + AT_CHECK(gradOutput->size(dimf) == nOutputPlane, + "invalid number of gradOutput planes, expected: %d, but got: %d", + nOutputPlane, gradOutput->size(dimf)); + + AT_CHECK((gradOutput->size(dimh) == outputHeight && + gradOutput->size(dimw) == outputWidth), + "invalid size of gradOutput, expected height: %d width: %d , but " + "got height: %d width: %d", + outputHeight, outputWidth, gradOutput->size(dimh), + gradOutput->size(dimw)); + } +} + +int deform_conv_forward_cuda(at::Tensor input, at::Tensor weight, + at::Tensor offset, at::Tensor output, + at::Tensor columns, at::Tensor ones, int kW, + int kH, int dW, int dH, int padW, int padH, + int dilationW, int dilationH, int group, + int deformable_group, int im2col_step) +{ + // todo: resize columns to include im2col: done + // todo: add im2col_step as input + // todo: add new output buffer and transpose it to output (or directly + // transpose output) todo: possibly change data indexing because of + // parallel_imgs + + shape_check(input, offset, NULL, weight, kH, kW, dH, dW, padH, padW, + dilationH, dilationW, group, deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + weight = weight.contiguous(); + + int batch = 1; + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input.unsqueeze_(0); + offset.unsqueeze_(0); + } + + // todo: assert batchsize dividable by im2col_step + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = weight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + AT_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); + + output = output.view({batchSize / im2col_step, im2col_step, nOutputPlane, + outputHeight, outputWidth}); + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < outputHeight * outputWidth) { + ones = at::ones({outputHeight, outputWidth}, input.options()); + } + + input = input.view({batchSize / im2col_step, im2col_step, nInputPlane, + inputHeight, inputWidth}); + offset = + offset.view({batchSize / im2col_step, im2col_step, + deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + at::Tensor output_buffer = + at::zeros({batchSize / im2col_step, nOutputPlane, + im2col_step * outputHeight, outputWidth}, + output.options()); + + output_buffer = output_buffer.view( + {output_buffer.size(0), group, output_buffer.size(1) / group, + output_buffer.size(2), output_buffer.size(3)}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + 
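+    // Summary of this loop, as read from the calls below: each iteration
+    // covers one chunk of im2col_step images. deformable_im2col() samples the
+    // input at the predicted offsets and packs the patches into `columns`;
+    // the grouped addmm_ that follows then performs the deformable
+    // convolution as an ordinary GEMM against the flattened weights.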
deformable_im2col(input[elt], offset[elt], nInputPlane, inputHeight, + inputWidth, kH, kW, padH, padW, dH, dW, dilationH, + dilationW, im2col_step, deformable_group, columns); + + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view({group, weight.size(0) / group, weight.size(1), + weight.size(2), weight.size(3)}); + + for (int g = 0; g < group; g++) { + output_buffer[elt][g] = output_buffer[elt][g] + .flatten(1) + .addmm_(weight[g].flatten(1), columns[g]) + .view_as(output_buffer[elt][g]); + } + } + + output_buffer = output_buffer.view( + {output_buffer.size(0), output_buffer.size(1) * output_buffer.size(2), + output_buffer.size(3), output_buffer.size(4)}); + + output_buffer = output_buffer.view({batchSize / im2col_step, nOutputPlane, + im2col_step, outputHeight, outputWidth}); + output_buffer.transpose_(1, 2); + output.copy_(output_buffer); + output = output.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + output = output.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); + } + + return 1; +} + +int deform_conv_backward_input_cuda(at::Tensor input, at::Tensor offset, + at::Tensor gradOutput, at::Tensor gradInput, + at::Tensor gradOffset, at::Tensor weight, + at::Tensor columns, int kW, int kH, int dW, + int dH, int padW, int padH, int dilationW, + int dilationH, int group, + int deformable_group, int im2col_step) +{ + shape_check(input, offset, &gradOutput, weight, kH, kW, dH, dW, padH, padW, + dilationH, dilationW, group, deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + gradOutput = gradOutput.contiguous(); + weight = weight.contiguous(); + + int batch = 1; + + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input = input.view({1, input.size(0), input.size(1), input.size(2)}); + offset = offset.view({1, offset.size(0), offset.size(1), offset.size(2)}); + gradOutput = gradOutput.view( + {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); + } + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = weight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + AT_CHECK((offset.size(0) == batchSize), 3, "invalid batch size of offset"); + gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + // change order of grad output + gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step, + nOutputPlane, outputHeight, outputWidth}); + gradOutput.transpose_(1, 2); + + gradInput = gradInput.view({batchSize / im2col_step, im2col_step, nInputPlane, + inputHeight, inputWidth}); + input = input.view({batchSize / im2col_step, im2col_step, nInputPlane, + inputHeight, inputWidth}); + gradOffset = gradOffset.view({batchSize / im2col_step, im2col_step, + deformable_group * 2 * kH * kW, outputHeight, + outputWidth}); + offset = + offset.view({batchSize / im2col_step, im2col_step, 
+ deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + // divide into groups + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view({group, weight.size(0) / group, weight.size(1), + weight.size(2), weight.size(3)}); + gradOutput = gradOutput.view( + {gradOutput.size(0), group, gradOutput.size(1) / group, + gradOutput.size(2), gradOutput.size(3), gradOutput.size(4)}); + + for (int g = 0; g < group; g++) { + columns[g] = columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), + gradOutput[elt][g].flatten(1), 0.0f, 1.0f); + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + gradOutput = gradOutput.view( + {gradOutput.size(0), gradOutput.size(1) * gradOutput.size(2), + gradOutput.size(3), gradOutput.size(4), gradOutput.size(5)}); + + deformable_col2im_coord(columns, input[elt], offset[elt], nInputPlane, + inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, + dilationH, dilationW, im2col_step, deformable_group, + gradOffset[elt]); + + deformable_col2im(columns, offset[elt], nInputPlane, inputHeight, + inputWidth, kH, kW, padH, padW, dH, dW, dilationH, + dilationW, im2col_step, deformable_group, gradInput[elt]); + } + + gradOutput.transpose_(1, 2); + gradOutput = + gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + gradOffset = gradOffset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + gradInput = gradInput.view({nInputPlane, inputHeight, inputWidth}); + offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); + gradOffset = + gradOffset.view({offset.size(1), offset.size(2), offset.size(3)}); + } + + return 1; +} + +int deform_conv_backward_parameters_cuda( + at::Tensor input, at::Tensor offset, at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, at::Tensor ones, int kW, int kH, int dW, int dH, + int padW, int padH, int dilationW, int dilationH, int group, + int deformable_group, float scale, int im2col_step) +{ + // todo: transpose and reshape outGrad + // todo: reshape columns + // todo: add im2col_step as input + + shape_check(input, offset, &gradOutput, gradWeight, kH, kW, dH, dW, padH, + padW, dilationH, dilationW, group, deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + gradOutput = gradOutput.contiguous(); + + int batch = 1; + + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input = input.view( + at::IntList({1, input.size(0), input.size(1), input.size(2)})); + gradOutput = gradOutput.view( + {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); + } + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = gradWeight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + AT_CHECK((offset.size(0) == batchSize), "invalid 
batch size of offset"); + + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step, + nOutputPlane, outputHeight, outputWidth}); + gradOutput.transpose_(1, 2); + + at::Tensor gradOutputBuffer = at::zeros_like(gradOutput); + gradOutputBuffer = + gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane, im2col_step, + outputHeight, outputWidth}); + gradOutputBuffer.copy_(gradOutput); + gradOutputBuffer = + gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane, + im2col_step * outputHeight, outputWidth}); + + gradOutput.transpose_(1, 2); + gradOutput = + gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + input = input.view({batchSize / im2col_step, im2col_step, nInputPlane, + inputHeight, inputWidth}); + offset = + offset.view({batchSize / im2col_step, im2col_step, + deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + deformable_im2col(input[elt], offset[elt], nInputPlane, inputHeight, + inputWidth, kH, kW, padH, padW, dH, dW, dilationH, + dilationW, im2col_step, deformable_group, columns); + + // divide into group + gradOutputBuffer = gradOutputBuffer.view( + {gradOutputBuffer.size(0), group, gradOutputBuffer.size(1) / group, + gradOutputBuffer.size(2), gradOutputBuffer.size(3)}); + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + gradWeight = + gradWeight.view({group, gradWeight.size(0) / group, gradWeight.size(1), + gradWeight.size(2), gradWeight.size(3)}); + + for (int g = 0; g < group; g++) { + gradWeight[g] = gradWeight[g] + .flatten(1) + .addmm_(gradOutputBuffer[elt][g].flatten(1), + columns[g].transpose(1, 0), 1.0, scale) + .view_as(gradWeight[g]); + } + gradOutputBuffer = gradOutputBuffer.view( + {gradOutputBuffer.size(0), + gradOutputBuffer.size(1) * gradOutputBuffer.size(2), + gradOutputBuffer.size(3), gradOutputBuffer.size(4)}); + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + gradWeight = gradWeight.view({gradWeight.size(0) * gradWeight.size(1), + gradWeight.size(2), gradWeight.size(3), + gradWeight.size(4)}); + } + + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + } + + return 1; +} + +void modulated_deform_conv_cuda_forward( + at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, + at::Tensor offset, at::Tensor mask, at::Tensor output, at::Tensor columns, + int kernel_h, int kernel_w, const int stride_h, const int stride_w, + const int pad_h, const int pad_w, const int dilation_h, + const int dilation_w, const int group, const int deformable_group, + const bool with_bias) +{ + AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + + const int channels_out = weight.size(0); + const int channels_kernel = weight.size(1); + const int kernel_h_ = weight.size(2); + const int kernel_w_ = weight.size(3); + + if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) + 
AT_ERROR("Input shape and kernel shape wont match: (%d x %d vs %d x %d).", + kernel_h_, kernel_w, kernel_h_, kernel_w_); + if (channels != channels_kernel * group) + AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", + channels, channels_kernel * group); + + const int height_out = + (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int width_out = + (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < height_out * width_out) { + // Resize plane and fill with ones... + ones = at::ones({height_out, width_out}, input.options()); + } + + // resize output + output = output.view({batch, channels_out, height_out, width_out}).zero_(); + // resize temporary columns + columns = + at::zeros({channels * kernel_h * kernel_w, 1 * height_out * width_out}, + input.options()); + + output = output.view({output.size(0), group, output.size(1) / group, + output.size(2), output.size(3)}); + + for (int b = 0; b < batch; b++) { + modulated_deformable_im2col_cuda( + input[b], offset[b], mask[b], 1, channels, height, width, height_out, + width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, deformable_group, columns); + + // divide into group + weight = weight.view({group, weight.size(0) / group, weight.size(1), + weight.size(2), weight.size(3)}); + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + + for (int g = 0; g < group; g++) { + output[b][g] = output[b][g] + .flatten(1) + .addmm_(weight[g].flatten(1), columns[g]) + .view_as(output[b][g]); + } + + weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), + weight.size(3), weight.size(4)}); + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + } + + output = output.view({output.size(0), output.size(1) * output.size(2), + output.size(3), output.size(4)}); + + if (with_bias) { + output += bias.view({1, bias.size(0), 1, 1}); + } +} + +void modulated_deform_conv_cuda_backward( + at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, + at::Tensor offset, at::Tensor mask, at::Tensor columns, + at::Tensor grad_input, at::Tensor grad_weight, at::Tensor grad_bias, + at::Tensor grad_offset, at::Tensor grad_mask, at::Tensor grad_output, + int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, + int pad_w, int dilation_h, int dilation_w, int group, int deformable_group, + const bool with_bias) +{ + AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + + const int channels_kernel = weight.size(1); + const int kernel_h_ = weight.size(2); + const int kernel_w_ = weight.size(3); + if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) + AT_ERROR("Input shape and kernel shape wont match: (%d x %d vs %d x %d).", + kernel_h_, kernel_w, kernel_h_, kernel_w_); + if (channels != channels_kernel * group) + AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", + channels, channels_kernel * group); + + const int height_out = + (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int width_out = + (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < height_out * 
width_out) { + // Resize plane and fill with ones... + ones = at::ones({height_out, width_out}, input.options()); + } + + grad_input = grad_input.view({batch, channels, height, width}); + columns = at::zeros({channels * kernel_h * kernel_w, height_out * width_out}, + input.options()); + + grad_output = + grad_output.view({grad_output.size(0), group, grad_output.size(1) / group, + grad_output.size(2), grad_output.size(3)}); + + for (int b = 0; b < batch; b++) { + // divide int group + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view({group, weight.size(0) / group, weight.size(1), + weight.size(2), weight.size(3)}); + + for (int g = 0; g < group; g++) { + columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), + grad_output[b][g].flatten(1), 0.0f, 1.0f); + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), + weight.size(3), weight.size(4)}); + + // gradient w.r.t. input coordinate data + modulated_deformable_col2im_coord_cuda( + columns, input[b], offset[b], mask[b], 1, channels, height, width, + height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, + stride_w, dilation_h, dilation_w, deformable_group, grad_offset[b], + grad_mask[b]); + // gradient w.r.t. input data + modulated_deformable_col2im_cuda( + columns, offset[b], mask[b], 1, channels, height, width, height_out, + width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, deformable_group, grad_input[b]); + + // gradient w.r.t. weight, dWeight should accumulate across the batch and + // group + modulated_deformable_im2col_cuda( + input[b], offset[b], mask[b], 1, channels, height, width, height_out, + width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, deformable_group, columns); + + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + grad_weight = grad_weight.view({group, grad_weight.size(0) / group, + grad_weight.size(1), grad_weight.size(2), + grad_weight.size(3)}); + if (with_bias) + grad_bias = grad_bias.view({group, grad_bias.size(0) / group}); + + for (int g = 0; g < group; g++) { + grad_weight[g] = + grad_weight[g] + .flatten(1) + .addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1)) + .view_as(grad_weight[g]); + if (with_bias) { + grad_bias[g] = + grad_bias[g] + .view({-1, 1}) + .addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1})) + .view(-1); + } + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1), + grad_weight.size(2), grad_weight.size(3), + grad_weight.size(4)}); + if (with_bias) + grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)}); + } + grad_output = grad_output.view({grad_output.size(0) * grad_output.size(1), + grad_output.size(2), grad_output.size(3), + grad_output.size(4)}); +} diff --git a/maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.cu b/maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.cu new file mode 100644 index 000000000..b4f8813b4 --- /dev/null +++ b/maskrcnn_benchmark/csrc/cuda/deform_conv_kernel_cuda.cu @@ -0,0 +1,874 @@ +/*! + ******************* BEGIN Caffe Copyright Notice and Disclaimer **************** + * + * COPYRIGHT + * + * All contributions by the University of California: + * Copyright (c) 2014-2017 The Regents of the University of California (Regents) + * All rights reserved. 
+ * + * All other contributions: + * Copyright (c) 2014-2017, the respective contributors + * All rights reserved. + * + * Caffe uses a shared copyright model: each contributor holds copyright over + * their contributions to Caffe. The project versioning records all such + * contribution and copyright details. If a contributor wants to further mark + * their specific copyright on a particular contribution, they should indicate + * their copyright solely in the commit message of the change when it is + * committed. + * + * LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * CONTRIBUTION AGREEMENT + * + * By contributing to the BVLC/caffe repository through pull-request, comment, + * or otherwise, the contributor releases their content to the + * license and copyright terms herein. + * + ***************** END Caffe Copyright Notice and Disclaimer ******************** + * + * Copyright (c) 2018 Microsoft + * Licensed under The MIT License [see LICENSE for details] + * \file modulated_deformable_im2col.cuh + * \brief Function definitions of converting an image to + * column matrix based on kernel, padding, dilation, and offset. + * These functions are mainly used in deformable convolution operators. 
+ * \ref: https://arxiv.org/abs/1703.06211 + * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng + */ + +// modify from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu + + +#include +#include +#include +#include +#include + +using namespace at; + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ + i += blockDim.x * gridDim.x) + +const int CUDA_NUM_THREADS = 1024; +const int kMaxGridNum = 65535; +inline int GET_BLOCKS(const int N) +{ + return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); +} + +/* +const int CUDA_NUM_THREADS = 1024; + +inline int GET_BLOCKS(const int N) +{ + return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; +}*/ + +template +__device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width, + const int height, const int width, scalar_t h, scalar_t w) +{ + + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = bottom_data[h_low * data_width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = bottom_data[h_low * data_width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = bottom_data[h_high * data_width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = bottom_data[h_high * data_width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, + const int h, const int w, const int height, const int width) +{ + + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) + { + //empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +__device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, + const int height, const int width, const scalar_t *im_data, + const int data_width, const int bp_dir) +{ + + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) + { + //empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + + if (bp_dir == 0) + { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - 
argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; + } + else if (bp_dir == 1) + { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +__global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int channel_per_deformable_group, + const int batch_size, const int num_channels, const int deformable_group, + const int height_col, const int width_col, + scalar_t *data_col) +{ + CUDA_KERNEL_LOOP(index, n) + { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + //const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; + const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; + const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) + { + for (int j = 0; j < kernel_w; ++j) + { + const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t val = static_cast(0); + const scalar_t h_im = h_in + i * dilation_h + offset_h; + const scalar_t w_im = w_in + j * dilation_w + offset_w; + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) + { + //const scalar_t map_h = i * dilation_h + offset_h; + //const scalar_t map_w = j * dilation_w + offset_w; + //const int cur_height = height - h_in; + //const int cur_width = width - w_in; + //val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); + val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); + } + *data_col_ptr = val; + data_col_ptr += batch_size * height_col * width_col; + } + } + } +} + +void deformable_im2col( + const at::Tensor 
data_im, const at::Tensor data_offset, const int channels, + const int height, const int width, const int ksize_h, const int ksize_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int parallel_imgs, + const int deformable_group, at::Tensor data_col) +{ + // num_axes should be smaller than block size + // todo: check parallel_imgs is correctly passed in + int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = channels * height_col * width_col * parallel_imgs; + int channel_per_deformable_group = channels / deformable_group; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_im.type(), "deformable_im2col_gpu", ([&] { + const scalar_t *data_im_ = data_im.data(); + const scalar_t *data_offset_ = data_offset.data(); + scalar_t *data_col_ = data_col.data(); + + deformable_im2col_gpu_kernel<<>>( + num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w, + pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, + channel_per_deformable_group, parallel_imgs, channels, deformable_group, + height_col, width_col, data_col_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in deformable_im2col: %s\n", cudaGetErrorString(err)); + } +} + +template +__global__ void deformable_col2im_gpu_kernel( + const int n, const scalar_t *data_col, const scalar_t *data_offset, + const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int deformable_group, + const int height_col, const int width_col, + scalar_t *grad_im) +{ + CUDA_KERNEL_LOOP(index, n) + { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * + 2 * kernel_h * kernel_w * height_col * width_col; + const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; + const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const scalar_t cur_top_grad = data_col[index]; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) + { + for (int dx = -2; dx <= 2; dx++) + { + if (cur_h + dy >= 0 && cur_h + dy < height && + cur_w + dx >= 0 && cur_w + dx < width && + abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + 
dx)) < 1) + { + int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + +void deformable_col2im( + const at::Tensor data_col, const at::Tensor data_offset, const int channels, + const int height, const int width, const int ksize_h, + const int ksize_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int parallel_imgs, const int deformable_group, + at::Tensor grad_im) +{ + + // todo: make sure parallel_imgs is passed in correctly + int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; + int channel_per_deformable_group = channels / deformable_group; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.type(), "deformable_col2im_gpu", ([&] { + const scalar_t *data_col_ = data_col.data(); + const scalar_t *data_offset_ = data_offset.data(); + scalar_t *grad_im_ = grad_im.data(); + + deformable_col2im_gpu_kernel<<>>( + num_kernels, data_col_, data_offset_, channels, height, width, ksize_h, + ksize_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, channel_per_deformable_group, + parallel_imgs, deformable_group, height_col, width_col, grad_im_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in deformable_col2im: %s\n", cudaGetErrorString(err)); + } +} + +template +__global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, + const scalar_t *data_im, const scalar_t *data_offset, + const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int offset_channels, const int deformable_group, + const int height_col, const int width_col, scalar_t *grad_offset) +{ + CUDA_KERNEL_LOOP(index, n) + { + scalar_t val = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * + batch_size * width_col * height_col; + const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * + channel_per_deformable_group / kernel_h / kernel_w * height * width; + const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * + kernel_h * kernel_w * height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) + { + const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / 
batch_size) % kernel_w; + int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t inv_h = h_in + i * dilation_h + offset_h; + scalar_t inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) + { + inv_h = inv_w = -2; + } + const scalar_t weight = get_coordinate_weight( + inv_h, inv_w, + height, width, data_im_ptr + cnt * height * width, width, bp_dir); + val += weight * data_col_ptr[col_pos]; + cnt += 1; + } + + grad_offset[index] = val; + } +} + +void deformable_col2im_coord( + const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, + const int channels, const int height, const int width, const int ksize_h, + const int ksize_w, const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int dilation_h, const int dilation_w, + const int parallel_imgs, const int deformable_group, at::Tensor grad_offset) +{ + + int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; + int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.type(), "deformable_col2im_coord_gpu", ([&] { + const scalar_t *data_col_ = data_col.data(); + const scalar_t *data_im_ = data_im.data(); + const scalar_t *data_offset_ = data_offset.data(); + scalar_t *grad_offset_ = grad_offset.data(); + + deformable_col2im_coord_gpu_kernel<<>>( + num_kernels, data_col_, data_im_, data_offset_, channels, height, width, + ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, channel_per_deformable_group, + parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, + height_col, width_col, grad_offset_); + })); +} + +template +__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width, + const int height, const int width, scalar_t h, scalar_t w) +{ + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = bottom_data[h_low * data_width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = bottom_data[h_low * data_width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = bottom_data[h_high * data_width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = bottom_data[h_high * data_width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, + const int h, const int w, const 
int height, const int width) +{ + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) + { + //empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +__device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, + const int height, const int width, const scalar_t *im_data, + const int data_width, const int bp_dir) +{ + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) + { + //empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + + if (bp_dir == 0) + { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; + } + else if (bp_dir == 1) + { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +__global__ void modulated_deformable_im2col_gpu_kernel(const int n, + const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int num_channels, const int deformable_group, + const int height_col, const int width_col, + scalar_t *data_col) +{ + CUDA_KERNEL_LOOP(index, n) + { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + 
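+    // Reading of the indexing above: one thread handles one (image, input
+    // channel, output y, output x) position. The i/j loop below emits
+    // kernel_h * kernel_w column entries for that position, each bilinearly
+    // sampled at its offset-shifted location and scaled by the modulation mask.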
const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + + scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + //const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; + const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; + const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; + + const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) + { + for (int j = 0; j < kernel_w; ++j) + { + const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; + const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + scalar_t val = static_cast(0); + const scalar_t h_im = h_in + i * dilation_h + offset_h; + const scalar_t w_im = w_in + j * dilation_w + offset_w; + //if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) + { + //const float map_h = i * dilation_h + offset_h; + //const float map_w = j * dilation_w + offset_w; + //const int cur_height = height - h_in; + //const int cur_width = width - w_in; + //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); + val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); + } + *data_col_ptr = val * mask; + data_col_ptr += batch_size * height_col * width_col; + //data_col_ptr += height_col * width_col; + } + } + } +} + +template +__global__ void modulated_deformable_col2im_gpu_kernel(const int n, + const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask, + const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int deformable_group, + const int height_col, const int width_col, + scalar_t *grad_im) +{ + CUDA_KERNEL_LOOP(index, n) + { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; + const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; + const int data_offset_h_ptr = ((2 * (i * 
kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; + const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const scalar_t cur_top_grad = data_col[index] * mask; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) + { + for (int dx = -2; dx <= 2; dx++) + { + if (cur_h + dy >= 0 && cur_h + dy < height && + cur_w + dx >= 0 && cur_w + dx < width && + abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + dx)) < 1) + { + int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + +template +__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n, + const scalar_t *data_col, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, + const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int offset_channels, const int deformable_group, + const int height_col, const int width_col, + scalar_t *grad_offset, scalar_t *grad_mask) +{ + CUDA_KERNEL_LOOP(index, n) + { + scalar_t val = 0, mval = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; + const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; + const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; + const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) + { + const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const 
int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); + const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + scalar_t inv_h = h_in + i * dilation_h + offset_h; + scalar_t inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) + { + inv_h = inv_w = -2; + } + else + { + mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); + } + const scalar_t weight = dmcn_get_coordinate_weight( + inv_h, inv_w, + height, width, data_im_ptr + cnt * height * width, width, bp_dir); + val += weight * data_col_ptr[col_pos] * mask; + cnt += 1; + } + // KERNEL_ASSIGN(grad_offset[index], offset_req, val); + grad_offset[index] = val; + if (offset_c % 2 == 0) + // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval); + grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval; + } +} + +void modulated_deformable_im2col_cuda( + const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, + const int batch_size, const int channels, const int height_im, const int width_im, + const int height_col, const int width_col, const int kernel_h, const int kenerl_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int deformable_group, at::Tensor data_col) +{ + // num_axes should be smaller than block size + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = channels * batch_size * height_col * width_col; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_im.type(), "modulated_deformable_im2col_gpu", ([&] { + const scalar_t *data_im_ = data_im.data(); + const scalar_t *data_offset_ = data_offset.data(); + const scalar_t *data_mask_ = data_mask.data(); + scalar_t *data_col_ = data_col.data(); + + modulated_deformable_im2col_gpu_kernel<<>>( + num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w, + pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, + batch_size, channels, deformable_group, height_col, width_col, data_col_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); + } +} + +void modulated_deformable_col2im_cuda( + const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask, + const int batch_size, const int channels, const int height_im, const int width_im, + const int height_col, const int width_col, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int deformable_group, at::Tensor grad_im) +{ + + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; + + 
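+  // Launch sizing, as implied by num_kernels above: one thread per element of
+  // the column buffer (channels * kernel_h * kernel_w * batch * H_out * W_out).
+  // Each thread scatters its gradient into grad_im with atomicAdd over the
+  // bilinear footprint of its sampling location, so overlapping taps accumulate.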
AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.type(), "modulated_deformable_col2im_gpu", ([&] { + const scalar_t *data_col_ = data_col.data(); + const scalar_t *data_offset_ = data_offset.data(); + const scalar_t *data_mask_ = data_mask.data(); + scalar_t *grad_im_ = grad_im.data(); + + modulated_deformable_col2im_gpu_kernel<<>>( + num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im, + kernel_h, kernel_w, pad_h, pad_h, stride_h, stride_w, + dilation_h, dilation_w, channel_per_deformable_group, + batch_size, deformable_group, height_col, width_col, grad_im_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); + } +} + +void modulated_deformable_col2im_coord_cuda( + const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, + const int batch_size, const int channels, const int height_im, const int width_im, + const int height_col, const int width_col, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int deformable_group, + at::Tensor grad_offset, at::Tensor grad_mask) +{ + const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; + const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.type(), "modulated_deformable_col2im_coord_gpu", ([&] { + const scalar_t *data_col_ = data_col.data(); + const scalar_t *data_im_ = data_im.data(); + const scalar_t *data_offset_ = data_offset.data(); + const scalar_t *data_mask_ = data_mask.data(); + scalar_t *grad_offset_ = grad_offset.data(); + scalar_t *grad_mask_ = grad_mask.data(); + + modulated_deformable_col2im_coord_gpu_kernel<<>>( + num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im, + kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, channel_per_deformable_group, + batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, + grad_offset_, grad_mask_); + })); + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err)); + } +} diff --git a/maskrcnn_benchmark/csrc/cuda/deform_pool_cuda.cu b/maskrcnn_benchmark/csrc/cuda/deform_pool_cuda.cu new file mode 100644 index 000000000..71f305af9 --- /dev/null +++ b/maskrcnn_benchmark/csrc/cuda/deform_pool_cuda.cu @@ -0,0 +1,87 @@ +// modify from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/modulated_dcn_cuda.c + +// based on +// author: Charles Shang +// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu + +#include +#include + +#include +#include + +#include +#include +#include + + +void DeformablePSROIPoolForward( + const at::Tensor data, const at::Tensor bbox, const at::Tensor trans, + at::Tensor out, at::Tensor top_count, const int batch, const int channels, + const int height, const int width, const int num_bbox, + const int channels_trans, const int no_trans, const float spatial_scale, + const int output_dim, const int group_size, const int pooled_size, + const int part_size, const int sample_per_part, const float trans_std); + +void DeformablePSROIPoolBackwardAcc( + 
const at::Tensor out_grad, const at::Tensor data, const at::Tensor bbox, + const at::Tensor trans, const at::Tensor top_count, at::Tensor in_grad, + at::Tensor trans_grad, const int batch, const int channels, + const int height, const int width, const int num_bbox, + const int channels_trans, const int no_trans, const float spatial_scale, + const int output_dim, const int group_size, const int pooled_size, + const int part_size, const int sample_per_part, const float trans_std); + +void deform_psroi_pooling_cuda_forward( + at::Tensor input, at::Tensor bbox, at::Tensor trans, at::Tensor out, + at::Tensor top_count, const int no_trans, const float spatial_scale, + const int output_dim, const int group_size, const int pooled_size, + const int part_size, const int sample_per_part, const float trans_std) +{ + AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + const int channels_trans = no_trans ? 2 : trans.size(1); + + const int num_bbox = bbox.size(0); + if (num_bbox != out.size(0)) + AT_ERROR("Output shape and bbox number wont match: (%d vs %d).", + out.size(0), num_bbox); + + DeformablePSROIPoolForward( + input, bbox, trans, out, top_count, batch, channels, height, width, + num_bbox, channels_trans, no_trans, spatial_scale, output_dim, group_size, + pooled_size, part_size, sample_per_part, trans_std); +} + +void deform_psroi_pooling_cuda_backward( + at::Tensor out_grad, at::Tensor input, at::Tensor bbox, at::Tensor trans, + at::Tensor top_count, at::Tensor input_grad, at::Tensor trans_grad, + const int no_trans, const float spatial_scale, const int output_dim, + const int group_size, const int pooled_size, const int part_size, + const int sample_per_part, const float trans_std) +{ + AT_CHECK(out_grad.is_contiguous(), "out_grad tensor has to be contiguous"); + AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + const int channels_trans = no_trans ? 2 : trans.size(1); + + const int num_bbox = bbox.size(0); + if (num_bbox != out_grad.size(0)) + AT_ERROR("Output shape and bbox number wont match: (%d vs %d).", + out_grad.size(0), num_bbox); + + DeformablePSROIPoolBackwardAcc( + out_grad, input, bbox, trans, top_count, input_grad, trans_grad, batch, + channels, height, width, num_bbox, channels_trans, no_trans, + spatial_scale, output_dim, group_size, pooled_size, part_size, + sample_per_part, trans_std); +} diff --git a/maskrcnn_benchmark/csrc/cuda/deform_pool_kernel_cuda.cu b/maskrcnn_benchmark/csrc/cuda/deform_pool_kernel_cuda.cu new file mode 100644 index 000000000..127899ec6 --- /dev/null +++ b/maskrcnn_benchmark/csrc/cuda/deform_pool_kernel_cuda.cu @@ -0,0 +1,365 @@ +/*! 
+ * Copyright (c) 2017 Microsoft + * Licensed under The MIT License [see LICENSE for details] + * \file deformable_psroi_pooling.cu + * \brief + * \author Yi Li, Guodong Zhang, Jifeng Dai +*/ +/***************** Adapted by Charles Shang *********************/ +// modify from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/cuda/deform_psroi_pooling_cuda.cu + + +#include +#include +#include +#include +#include + +using namespace at; + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ + i < (n); \ + i += blockDim.x * gridDim.x) + +const int CUDA_NUM_THREADS = 1024; +inline int GET_BLOCKS(const int N) +{ + return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; +} + +template +__device__ scalar_t bilinear_interp( + const scalar_t *data, + const scalar_t x, + const scalar_t y, + const int width, + const int height) +{ + int x1 = floor(x); + int x2 = ceil(x); + int y1 = floor(y); + int y2 = ceil(y); + scalar_t dist_x = (scalar_t)(x - x1); + scalar_t dist_y = (scalar_t)(y - y1); + scalar_t value11 = data[y1 * width + x1]; + scalar_t value12 = data[y2 * width + x1]; + scalar_t value21 = data[y1 * width + x2]; + scalar_t value22 = data[y2 * width + x2]; + scalar_t value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12 + dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22; + return value; +} + +template +__global__ void DeformablePSROIPoolForwardKernel( + const int count, + const scalar_t *bottom_data, + const scalar_t spatial_scale, + const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + const scalar_t *bottom_rois, const scalar_t *bottom_trans, + const int no_trans, + const scalar_t trans_std, + const int sample_per_part, + const int output_dim, + const int group_size, + const int part_size, + const int num_classes, + const int channels_each_class, + scalar_t *top_data, + scalar_t *top_count) +{ + CUDA_KERNEL_LOOP(index, count) + { + // The output is in order (n, ctop, ph, pw) + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int ctop = (index / pooled_width / pooled_height) % output_dim; + int n = index / pooled_width / pooled_height / output_dim; + + // [start, end) interval for spatial sampling + const scalar_t *offset_bottom_rois = bottom_rois + n * 5; + int roi_batch_ind = offset_bottom_rois[0]; + scalar_t roi_start_w = (scalar_t)(round(offset_bottom_rois[1])) * spatial_scale - 0.5; + scalar_t roi_start_h = (scalar_t)(round(offset_bottom_rois[2])) * spatial_scale - 0.5; + scalar_t roi_end_w = (scalar_t)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; + scalar_t roi_end_h = (scalar_t)(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; + + // Force too small ROIs to be 1x1 + scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 + scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1); + + // Compute w and h at bottom + scalar_t bin_size_h = roi_height / (scalar_t)(pooled_height); + scalar_t bin_size_w = roi_width / (scalar_t)(pooled_width); + + scalar_t sub_bin_size_h = bin_size_h / (scalar_t)(sample_per_part); + scalar_t sub_bin_size_w = bin_size_w / (scalar_t)(sample_per_part); + + int part_h = floor((scalar_t)(ph) / pooled_height * part_size); + int part_w = floor((scalar_t)(pw) / pooled_width * part_size); + int class_id = ctop / channels_each_class; + scalar_t trans_x = no_trans ? 
(scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; + scalar_t trans_y = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; + + scalar_t wstart = (scalar_t)(pw)*bin_size_w + roi_start_w; + wstart += trans_x * roi_width; + scalar_t hstart = (scalar_t)(ph)*bin_size_h + roi_start_h; + hstart += trans_y * roi_height; + + scalar_t sum = 0; + int count = 0; + int gw = floor((scalar_t)(pw)*group_size / pooled_width); + int gh = floor((scalar_t)(ph)*group_size / pooled_height); + gw = min(max(gw, 0), group_size - 1); + gh = min(max(gh, 0), group_size - 1); + + const scalar_t *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width; + for (int ih = 0; ih < sample_per_part; ih++) + { + for (int iw = 0; iw < sample_per_part; iw++) + { + scalar_t w = wstart + iw * sub_bin_size_w; + scalar_t h = hstart + ih * sub_bin_size_h; + // bilinear interpolation + if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) + { + continue; + } + w = min(max(w, 0.), width - 1.); + h = min(max(h, 0.), height - 1.); + int c = (ctop * group_size + gh) * group_size + gw; + scalar_t val = bilinear_interp(offset_bottom_data + c * height * width, w, h, width, height); + sum += val; + count++; + } + } + top_data[index] = count == 0 ? (scalar_t)(0) : sum / count; + top_count[index] = count; + } +} + +template +__global__ void DeformablePSROIPoolBackwardAccKernel( + const int count, + const scalar_t *top_diff, + const scalar_t *top_count, + const int num_rois, + const scalar_t spatial_scale, + const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + const int output_dim, + scalar_t *bottom_data_diff, scalar_t *bottom_trans_diff, + const scalar_t *bottom_data, + const scalar_t *bottom_rois, + const scalar_t *bottom_trans, + const int no_trans, + const scalar_t trans_std, + const int sample_per_part, + const int group_size, + const int part_size, + const int num_classes, + const int channels_each_class) +{ + CUDA_KERNEL_LOOP(index, count) + { + // The output is in order (n, ctop, ph, pw) + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int ctop = (index / pooled_width / pooled_height) % output_dim; + int n = index / pooled_width / pooled_height / output_dim; + + // [start, end) interval for spatial sampling + const scalar_t *offset_bottom_rois = bottom_rois + n * 5; + int roi_batch_ind = offset_bottom_rois[0]; + scalar_t roi_start_w = (scalar_t)(round(offset_bottom_rois[1])) * spatial_scale - 0.5; + scalar_t roi_start_h = (scalar_t)(round(offset_bottom_rois[2])) * spatial_scale - 0.5; + scalar_t roi_end_w = (scalar_t)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; + scalar_t roi_end_h = (scalar_t)(round(offset_bottom_rois[4]) + 1.) 
* spatial_scale - 0.5; + + // Force too small ROIs to be 1x1 + scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 + scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1); + + // Compute w and h at bottom + scalar_t bin_size_h = roi_height / (scalar_t)(pooled_height); + scalar_t bin_size_w = roi_width / (scalar_t)(pooled_width); + + scalar_t sub_bin_size_h = bin_size_h / (scalar_t)(sample_per_part); + scalar_t sub_bin_size_w = bin_size_w / (scalar_t)(sample_per_part); + + int part_h = floor((scalar_t)(ph) / pooled_height * part_size); + int part_w = floor((scalar_t)(pw) / pooled_width * part_size); + int class_id = ctop / channels_each_class; + scalar_t trans_x = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; + scalar_t trans_y = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; + + scalar_t wstart = (scalar_t)(pw)*bin_size_w + roi_start_w; + wstart += trans_x * roi_width; + scalar_t hstart = (scalar_t)(ph)*bin_size_h + roi_start_h; + hstart += trans_y * roi_height; + + if (top_count[index] <= 0) + { + continue; + } + scalar_t diff_val = top_diff[index] / top_count[index]; + const scalar_t *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width; + scalar_t *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width; + int gw = floor((scalar_t)(pw)*group_size / pooled_width); + int gh = floor((scalar_t)(ph)*group_size / pooled_height); + gw = min(max(gw, 0), group_size - 1); + gh = min(max(gh, 0), group_size - 1); + + for (int ih = 0; ih < sample_per_part; ih++) + { + for (int iw = 0; iw < sample_per_part; iw++) + { + scalar_t w = wstart + iw * sub_bin_size_w; + scalar_t h = hstart + ih * sub_bin_size_h; + // bilinear interpolation + if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) + { + continue; + } + w = min(max(w, 0.), width - 1.); + h = min(max(h, 0.), height - 1.); + int c = (ctop * group_size + gh) * group_size + gw; + // backward on feature + int x0 = floor(w); + int x1 = ceil(w); + int y0 = floor(h); + int y1 = ceil(h); + scalar_t dist_x = w - x0, dist_y = h - y0; + scalar_t q00 = (1 - dist_x) * (1 - dist_y); + scalar_t q01 = (1 - dist_x) * dist_y; + scalar_t q10 = dist_x * (1 - dist_y); + scalar_t q11 = dist_x * dist_y; + int bottom_index_base = c * height * width; + atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val); + atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val); + atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val); + atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val); + + if (no_trans) + { + continue; + } + scalar_t U00 = offset_bottom_data[bottom_index_base + y0 * width + x0]; + scalar_t U01 = offset_bottom_data[bottom_index_base + y1 * width + x0]; + scalar_t U10 = offset_bottom_data[bottom_index_base + y0 * width + x1]; + scalar_t U11 = offset_bottom_data[bottom_index_base + y1 * width + x1]; + scalar_t diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val; + diff_x *= roi_width; + scalar_t diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val; + diff_y *= roi_height; + + atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + 
part_h) * part_size + part_w, diff_x); + atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y); + } + } + } +} + +void DeformablePSROIPoolForward(const at::Tensor data, + const at::Tensor bbox, + const at::Tensor trans, + at::Tensor out, + at::Tensor top_count, + const int batch, + const int channels, + const int height, + const int width, + const int num_bbox, + const int channels_trans, + const int no_trans, + const float spatial_scale, + const int output_dim, + const int group_size, + const int pooled_size, + const int part_size, + const int sample_per_part, + const float trans_std) +{ + const int pooled_height = pooled_size; + const int pooled_width = pooled_size; + const int count = num_bbox * output_dim * pooled_height * pooled_width; + const int num_classes = no_trans ? 1 : channels_trans / 2; + const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data.type(), "deformable_psroi_pool_forward", ([&] { + const scalar_t *bottom_data = data.data(); + const scalar_t *bottom_rois = bbox.data(); + const scalar_t *bottom_trans = no_trans ? NULL : trans.data(); + scalar_t *top_data = out.data(); + scalar_t *top_count_data = top_count.data(); + + DeformablePSROIPoolForwardKernel<<>>( + count, bottom_data, (scalar_t)spatial_scale, channels, height, width, pooled_height, pooled_width, + bottom_rois, bottom_trans, no_trans, (scalar_t)trans_std, sample_per_part, output_dim, + group_size, part_size, num_classes, channels_each_class, top_data, top_count_data); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in DeformablePSROIPoolForward: %s\n", cudaGetErrorString(err)); + } +} + +void DeformablePSROIPoolBackwardAcc(const at::Tensor out_grad, + const at::Tensor data, + const at::Tensor bbox, + const at::Tensor trans, + const at::Tensor top_count, + at::Tensor in_grad, + at::Tensor trans_grad, + const int batch, + const int channels, + const int height, + const int width, + const int num_bbox, + const int channels_trans, + const int no_trans, + const float spatial_scale, + const int output_dim, + const int group_size, + const int pooled_size, + const int part_size, + const int sample_per_part, + const float trans_std) +{ + // LOG(INFO) << "DeformablePSROIPoolBackward"; + const int num_rois = num_bbox; + const int pooled_height = pooled_size; + const int pooled_width = pooled_size; + const int count = num_bbox * output_dim * pooled_height * pooled_width; + const int num_classes = no_trans ? 1 : channels_trans / 2; + const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + out_grad.type(), "deformable_psroi_pool_backward_acc", ([&] { + const scalar_t *top_diff = out_grad.data(); + const scalar_t *bottom_data = data.data(); + const scalar_t *bottom_rois = bbox.data(); + const scalar_t *bottom_trans = no_trans ? NULL : trans.data(); + scalar_t *bottom_data_diff = in_grad.data(); + scalar_t *bottom_trans_diff = no_trans ? 
NULL : trans_grad.data(); + const scalar_t *top_count_data = top_count.data(); + + DeformablePSROIPoolBackwardAccKernel<<>>( + count, top_diff, top_count_data, num_rois, (scalar_t)spatial_scale, channels, height, width, + pooled_height, pooled_width, output_dim, bottom_data_diff, bottom_trans_diff, + bottom_data, bottom_rois, bottom_trans, no_trans, (scalar_t)trans_std, sample_per_part, + group_size, part_size, num_classes, channels_each_class); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in DeformablePSROIPoolForward: %s\n", cudaGetErrorString(err)); + } +} \ No newline at end of file diff --git a/maskrcnn_benchmark/csrc/cuda/vision.h b/maskrcnn_benchmark/csrc/cuda/vision.h index 6d9f8871f..32d3c6956 100644 --- a/maskrcnn_benchmark/csrc/cuda/vision.h +++ b/maskrcnn_benchmark/csrc/cuda/vision.h @@ -58,6 +58,59 @@ at::Tensor ROIPool_backward_cuda(const at::Tensor& grad, at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh); +int deform_conv_forward_cuda(at::Tensor input, at::Tensor weight, + at::Tensor offset, at::Tensor output, + at::Tensor columns, at::Tensor ones, int kW, + int kH, int dW, int dH, int padW, int padH, + int dilationW, int dilationH, int group, + int deformable_group, int im2col_step); + +int deform_conv_backward_input_cuda(at::Tensor input, at::Tensor offset, + at::Tensor gradOutput, at::Tensor gradInput, + at::Tensor gradOffset, at::Tensor weight, + at::Tensor columns, int kW, int kH, int dW, + int dH, int padW, int padH, int dilationW, + int dilationH, int group, + int deformable_group, int im2col_step); + +int deform_conv_backward_parameters_cuda( + at::Tensor input, at::Tensor offset, at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, at::Tensor ones, int kW, int kH, int dW, int dH, + int padW, int padH, int dilationW, int dilationH, int group, + int deformable_group, float scale, int im2col_step); + +void modulated_deform_conv_cuda_forward( + at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, + at::Tensor offset, at::Tensor mask, at::Tensor output, at::Tensor columns, + int kernel_h, int kernel_w, const int stride_h, const int stride_w, + const int pad_h, const int pad_w, const int dilation_h, + const int dilation_w, const int group, const int deformable_group, + const bool with_bias); + +void modulated_deform_conv_cuda_backward( + at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, + at::Tensor offset, at::Tensor mask, at::Tensor columns, + at::Tensor grad_input, at::Tensor grad_weight, at::Tensor grad_bias, + at::Tensor grad_offset, at::Tensor grad_mask, at::Tensor grad_output, + int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, + int pad_w, int dilation_h, int dilation_w, int group, int deformable_group, + const bool with_bias); + +void deform_psroi_pooling_cuda_forward( + at::Tensor input, at::Tensor bbox, at::Tensor trans, at::Tensor out, + at::Tensor top_count, const int no_trans, const float spatial_scale, + const int output_dim, const int group_size, const int pooled_size, + const int part_size, const int sample_per_part, const float trans_std); + +void deform_psroi_pooling_cuda_backward( + at::Tensor out_grad, at::Tensor input, at::Tensor bbox, at::Tensor trans, + at::Tensor top_count, at::Tensor input_grad, at::Tensor trans_grad, + const int no_trans, const float spatial_scale, const int output_dim, + const int group_size, const int pooled_size, const int part_size, + const int sample_per_part, 
const float trans_std); + + at::Tensor compute_flow_cuda(const at::Tensor& boxes, const int height, const int width); diff --git a/maskrcnn_benchmark/csrc/deform_conv.h b/maskrcnn_benchmark/csrc/deform_conv.h new file mode 100644 index 000000000..a5930e390 --- /dev/null +++ b/maskrcnn_benchmark/csrc/deform_conv.h @@ -0,0 +1,191 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +#pragma once +#include "cpu/vision.h" + +#ifdef WITH_CUDA +#include "cuda/vision.h" +#endif + + +// Interface for Python +int deform_conv_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor offset, + at::Tensor output, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) +{ + if (input.type().is_cuda()) { +#ifdef WITH_CUDA + return deform_conv_forward_cuda( + input, weight, offset, output, columns, ones, + kW, kH, dW, dH, padW, padH, dilationW, dilationH, + group, deformable_group, im2col_step + ); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + + +int deform_conv_backward_input( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradInput, + at::Tensor gradOffset, + at::Tensor weight, + at::Tensor columns, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) +{ + if (input.type().is_cuda()) { +#ifdef WITH_CUDA + return deform_conv_backward_input_cuda( + input, offset, gradOutput, gradInput, gradOffset, weight, columns, + kW, kH, dW, dH, padW, padH, dilationW, dilationH, + group, deformable_group, im2col_step + ); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + + +int deform_conv_backward_parameters( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + float scale, + int im2col_step) +{ + if (input.type().is_cuda()) { +#ifdef WITH_CUDA + return deform_conv_backward_parameters_cuda( + input, offset, gradOutput, gradWeight, columns, ones, + kW, kH, dW, dH, padW, padH, dilationW, dilationH, + group, deformable_group, scale, im2col_step + ); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + + +void modulated_deform_conv_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor output, + at::Tensor columns, + int kernel_h, + int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const bool with_bias) +{ + if (input.type().is_cuda()) { +#ifdef WITH_CUDA + return modulated_deform_conv_cuda_forward( + input, weight, bias, ones, offset, mask, output, columns, + kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, + group, deformable_group, with_bias + ); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + + +void modulated_deform_conv_backward( + at::Tensor input, + at::Tensor weight, + at::Tensor 
bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor columns, + at::Tensor grad_input, + at::Tensor grad_weight, + at::Tensor grad_bias, + at::Tensor grad_offset, + at::Tensor grad_mask, + at::Tensor grad_output, + int kernel_h, + int kernel_w, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int group, + int deformable_group, + const bool with_bias) +{ + if (input.type().is_cuda()) { +#ifdef WITH_CUDA + return modulated_deform_conv_cuda_backward( + input, weight, bias, ones, offset, mask, columns, + grad_input, grad_weight, grad_bias, grad_offset, grad_mask, grad_output, + kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, + group, deformable_group, with_bias + ); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} \ No newline at end of file diff --git a/maskrcnn_benchmark/csrc/deform_pool.h b/maskrcnn_benchmark/csrc/deform_pool.h new file mode 100644 index 000000000..234223809 --- /dev/null +++ b/maskrcnn_benchmark/csrc/deform_pool.h @@ -0,0 +1,70 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +#pragma once +#include "cpu/vision.h" + +#ifdef WITH_CUDA +#include "cuda/vision.h" +#endif + + +// Interface for Python +void deform_psroi_pooling_forward( + at::Tensor input, + at::Tensor bbox, + at::Tensor trans, + at::Tensor out, + at::Tensor top_count, + const int no_trans, + const float spatial_scale, + const int output_dim, + const int group_size, + const int pooled_size, + const int part_size, + const int sample_per_part, + const float trans_std) +{ + if (input.type().is_cuda()) { +#ifdef WITH_CUDA + return deform_psroi_pooling_cuda_forward( + input, bbox, trans, out, top_count, + no_trans, spatial_scale, output_dim, group_size, + pooled_size, part_size, sample_per_part, trans_std + ); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + + +void deform_psroi_pooling_backward( + at::Tensor out_grad, + at::Tensor input, + at::Tensor bbox, + at::Tensor trans, + at::Tensor top_count, + at::Tensor input_grad, + at::Tensor trans_grad, + const int no_trans, + const float spatial_scale, + const int output_dim, + const int group_size, + const int pooled_size, + const int part_size, + const int sample_per_part, + const float trans_std) +{ + if (input.type().is_cuda()) { +#ifdef WITH_CUDA + return deform_psroi_pooling_cuda_backward( + out_grad, input, bbox, trans, top_count, input_grad, trans_grad, + no_trans, spatial_scale, output_dim, group_size, pooled_size, + part_size, sample_per_part, trans_std + ); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} diff --git a/maskrcnn_benchmark/csrc/vision.cpp b/maskrcnn_benchmark/csrc/vision.cpp index 8234f43b1..30971995d 100644 --- a/maskrcnn_benchmark/csrc/vision.cpp +++ b/maskrcnn_benchmark/csrc/vision.cpp @@ -3,6 +3,8 @@ #include "ROIAlign.h" #include "ROIPool.h" #include "SigmoidFocalLoss.h" +#include "deform_conv.h" +#include "deform_pool.h" PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("nms", &nms, "non-maximum suppression"); @@ -12,4 +14,12 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("roi_pool_backward", &ROIPool_backward, "ROIPool_backward"); m.def("sigmoid_focalloss_forward", &SigmoidFocalLoss_forward, "SigmoidFocalLoss_forward"); m.def("sigmoid_focalloss_backward", &SigmoidFocalLoss_backward, "SigmoidFocalLoss_backward"); -} + // dcn-v2 + 
m.def("deform_conv_forward", &deform_conv_forward, "deform_conv_forward"); + m.def("deform_conv_backward_input", &deform_conv_backward_input, "deform_conv_backward_input"); + m.def("deform_conv_backward_parameters", &deform_conv_backward_parameters, "deform_conv_backward_parameters"); + m.def("modulated_deform_conv_forward", &modulated_deform_conv_forward, "modulated_deform_conv_forward"); + m.def("modulated_deform_conv_backward", &modulated_deform_conv_backward, "modulated_deform_conv_backward"); + m.def("deform_psroi_pooling_forward", &deform_psroi_pooling_forward, "deform_psroi_pooling_forward"); + m.def("deform_psroi_pooling_backward", &deform_psroi_pooling_backward, "deform_psroi_pooling_backward"); +} \ No newline at end of file diff --git a/maskrcnn_benchmark/layers/__init__.py b/maskrcnn_benchmark/layers/__init__.py index bab50abae..0e1ccc513 100644 --- a/maskrcnn_benchmark/layers/__init__.py +++ b/maskrcnn_benchmark/layers/__init__.py @@ -3,6 +3,7 @@ from .batch_norm import FrozenBatchNorm2d from .misc import Conv2d +from .misc import DFConv2d from .misc import ConvTranspose2d from .misc import BatchNorm2d from .misc import interpolate @@ -13,9 +14,34 @@ from .roi_pool import roi_pool from .smooth_l1_loss import smooth_l1_loss from .sigmoid_focal_loss import SigmoidFocalLoss +from .dcn.deform_conv_func import deform_conv, modulated_deform_conv +from .dcn.deform_conv_module import DeformConv, ModulatedDeformConv, ModulatedDeformConvPack +from .dcn.deform_pool_func import deform_roi_pooling +from .dcn.deform_pool_module import DeformRoIPooling, DeformRoIPoolingPack, ModulatedDeformRoIPoolingPack -__all__ = ["nms", "roi_align", "ROIAlign", "roi_pool", "ROIPool", - "smooth_l1_loss", "Conv2d", "ConvTranspose2d", "interpolate", - "BatchNorm2d", "FrozenBatchNorm2d", "SigmoidFocalLoss" - ] + +__all__ = [ + "nms", + "roi_align", + "ROIAlign", + "roi_pool", + "ROIPool", + "smooth_l1_loss", + "Conv2d", + "DFConv2d", + "ConvTranspose2d", + "interpolate", + "BatchNorm2d", + "FrozenBatchNorm2d", + "SigmoidFocalLoss", + 'deform_conv', + 'modulated_deform_conv', + 'DeformConv', + 'ModulatedDeformConv', + 'ModulatedDeformConvPack', + 'deform_roi_pooling', + 'DeformRoIPooling', + 'DeformRoIPoolingPack', + 'ModulatedDeformRoIPoolingPack', +] diff --git a/maskrcnn_benchmark/layers/dcn/__init__.py b/maskrcnn_benchmark/layers/dcn/__init__.py new file mode 100644 index 000000000..22fe18ff3 --- /dev/null +++ b/maskrcnn_benchmark/layers/dcn/__init__.py @@ -0,0 +1,3 @@ +# +# Copied From [mmdetection](https://github.com/open-mmlab/mmdetection/tree/master/mmdet/ops/dcn) +# \ No newline at end of file diff --git a/maskrcnn_benchmark/layers/dcn/deform_conv_func.py b/maskrcnn_benchmark/layers/dcn/deform_conv_func.py new file mode 100644 index 000000000..ddc92bb0c --- /dev/null +++ b/maskrcnn_benchmark/layers/dcn/deform_conv_func.py @@ -0,0 +1,259 @@ +import torch +from torch.autograd import Function +from torch.nn.modules.utils import _pair + +from maskrcnn_benchmark import _C + + +class DeformConvFunction(Function): + + @staticmethod + def forward( + ctx, + input, + offset, + weight, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + im2col_step=64 + ): + if input is not None and input.dim() != 4: + raise ValueError( + "Expected 4D tensor as input, got {}D tensor instead.".format( + input.dim())) + ctx.stride = _pair(stride) + ctx.padding = _pair(padding) + ctx.dilation = _pair(dilation) + ctx.groups = groups + ctx.deformable_groups = deformable_groups + ctx.im2col_step = im2col_step + 
+ ctx.save_for_backward(input, offset, weight) + + output = input.new_empty( + DeformConvFunction._output_size(input, weight, ctx.padding, + ctx.dilation, ctx.stride)) + + ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones + + if not input.is_cuda: + raise NotImplementedError + else: + cur_im2col_step = min(ctx.im2col_step, input.shape[0]) + assert (input.shape[0] % + cur_im2col_step) == 0, 'im2col step must divide batchsize' + _C.deform_conv_forward( + input, + weight, + offset, + output, + ctx.bufs_[0], + ctx.bufs_[1], + weight.size(3), + weight.size(2), + ctx.stride[1], + ctx.stride[0], + ctx.padding[1], + ctx.padding[0], + ctx.dilation[1], + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, + cur_im2col_step + ) + return output + + @staticmethod + def backward(ctx, grad_output): + input, offset, weight = ctx.saved_tensors + + grad_input = grad_offset = grad_weight = None + + if not grad_output.is_cuda: + raise NotImplementedError + else: + cur_im2col_step = min(ctx.im2col_step, input.shape[0]) + assert (input.shape[0] % + cur_im2col_step) == 0, 'im2col step must divide batchsize' + + if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + _C.deform_conv_backward_input( + input, + offset, + grad_output, + grad_input, + grad_offset, + weight, + ctx.bufs_[0], + weight.size(3), + weight.size(2), + ctx.stride[1], + ctx.stride[0], + ctx.padding[1], + ctx.padding[0], + ctx.dilation[1], + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, + cur_im2col_step + ) + + if ctx.needs_input_grad[2]: + grad_weight = torch.zeros_like(weight) + _C.deform_conv_backward_parameters( + input, + offset, + grad_output, + grad_weight, + ctx.bufs_[0], + ctx.bufs_[1], + weight.size(3), + weight.size(2), + ctx.stride[1], + ctx.stride[0], + ctx.padding[1], + ctx.padding[0], + ctx.dilation[1], + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, + 1, + cur_im2col_step + ) + + return (grad_input, grad_offset, grad_weight, None, None, None, None, None) + + @staticmethod + def _output_size(input, weight, padding, dilation, stride): + channels = weight.size(0) + output_size = (input.size(0), channels) + for d in range(input.dim() - 2): + in_size = input.size(d + 2) + pad = padding[d] + kernel = dilation[d] * (weight.size(d + 2) - 1) + 1 + stride_ = stride[d] + output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, ) + if not all(map(lambda s: s > 0, output_size)): + raise ValueError( + "convolution input is too small (output would be {})".format( + 'x'.join(map(str, output_size)))) + return output_size + + +class ModulatedDeformConvFunction(Function): + + @staticmethod + def forward( + ctx, + input, + offset, + mask, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1 + ): + ctx.stride = stride + ctx.padding = padding + ctx.dilation = dilation + ctx.groups = groups + ctx.deformable_groups = deformable_groups + ctx.with_bias = bias is not None + if not ctx.with_bias: + bias = input.new_empty(1) # fake tensor + if not input.is_cuda: + raise NotImplementedError + if weight.requires_grad or mask.requires_grad or offset.requires_grad \ + or input.requires_grad: + ctx.save_for_backward(input, offset, mask, weight, bias) + output = input.new_empty( + ModulatedDeformConvFunction._infer_shape(ctx, input, weight)) + ctx._bufs = [input.new_empty(0), input.new_empty(0)] + _C.modulated_deform_conv_forward( + input, + weight, + bias, + ctx._bufs[0], + offset, + mask, + 
output, + ctx._bufs[1], + weight.shape[2], + weight.shape[3], + ctx.stride, + ctx.stride, + ctx.padding, + ctx.padding, + ctx.dilation, + ctx.dilation, + ctx.groups, + ctx.deformable_groups, + ctx.with_bias + ) + return output + + @staticmethod + def backward(ctx, grad_output): + if not grad_output.is_cuda: + raise NotImplementedError + input, offset, mask, weight, bias = ctx.saved_tensors + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + grad_mask = torch.zeros_like(mask) + grad_weight = torch.zeros_like(weight) + grad_bias = torch.zeros_like(bias) + _C.modulated_deform_conv_backward( + input, + weight, + bias, + ctx._bufs[0], + offset, + mask, + ctx._bufs[1], + grad_input, + grad_weight, + grad_bias, + grad_offset, + grad_mask, + grad_output, + weight.shape[2], + weight.shape[3], + ctx.stride, + ctx.stride, + ctx.padding, + ctx.padding, + ctx.dilation, + ctx.dilation, + ctx.groups, + ctx.deformable_groups, + ctx.with_bias + ) + if not ctx.with_bias: + grad_bias = None + + return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias, + None, None, None, None, None) + + @staticmethod + def _infer_shape(ctx, input, weight): + n = input.size(0) + channels_out = weight.size(0) + height, width = input.shape[2:4] + kernel_h, kernel_w = weight.shape[2:4] + height_out = (height + 2 * ctx.padding - + (ctx.dilation * (kernel_h - 1) + 1)) // ctx.stride + 1 + width_out = (width + 2 * ctx.padding - + (ctx.dilation * (kernel_w - 1) + 1)) // ctx.stride + 1 + return n, channels_out, height_out, width_out + + +deform_conv = DeformConvFunction.apply +modulated_deform_conv = ModulatedDeformConvFunction.apply diff --git a/maskrcnn_benchmark/layers/dcn/deform_conv_module.py b/maskrcnn_benchmark/layers/dcn/deform_conv_module.py new file mode 100644 index 000000000..e6b58c840 --- /dev/null +++ b/maskrcnn_benchmark/layers/dcn/deform_conv_module.py @@ -0,0 +1,177 @@ +import math + +import torch +import torch.nn as nn +from torch.nn.modules.utils import _pair + +from .deform_conv_func import deform_conv, modulated_deform_conv + + +class DeformConv(nn.Module): + + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + bias=False + ): + assert not bias + super(DeformConv, self).__init__() + self.with_bias = bias + + assert in_channels % groups == 0, \ + 'in_channels {} cannot be divisible by groups {}'.format( + in_channels, groups) + assert out_channels % groups == 0, \ + 'out_channels {} cannot be divisible by groups {}'.format( + out_channels, groups) + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = _pair(stride) + self.padding = _pair(padding) + self.dilation = _pair(dilation) + self.groups = groups + self.deformable_groups = deformable_groups + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // self.groups, + *self.kernel_size)) + + self.reset_parameters() + + def reset_parameters(self): + n = self.in_channels + for k in self.kernel_size: + n *= k + stdv = 1. 
/ math.sqrt(n) + self.weight.data.uniform_(-stdv, stdv) + + def forward(self, input, offset): + return deform_conv(input, offset, self.weight, self.stride, + self.padding, self.dilation, self.groups, + self.deformable_groups) + + def __repr__(self): + return "".join([ + "{}(".format(self.__class__.__name__), + "in_channels={}, ".format(self.in_channels), + "out_channels={}, ".format(self.out_channels), + "kernel_size={}, ".format(self.kernel_size), + "stride={}, ".format(self.stride), + "dilation={}, ".format(self.dilation), + "padding={}, ".format(self.padding), + "groups={}, ".format(self.groups), + "deformable_groups={}, ".format(self.deformable_groups), + "bias={})".format(self.with_bias), + ]) + + +class ModulatedDeformConv(nn.Module): + + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + bias=True + ): + super(ModulatedDeformConv, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = stride + self.padding = padding + self.dilation = dilation + self.groups = groups + self.deformable_groups = deformable_groups + self.with_bias = bias + + self.weight = nn.Parameter(torch.Tensor( + out_channels, + in_channels // groups, + *self.kernel_size + )) + if bias: + self.bias = nn.Parameter(torch.Tensor(out_channels)) + else: + self.register_parameter('bias', None) + self.reset_parameters() + + def reset_parameters(self): + n = self.in_channels + for k in self.kernel_size: + n *= k + stdv = 1. / math.sqrt(n) + self.weight.data.uniform_(-stdv, stdv) + if self.bias is not None: + self.bias.data.zero_() + + def forward(self, input, offset, mask): + return modulated_deform_conv( + input, offset, mask, self.weight, self.bias, self.stride, + self.padding, self.dilation, self.groups, self.deformable_groups) + + def __repr__(self): + return "".join([ + "{}(".format(self.__class__.__name__), + "in_channels={}, ".format(self.in_channels), + "out_channels={}, ".format(self.out_channels), + "kernel_size={}, ".format(self.kernel_size), + "stride={}, ".format(self.stride), + "dilation={}, ".format(self.dilation), + "padding={}, ".format(self.padding), + "groups={}, ".format(self.groups), + "deformable_groups={}, ".format(self.deformable_groups), + "bias={})".format(self.with_bias), + ]) + +class ModulatedDeformConvPack(ModulatedDeformConv): + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + bias=True): + super(ModulatedDeformConvPack, self).__init__( + in_channels, out_channels, kernel_size, stride, padding, dilation, + groups, deformable_groups, bias) + + self.conv_offset_mask = nn.Conv2d( + self.in_channels // self.groups, + self.deformable_groups * 3 * self.kernel_size[0] * + self.kernel_size[1], + kernel_size=self.kernel_size, + stride=_pair(self.stride), + padding=_pair(self.padding), + bias=True) + self.init_offset() + + def init_offset(self): + self.conv_offset_mask.weight.data.zero_() + self.conv_offset_mask.bias.data.zero_() + + def forward(self, input): + out = self.conv_offset_mask(input) + o1, o2, mask = torch.chunk(out, 3, dim=1) + offset = torch.cat((o1, o2), dim=1) + mask = torch.sigmoid(mask) + return modulated_deform_conv( + input, offset, mask, self.weight, self.bias, self.stride, + self.padding, self.dilation, self.groups, self.deformable_groups) diff --git a/maskrcnn_benchmark/layers/dcn/deform_pool_func.py 
b/maskrcnn_benchmark/layers/dcn/deform_pool_func.py new file mode 100644 index 000000000..f18fdd4cb --- /dev/null +++ b/maskrcnn_benchmark/layers/dcn/deform_pool_func.py @@ -0,0 +1,93 @@ +import torch +from torch.autograd import Function + +from maskrcnn_benchmark import _C + + +class DeformRoIPoolingFunction(Function): + + @staticmethod + def forward( + ctx, + data, + rois, + offset, + spatial_scale, + out_size, + out_channels, + no_trans, + group_size=1, + part_size=None, + sample_per_part=4, + trans_std=.0 + ): + ctx.spatial_scale = spatial_scale + ctx.out_size = out_size + ctx.out_channels = out_channels + ctx.no_trans = no_trans + ctx.group_size = group_size + ctx.part_size = out_size if part_size is None else part_size + ctx.sample_per_part = sample_per_part + ctx.trans_std = trans_std + + assert 0.0 <= ctx.trans_std <= 1.0 + if not data.is_cuda: + raise NotImplementedError + + n = rois.shape[0] + output = data.new_empty(n, out_channels, out_size, out_size) + output_count = data.new_empty(n, out_channels, out_size, out_size) + _C.deform_psroi_pooling_forward( + data, + rois, + offset, + output, + output_count, + ctx.no_trans, + ctx.spatial_scale, + ctx.out_channels, + ctx.group_size, + ctx.out_size, + ctx.part_size, + ctx.sample_per_part, + ctx.trans_std + ) + + if data.requires_grad or rois.requires_grad or offset.requires_grad: + ctx.save_for_backward(data, rois, offset) + ctx.output_count = output_count + + return output + + @staticmethod + def backward(ctx, grad_output): + if not grad_output.is_cuda: + raise NotImplementedError + + data, rois, offset = ctx.saved_tensors + output_count = ctx.output_count + grad_input = torch.zeros_like(data) + grad_rois = None + grad_offset = torch.zeros_like(offset) + + _C.deform_psroi_pooling_backward( + grad_output, + data, + rois, + offset, + output_count, + grad_input, + grad_offset, + ctx.no_trans, + ctx.spatial_scale, + ctx.out_channels, + ctx.group_size, + ctx.out_size, + ctx.part_size, + ctx.sample_per_part, + ctx.trans_std + ) + return (grad_input, grad_rois, grad_offset, None, None, None, None, None, None, None, None) + + +deform_roi_pooling = DeformRoIPoolingFunction.apply diff --git a/maskrcnn_benchmark/layers/dcn/deform_pool_module.py b/maskrcnn_benchmark/layers/dcn/deform_pool_module.py new file mode 100644 index 000000000..bab6c2604 --- /dev/null +++ b/maskrcnn_benchmark/layers/dcn/deform_pool_module.py @@ -0,0 +1,150 @@ +from torch import nn + +from .deform_pool_func import deform_roi_pooling + + +class DeformRoIPooling(nn.Module): + + def __init__(self, + spatial_scale, + out_size, + out_channels, + no_trans, + group_size=1, + part_size=None, + sample_per_part=4, + trans_std=.0): + super(DeformRoIPooling, self).__init__() + self.spatial_scale = spatial_scale + self.out_size = out_size + self.out_channels = out_channels + self.no_trans = no_trans + self.group_size = group_size + self.part_size = out_size if part_size is None else part_size + self.sample_per_part = sample_per_part + self.trans_std = trans_std + + def forward(self, data, rois, offset): + if self.no_trans: + offset = data.new_empty(0) + return deform_roi_pooling( + data, rois, offset, self.spatial_scale, self.out_size, + self.out_channels, self.no_trans, self.group_size, self.part_size, + self.sample_per_part, self.trans_std) + + +class DeformRoIPoolingPack(DeformRoIPooling): + + def __init__(self, + spatial_scale, + out_size, + out_channels, + no_trans, + group_size=1, + part_size=None, + sample_per_part=4, + trans_std=.0, + deform_fc_channels=1024): + 
super(DeformRoIPoolingPack, + self).__init__(spatial_scale, out_size, out_channels, no_trans, + group_size, part_size, sample_per_part, trans_std) + + self.deform_fc_channels = deform_fc_channels + + if not no_trans: + self.offset_fc = nn.Sequential( + nn.Linear(self.out_size * self.out_size * self.out_channels, + self.deform_fc_channels), + nn.ReLU(inplace=True), + nn.Linear(self.deform_fc_channels, self.deform_fc_channels), + nn.ReLU(inplace=True), + nn.Linear(self.deform_fc_channels, + self.out_size * self.out_size * 2)) + self.offset_fc[-1].weight.data.zero_() + self.offset_fc[-1].bias.data.zero_() + + def forward(self, data, rois): + assert data.size(1) == self.out_channels + if self.no_trans: + offset = data.new_empty(0) + return deform_roi_pooling( + data, rois, offset, self.spatial_scale, self.out_size, + self.out_channels, self.no_trans, self.group_size, + self.part_size, self.sample_per_part, self.trans_std) + else: + n = rois.shape[0] + offset = data.new_empty(0) + x = deform_roi_pooling(data, rois, offset, self.spatial_scale, + self.out_size, self.out_channels, True, + self.group_size, self.part_size, + self.sample_per_part, self.trans_std) + offset = self.offset_fc(x.view(n, -1)) + offset = offset.view(n, 2, self.out_size, self.out_size) + return deform_roi_pooling( + data, rois, offset, self.spatial_scale, self.out_size, + self.out_channels, self.no_trans, self.group_size, + self.part_size, self.sample_per_part, self.trans_std) + + +class ModulatedDeformRoIPoolingPack(DeformRoIPooling): + + def __init__(self, + spatial_scale, + out_size, + out_channels, + no_trans, + group_size=1, + part_size=None, + sample_per_part=4, + trans_std=.0, + deform_fc_channels=1024): + super(ModulatedDeformRoIPoolingPack, self).__init__( + spatial_scale, out_size, out_channels, no_trans, group_size, + part_size, sample_per_part, trans_std) + + self.deform_fc_channels = deform_fc_channels + + if not no_trans: + self.offset_fc = nn.Sequential( + nn.Linear(self.out_size * self.out_size * self.out_channels, + self.deform_fc_channels), + nn.ReLU(inplace=True), + nn.Linear(self.deform_fc_channels, self.deform_fc_channels), + nn.ReLU(inplace=True), + nn.Linear(self.deform_fc_channels, + self.out_size * self.out_size * 2)) + self.offset_fc[-1].weight.data.zero_() + self.offset_fc[-1].bias.data.zero_() + self.mask_fc = nn.Sequential( + nn.Linear(self.out_size * self.out_size * self.out_channels, + self.deform_fc_channels), + nn.ReLU(inplace=True), + nn.Linear(self.deform_fc_channels, + self.out_size * self.out_size * 1), + nn.Sigmoid()) + self.mask_fc[2].weight.data.zero_() + self.mask_fc[2].bias.data.zero_() + + def forward(self, data, rois): + assert data.size(1) == self.out_channels + if self.no_trans: + offset = data.new_empty(0) + return deform_roi_pooling( + data, rois, offset, self.spatial_scale, self.out_size, + self.out_channels, self.no_trans, self.group_size, + self.part_size, self.sample_per_part, self.trans_std) + else: + n = rois.shape[0] + offset = data.new_empty(0) + x = deform_roi_pooling(data, rois, offset, self.spatial_scale, + self.out_size, self.out_channels, True, + self.group_size, self.part_size, + self.sample_per_part, self.trans_std) + offset = self.offset_fc(x.view(n, -1)) + offset = offset.view(n, 2, self.out_size, self.out_size) + mask = self.mask_fc(x.view(n, -1)) + mask = mask.view(n, 1, self.out_size, self.out_size) + return deform_roi_pooling( + data, rois, offset, self.spatial_scale, self.out_size, + self.out_channels, self.no_trans, self.group_size, + self.part_size, 
self.sample_per_part, self.trans_std) * mask diff --git a/maskrcnn_benchmark/layers/misc.py b/maskrcnn_benchmark/layers/misc.py index a8cf1c680..b3bca0db5 100644 --- a/maskrcnn_benchmark/layers/misc.py +++ b/maskrcnn_benchmark/layers/misc.py @@ -11,6 +11,7 @@ import math import torch +from torch import nn from torch.nn.modules.utils import _ntuple @@ -108,3 +109,86 @@ def _output_size(dim): output_shape = tuple(_output_size(2)) output_shape = input.shape[:-2] + output_shape return _NewEmptyTensorOp.apply(input, output_shape) + + +class DFConv2d(nn.Module): + """Deformable convolutional layer""" + def __init__( + self, + in_channels, + out_channels, + with_modulated_dcn=True, + kernel_size=3, + stride=1, + groups=1, + dilation=1, + deformable_groups=1, + bias=False + ): + super(DFConv2d, self).__init__() + if isinstance(kernel_size, (list, tuple)): + assert len(kernel_size) == 2 + offset_base_channels = kernel_size[0] * kernel_size[1] + else: + offset_base_channels = kernel_size * kernel_size + if with_modulated_dcn: + from maskrcnn_benchmark.layers import ModulatedDeformConv + offset_channels = offset_base_channels * 3 #default: 27 + conv_block = ModulatedDeformConv + else: + from maskrcnn_benchmark.layers import DeformConv + offset_channels = offset_base_channels * 2 #default: 18 + conv_block = DeformConv + self.offset = Conv2d( + in_channels, + deformable_groups * offset_channels, + kernel_size=kernel_size, + stride= stride, + padding= dilation, + groups=1, + dilation=dilation + ) + for l in [self.offset,]: + nn.init.kaiming_uniform_(l.weight, a=1) + torch.nn.init.constant_(l.bias, 0.) + self.conv = conv_block( + in_channels, + out_channels, + kernel_size=kernel_size, + stride= stride, + padding=dilation, + dilation=dilation, + groups=groups, + deformable_groups=deformable_groups, + bias=bias + ) + self.with_modulated_dcn = with_modulated_dcn + self.kernel_size = kernel_size + self.stride = stride + self.padding = dilation + self.dilation = dilation + + def forward(self, x): + if x.numel() > 0: + if not self.with_modulated_dcn: + offset = self.offset(x) + x = self.conv(x, offset) + else: + offset_mask = self.offset(x) + offset = offset_mask[:, :18, :, :] + mask = offset_mask[:, -9:, :, :].sigmoid() + x = self.conv(x, offset, mask) + return x + # get output shape + output_shape = [ + (i + 2 * p - (di * (k - 1) + 1)) // d + 1 + for i, p, di, k, d in zip( + x.shape[-2:], + self.padding, + self.dilation, + self.kernel_size, + self.stride + ) + ] + output_shape = [x.shape[0], self.conv.weight.shape[0]] + output_shape + return _NewEmptyTensorOp.apply(x, output_shape) diff --git a/maskrcnn_benchmark/modeling/backbone/resnet.py b/maskrcnn_benchmark/modeling/backbone/resnet.py index aaa438026..fc02dc1e8 100644 --- a/maskrcnn_benchmark/modeling/backbone/resnet.py +++ b/maskrcnn_benchmark/modeling/backbone/resnet.py @@ -24,6 +24,7 @@ from maskrcnn_benchmark.layers import FrozenBatchNorm2d from maskrcnn_benchmark.layers import Conv2d +from maskrcnn_benchmark.layers import DFConv2d from maskrcnn_benchmark.modeling.make_layers import group_norm from maskrcnn_benchmark.utils.registry import Registry @@ -106,6 +107,7 @@ def __init__(self, cfg): stage2_relative_factor = 2 ** (stage_spec.index - 1) bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor out_channels = stage2_out_channels * stage2_relative_factor + stage_with_dcn = cfg.MODEL.RESNETS.STAGE_WITH_DCN[stage_spec.index -1] module = _make_stage( transformation_module, in_channels, @@ -115,6 +117,11 @@ def __init__(self, cfg): 
num_groups, cfg.MODEL.RESNETS.STRIDE_IN_1X1, first_stride=int(stage_spec.index > 1) + 1, + dcn_config={ + "stage_with_dcn": stage_with_dcn, + "with_modulated_dcn": cfg.MODEL.RESNETS.WITH_MODULATED_DCN, + "deformable_groups": cfg.MODEL.RESNETS.DEFORMABLE_GROUPS, + } ) in_channels = out_channels self.add_module(name, module) @@ -155,7 +162,8 @@ def __init__( stride_in_1x1=True, stride_init=None, res2_out_channels=256, - dilation=1 + dilation=1, + dcn_config={} ): super(ResNetHead, self).__init__() @@ -182,7 +190,8 @@ def __init__( num_groups, stride_in_1x1, first_stride=stride, - dilation=dilation + dilation=dilation, + dcn_config=dcn_config ) stride = None self.add_module(name, module) @@ -204,7 +213,8 @@ def _make_stage( num_groups, stride_in_1x1, first_stride, - dilation=1 + dilation=1, + dcn_config={} ): blocks = [] stride = first_stride @@ -217,7 +227,8 @@ def _make_stage( num_groups, stride_in_1x1, stride, - dilation=dilation + dilation=dilation, + dcn_config=dcn_config ) ) stride = 1 @@ -235,7 +246,8 @@ def __init__( stride_in_1x1, stride, dilation, - norm_func + norm_func, + dcn_config ): super(Bottleneck, self).__init__() @@ -271,17 +283,34 @@ def __init__( ) self.bn1 = norm_func(bottleneck_channels) # TODO: specify init for the above + with_dcn = dcn_config.get("stage_with_dcn", False) + if with_dcn: + deformable_groups = dcn_config.get("deformable_groups", 1) + with_modulated_dcn = dcn_config.get("with_modulated_dcn", False) + self.conv2 = DFConv2d( + bottleneck_channels, + bottleneck_channels, + with_modulated_dcn=with_modulated_dcn, + kernel_size=3, + stride=stride_3x3, + groups=num_groups, + dilation=dilation, + deformable_groups=deformable_groups, + bias=False + ) + else: + self.conv2 = Conv2d( + bottleneck_channels, + bottleneck_channels, + kernel_size=3, + stride=stride_3x3, + padding=dilation, + bias=False, + groups=num_groups, + dilation=dilation + ) + nn.init.kaiming_uniform_(self.conv2.weight, a=1) - self.conv2 = Conv2d( - bottleneck_channels, - bottleneck_channels, - kernel_size=3, - stride=stride_3x3, - padding=dilation, - bias=False, - groups=num_groups, - dilation=dilation - ) self.bn2 = norm_func(bottleneck_channels) self.conv3 = Conv2d( @@ -289,7 +318,7 @@ def __init__( ) self.bn3 = norm_func(out_channels) - for l in [self.conv1, self.conv2, self.conv3,]: + for l in [self.conv1, self.conv3,]: nn.init.kaiming_uniform_(l.weight, a=1) def forward(self, x): @@ -346,7 +375,8 @@ def __init__( num_groups=1, stride_in_1x1=True, stride=1, - dilation=1 + dilation=1, + dcn_config={} ): super(BottleneckWithFixedBatchNorm, self).__init__( in_channels=in_channels, @@ -356,7 +386,8 @@ def __init__( stride_in_1x1=stride_in_1x1, stride=stride, dilation=dilation, - norm_func=FrozenBatchNorm2d + norm_func=FrozenBatchNorm2d, + dcn_config=dcn_config ) @@ -376,7 +407,8 @@ def __init__( num_groups=1, stride_in_1x1=True, stride=1, - dilation=1 + dilation=1, + dcn_config={} ): super(BottleneckWithGN, self).__init__( in_channels=in_channels, @@ -386,7 +418,8 @@ def __init__( stride_in_1x1=stride_in_1x1, stride=stride, dilation=dilation, - norm_func=group_norm + norm_func=group_norm, + dcn_config=dcn_config ) diff --git a/maskrcnn_benchmark/utils/c2_model_loading.py b/maskrcnn_benchmark/utils/c2_model_loading.py index b1b9996e8..cbf4c050a 100644 --- a/maskrcnn_benchmark/utils/c2_model_loading.py +++ b/maskrcnn_benchmark/utils/c2_model_loading.py @@ -143,6 +143,33 @@ def _load_c2_pickled_weights(file_path): return weights +def _rename_conv_weights_for_deformable_conv_layers(state_dict, cfg): 
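+    """
+    Remap Caffe2 ResNet conv2 weights for every stage that is configured with
+    deformable convolutions (cfg.MODEL.RESNETS.STAGE_WITH_DCN), so that
+    layer<N>.*.conv2.{weight,bias} keys match the conv2.conv.* parameter names
+    used by the DFConv2d wrapper.
+    """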
+    import re
+    logger = logging.getLogger(__name__)
+    logger.info("Remapping conv weights for deformable conv layers")
+    layer_keys = sorted(state_dict.keys())
+    for ix, stage_with_dcn in enumerate(cfg.MODEL.RESNETS.STAGE_WITH_DCN, 1):
+        if not stage_with_dcn:
+            continue
+        for old_key in layer_keys:
+            pattern = ".*layer{}.*conv2.*".format(ix)
+            r = re.match(pattern, old_key)
+            if r is None:
+                continue
+            for param in ["weight", "bias"]:
+                if old_key.find(param) == -1:
+                    continue
+                new_key = old_key.replace(
+                    "conv2.{}".format(param), "conv2.conv.{}".format(param)
+                )
+                logger.info("pattern: {}, old_key: {}, new_key: {}".format(
+                    pattern, old_key, new_key
+                ))
+                state_dict[new_key] = state_dict[old_key]
+                del state_dict[old_key]
+    return state_dict
+
+
 _C2_STAGE_NAMES = {
     "R-50": ["1.2", "2.3", "3.5", "4.2"],
     "R-101": ["1.2", "2.3", "3.22", "4.2"],
@@ -168,6 +195,10 @@ def load_resnet_c2_format(cfg, f):
     arch = arch.replace("-RETINANET", "")
     stages = _C2_STAGE_NAMES[arch]
     state_dict = _rename_weights_for_resnet(state_dict, stages)
+    # ***********************************
+    # for deformable convolutional layer
+    state_dict = _rename_conv_weights_for_deformable_conv_layers(state_dict, cfg)
+    # ***********************************
     return dict(model=state_dict)
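
The sketch below is not part of the patch series; it is a minimal usage example of the DFConv2d layer added in maskrcnn_benchmark/layers/misc.py, assuming the CUDA extensions above have been compiled into maskrcnn_benchmark._C and a GPU is available. The tensor shapes and variable names are illustrative only; inside the backbone the layer is selected per stage through cfg.MODEL.RESNETS.STAGE_WITH_DCN, WITH_MODULATED_DCN and DEFORMABLE_GROUPS.

import torch
from maskrcnn_benchmark.layers import DFConv2d

# Illustrative input: batch of 2 feature maps with 256 channels (hypothetical shape).
x = torch.randn(2, 256, 32, 32, device="cuda")

# DFConv2d predicts offsets (and a modulation mask when with_modulated_dcn=True)
# with an internal regular convolution, then applies the deformable convolution.
conv = DFConv2d(
    in_channels=256,
    out_channels=256,
    with_modulated_dcn=True,
    kernel_size=3,
    stride=1,
    dilation=1,
    deformable_groups=1,
).to("cuda")

y = conv(x)
print(y.shape)  # stride 1 and padding == dilation keep the spatial size: [2, 256, 32, 32]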