Skip to content
This repository has been archived by the owner on Apr 17, 2023. It is now read-only.

Commit

Permalink
Merge remote-tracking branch 'public/otx' into songkich/merge-dev-otx
Browse files Browse the repository at this point in the history
  • Loading branch information
goodsong81 committed Dec 21, 2022
2 parents 7a504d1 + f784958 commit 187e529
Show file tree
Hide file tree
Showing 42 changed files with 1,265 additions and 574 deletions.
1 change: 0 additions & 1 deletion models/segmentation/_base_/ocr_litehrnet18_mod2.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
__norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
type='ClassIncrSegmentor',
is_task_adapt=True,
pretrained=None,
backbone=dict(
type='LiteHRNet',
Expand Down
1 change: 0 additions & 1 deletion models/segmentation/_base_/ocr_litehrnet_s_mod2.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
__norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
type='ClassIncrSegmentor',
is_task_adapt=True,
pretrained=None,
backbone=dict(
type='LiteHRNet',
Expand Down
1 change: 0 additions & 1 deletion models/segmentation/_base_/ocr_litehrnet_x_mod3.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
__norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
type='ClassIncrSegmentor',
is_task_adapt=True,
pretrained=None,
backbone=dict(
type='LiteHRNet',
Expand Down
2 changes: 2 additions & 0 deletions mpa/cls/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,3 +49,5 @@
import mpa.modules.models.losses.triplet_loss
import mpa.modules.models.losses.barlowtwins_loss
import mpa.modules.models.losses.mse_loss

import mpa.modules.optimizer.lars
9 changes: 5 additions & 4 deletions mpa/cls/explainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,12 +14,13 @@

from mpa.registry import STAGES
from mpa.cls.stage import ClsStage
from mpa.modules.hooks.recording_forward_hooks import ActivationMapHook, EigenCamHook
from mpa.modules.hooks.recording_forward_hooks import ActivationMapHook, EigenCamHook, ReciproCAMHook
from mpa.utils.logger import get_logger
logger = get_logger()
EXPLAINER_HOOK_SELECTOR = {
'eigencam': EigenCamHook,
'activationmap': ActivationMapHook,
'classwisesaliencymap': ReciproCAMHook,
}


Expand All @@ -35,9 +36,9 @@ def run(self, model_cfg, model_ckpt, data_cfg, **kwargs):
explainer = kwargs.get('explainer')
self.explainer_hook = EXPLAINER_HOOK_SELECTOR.get(explainer.lower(), None)
if self.explainer_hook is None:
raise NotImplementedError(f'explainer algorithm {explainer} not supported')
raise NotImplementedError(f'Explainer algorithm {explainer} not supported!')
logger.info(
f'explainer algorithm: {explainer}'
f'Explainer algorithm: {explainer}'
)
cfg = self.configure(model_cfg, model_ckpt, data_cfg, training=False, **kwargs)

Expand Down Expand Up @@ -71,7 +72,7 @@ def _explain(self, cfg):
_ = load_checkpoint(model, cfg.load_from, map_location='cpu')

model.eval()
model = MMDataParallel(model, device_ids=[0])
model = self._put_model_on_gpu(model, cfg)

# InferenceProgressCallback (Time Monitor enable into Infer task)
ClsStage.set_inference_progress_callback(model, cfg)
Expand Down
24 changes: 10 additions & 14 deletions mpa/cls/inferrer.py
Original file line number Diff line number Diff line change
@@ -1,24 +1,21 @@
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import os.path as osp
from contextlib import nullcontext

import os.path as osp
import mmcv
import numpy as np
import torch

import mmcv
from mmcv.parallel import MMDataParallel
from mmcv.runner import load_checkpoint, wrap_fp16_model

from mmcls.datasets import build_dataloader, build_dataset
from mmcls.models import build_classifier

from mpa.registry import STAGES
from mmcv.runner import load_checkpoint, wrap_fp16_model
from mpa.cls.stage import ClsStage
from mpa.modules.hooks.recording_forward_hooks import FeatureVectorHook, ReciproCAMHook
from mpa.modules.hooks.recording_forward_hooks import ReciproCAMHook, FeatureVectorHook
from mpa.modules.utils.task_adapt import prob_extractor
from mpa.registry import STAGES
from mpa.utils.logger import get_logger

logger = get_logger()


Expand Down Expand Up @@ -83,7 +80,7 @@ def _infer(self, cfg, dump_features=False, dump_saliency_map=False):
_ = load_checkpoint(model, cfg.load_from, map_location='cpu')

model.eval()
model = MMDataParallel(model, device_ids=[0])
model = self._put_model_on_gpu(model, cfg)

# InferenceProgressCallback (Time Monitor enable into Infer task)
ClsStage.set_inference_progress_callback(model, cfg)
Expand All @@ -109,10 +106,9 @@ def _infer(self, cfg, dump_features=False, dump_saliency_map=False):
saliency_maps = forward_explainer_hook.records if dump_saliency_map else [None] * len(self.dataset)

assert len(eval_predictions) == len(feature_vectors) == len(saliency_maps), \
(
"Number of elements should be the same, however, number of outputs are ",
f"{len(eval_predictions)}, {len(feature_vectors)}, and {len(saliency_maps)}"
)
"Number of elements should be the same, however, number of outputs are " \
f"{len(eval_predictions)}, {len(feature_vectors)}, and {len(saliency_maps)}"

outputs = dict(
eval_predictions=eval_predictions,
feature_vectors=feature_vectors,
Expand Down
24 changes: 23 additions & 1 deletion mpa/cls/stage.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,10 +8,12 @@

from mmcv import ConfigDict
from mmcv import build_from_cfg
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel

from mpa.stage import Stage
from mpa.utils.config_utils import update_or_add_custom_hook, recursively_update_cfg
from mpa.utils.logger import get_logger
from mpa.utils.data_cpu import MMDataCPU

logger = get_logger()

Expand Down Expand Up @@ -118,7 +120,7 @@ def configure_model(cfg, training, **kwargs):
return

# update model layer's in/out configuration
from mmcls.models.builder import BACKBONES as backbone_reg
from mmcv.cnn import MODELS as backbone_reg
layer = build_from_cfg(cfg.model.backbone, backbone_reg)
layer.eval()
input_shape = [3, 224, 224]
Expand Down Expand Up @@ -284,6 +286,26 @@ def configure_task(cfg, training, model_meta=None, **kwargs):
cfg.model.head.num_old_classes = len(old_classes)
return model_tasks, dst_classes

def _put_model_on_gpu(self, model, cfg):
    """Wrap ``model`` for the available hardware and return the wrapper.

    Selects one of three execution wrappers:
      * CUDA + distributed run  -> ``MMDistributedDataParallel``
      * CUDA, single process    -> ``MMDataParallel`` on GPU 0
      * no CUDA available       -> ``MMDataCPU`` (CPU fallback)

    :param model: the built (mm-style) model to wrap.
    :param cfg: config object; only ``find_unused_parameters`` is read here.
    :return: the model wrapped for the detected device/parallel mode.
    """
    if torch.cuda.is_available():
        model = model.cuda()
        # NOTE(review): self.distributed is presumably set during stage
        # initialization elsewhere in this class — confirm it always exists
        # before this method is called.
        if self.distributed:
            # put model on gpus
            find_unused_parameters = cfg.get('find_unused_parameters', False)
            # Sets the `find_unused_parameters` parameter in
            # torch.nn.parallel.DistributedDataParallel
            model = MMDistributedDataParallel(
                model,
                device_ids=[torch.cuda.current_device()],
                broadcast_buffers=False,
                find_unused_parameters=find_unused_parameters)
        else:
            # NOTE(review): .cuda() was already called above, so this second
            # .cuda() is redundant (but harmless — model is already on GPU).
            model = MMDataParallel(
                model.cuda(), device_ids=[0])
    else:
        # No CUDA device: run on CPU via the project's CPU wrapper.
        model = MMDataCPU(model)

    return model

def refine_tasks(train_cfg, meta, adapt_type):
new_tasks = train_cfg['tasks']
Expand Down
Loading

0 comments on commit 187e529

Please sign in to comment.