Feat/settings #3646

Open · wants to merge 26 commits into base: main
18 changes: 18 additions & 0 deletions .devcontainer/devcontainer.json
@@ -0,0 +1,18 @@
{
    "build": { "dockerfile": "../docker/Dockerfile" },
    "runArgs": [
        "--gpus",
        "all",
        "--shm-size",
        "8g"
    ],
    "customizations": {
        "vscode": {
            "extensions": [
                "ms-python.python",
                "ms-python.vscode-pylance"
            ]
        }
    },
    "shutdownAction": "none"
}
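A quick way to confirm that the `--gpus all` passthrough works from inside the container, assuming the PyTorch environment installed by `docker/Dockerfile`:

```
import torch

# Should print True and the GPU count passed through by `--gpus all`.
print(torch.cuda.is_available(), torch.cuda.device_count())
```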
9 changes: 7 additions & 2 deletions .gitignore
@@ -30,7 +30,7 @@ MANIFEST
 # before PyInstaller builds the exe, so as to inject date/other infos into it.
 *.manifest
 *.spec
-
+nohup.out
 # Installer logs
 pip-log.txt
 pip-delete-this-directory.txt
@@ -62,7 +62,8 @@ instance/
 
 # Scrapy stuff:
 .scrapy
-
+wandb
+test_commands.sh
 # Sphinx documentation
 docs/en/_build/
 docs/zh_cn/_build/
@@ -118,3 +119,7 @@ mmseg/.mim
 
 # Pytorch
 *.pth
+
+logs/
+*.png
+*.out
32 changes: 32 additions & 0 deletions README_amc-cbn.md
@@ -0,0 +1,32 @@
# README_AMC-CBN

## Dataset
- ADE20K: download the ADE20K dataset from http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip
- Angiography:
- `mmseg/datasets/angiography.py`

## Settings
- logger: change the `vis_backends` type to `WandbVisBackend` in `configs/_base_/default_runtime.py` (see the sketch after this list)
- wandb logging url: https://wandb.ai/amccbn/mmsegmentation-tool
- Adjust the number of epochs depending on the batch size.
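A minimal sketch of the `vis_backends` change described above; the `init_kwargs` below (entity `amccbn`, project `mmsegmentation-tool`, inferred from the wandb URL) are assumptions, not lines from this PR:

```
# configs/_base_/default_runtime.py (sketch; init_kwargs are assumed)
vis_backends = [
    dict(
        type='WandbVisBackend',
        init_kwargs=dict(entity='amccbn', project='mmsegmentation-tool'))
]
visualizer = dict(
    type='SegLocalVisualizer', vis_backends=vis_backends, name='visualizer')
```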

## Installation
- Download a pretrained ViT from https://github.com/open-mmlab/mmpretrain/tree/master/configs/vision_transformer and move it to `pretrain/`
- The [ViT checkpoint](vit_vit-b16_mln_upernet_8xb2-80k_ade20k-512x512.py) was used for training `vit_vit-b16_mln_upernet_8xb2-80k_ade20k-512x512.py`
- `pip install wandb`

## Train
```
tools/dist_train.sh configs/vit/vit_vit-b16_mln_upernet_8xb2-80k_ade20k-512x512.py [num_gpus] --work-dir logs/vit-upernet-ade20k
tools/dist_train.sh configs/upernet/upernet_r101_4xb4-160k_ade20k-512x512.py [num_gpus] --work-dir logs/res-101-upernet-ade20k

tools/dist_train.sh configs/upernet/upernet_r101_4xb4-160k_cag-512x512.py [num_gpus] --work-dir logs/res-101-upernet-cag
```
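The same configs can also be run without the distributed launcher through mmengine's Python API; a minimal single-process sketch (not part of this PR):

```
from mmengine.config import Config
from mmengine.runner import Runner

# Single-process equivalent of tools/dist_train.sh for the CAG config.
cfg = Config.fromfile('configs/upernet/upernet_r101_4xb4-160k_cag-512x512.py')
cfg.work_dir = 'logs/res-101-upernet-cag'
Runner.from_cfg(cfg).train()
```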

## Transforms
- Additional augmentation functions built on albumentations can be found in `mmseg/datasets/transforms/transforms.py`

## Issues
- To create an instance of a `dataset` in a Jupyter notebook, `init_default_scope('mmseg')` must be called first (see the sketch below).
- https://mmsegmentation.readthedocs.io/en/latest/advanced_guides/datasets.html#main-interfaces
- After adding a custom dataset class, register it in the `mmseg/datasets/__init__.py` file.
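A minimal notebook sketch of the point above; the config path is this PR's CAG dataset config, and the rest is standard mmengine/mmseg usage:

```
from mmengine.config import Config
from mmengine.registry import init_default_scope

from mmseg.registry import DATASETS

init_default_scope('mmseg')  # must run before building mmseg components
cfg = Config.fromfile('configs/_base_/datasets/cag.py')
dataset = DATASETS.build(cfg.train_dataloader.dataset)
print(len(dataset), dataset[0].keys())
```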
430 changes: 430 additions & 0 deletions a.ipynb

Large diffs are not rendered by default.

95 changes: 95 additions & 0 deletions configs/_base_/datasets/cag.py
@@ -0,0 +1,95 @@
# dataset settings
dataset_type = 'CoronaryAngiographyDataset'
data_root = 'data/cag_2100'
# augmentation setting from YoungIn's jupyter notebook
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=False),
    dict(
        type='AlbuShiftScaleRotateTransform',
        scale_limit=(-0.2, 0),
        rotate_limit=20,
        shift_limit=0.1,
        border_mode=0,
        value=[0.3, 0.4, 0.5],
        p=1),
    dict(
        type='AlbuRandomContrastTransform',
        brightness_limit=(-0.2, 0.2),
        contrast_limit=(-0.2, 0.2),
        p=0.5),
    dict(
        type='AlbuGaussNoiseTransform',
        var_limit=(0, 0.01),
        p=0.5),
    dict(type='RandomFlip', prob=0.5),
    # dict(type='PhotoMetricDistortion'),
    dict(type='PackSegInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='Resize', scale=(512, 512), keep_ratio=True),
    # load annotations after ``Resize`` because the ground truth
    # does not need the resize transform
    dict(type='LoadAnnotations', reduce_zero_label=False),
    dict(type='PackSegInputs')
]
img_ratios = [1.0]
tta_pipeline = [
    dict(type='LoadImageFromFile', backend_args=None),
    dict(
        type='TestTimeAug',
        transforms=[
            [
                dict(type='Resize', scale_factor=r, keep_ratio=True)
                for r in img_ratios
            ],
            [
                dict(type='RandomFlip', prob=0., direction='horizontal'),
                dict(type='RandomFlip', prob=1., direction='horizontal')
            ], [dict(type='LoadAnnotations')], [dict(type='PackSegInputs')]
        ])
]
train_dataloader = dict(
    batch_size=4,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type='InfiniteSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        reduce_zero_label=False,
        data_prefix=dict(
            img_path='images/training', seg_map_path='annotations/training'),
        pipeline=train_pipeline))
val_dataloader = dict(
    batch_size=1,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        reduce_zero_label=False,
        data_prefix=dict(
            img_path='images/validation',
            seg_map_path='annotations/validation'),
        pipeline=test_pipeline))
test_dataloader = dict(
    batch_size=1,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        reduce_zero_label=False,
        data_prefix=dict(
            img_path='images/test',
            seg_map_path='annotations/test'),
        pipeline=test_pipeline))

val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU', 'mFscore'])
test_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU', 'mFscore'])
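The `Albu*` entries in `train_pipeline` are custom wrappers; per README_amc-cbn.md they live in `mmseg/datasets/transforms/transforms.py`, which is not shown in this diff. A minimal sketch of how such a wrapper could be registered, assuming a thin delegation to albumentations (the class body is an illustration, not the PR's actual code):

```
import albumentations as A
from mmcv.transforms import BaseTransform

from mmseg.registry import TRANSFORMS


@TRANSFORMS.register_module(force=True)
class AlbuShiftScaleRotateTransform(BaseTransform):
    """Sketch: wrap albumentations.ShiftScaleRotate for mmseg pipelines."""

    def __init__(self, shift_limit=0.1, scale_limit=(-0.2, 0),
                 rotate_limit=20, border_mode=0, value=None, p=1.0):
        self.aug = A.ShiftScaleRotate(
            shift_limit=shift_limit,
            scale_limit=scale_limit,
            rotate_limit=rotate_limit,
            border_mode=border_mode,
            value=value,
            p=p)

    def transform(self, results: dict) -> dict:
        # Apply the same geometric transform to the image and its mask.
        out = self.aug(image=results['img'], mask=results['gt_seg_map'])
        results['img'] = out['image']
        results['gt_seg_map'] = out['mask']
        return results
```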
4 changes: 2 additions & 2 deletions configs/_base_/schedules/schedule_160k.py
@@ -13,13 +13,13 @@
 ]
 # training schedule for 160k
 train_cfg = dict(
-    type='IterBasedTrainLoop', max_iters=160000, val_interval=16000)
+    type='IterBasedTrainLoop', max_iters=160000, val_interval=10000)
 val_cfg = dict(type='ValLoop')
 test_cfg = dict(type='TestLoop')
 default_hooks = dict(
     timer=dict(type='IterTimerHook'),
     logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False),
     param_scheduler=dict(type='ParamSchedulerHook'),
-    checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=16000),
+    checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=10000),
     sampler_seed=dict(type='DistSamplerSeedHook'),
     visualization=dict(type='SegVisualizationHook'))
6 changes: 3 additions & 3 deletions configs/mae/mae-base_upernet_8xb2-amp-160k_ade20k-512x512.py
@@ -1,12 +1,12 @@
 _base_ = [
-    '../_base_/models/upernet_mae.py', '../_base_/datasets/ade20k.py',
+    '../_base_/models/upernet_mae.py', '../_base_/datasets/cag.py',
     '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
 ]
 crop_size = (512, 512)
 data_preprocessor = dict(size=crop_size)
 model = dict(
     data_preprocessor=data_preprocessor,
-    pretrained='./pretrain/mae_pretrain_vit_base_mmcls.pth',
+    pretrained='/workspaces/mmsegmentation-1/configs/mae/mae_pretrain_vit_base.pth',
     backbone=dict(
         type='MAE',
         img_size=(512, 512),
@@ -51,4 +51,4 @@
 # By default, models are trained on 8 GPUs with 2 images per GPU
 train_dataloader = dict(batch_size=2)
 val_dataloader = dict(batch_size=1)
-test_dataloader = val_dataloader
+test_dataloader = dict(batch_size=1)
54 changes: 54 additions & 0 deletions configs/mae/mae-base_upernet_8xb2-amp-160k_cag-512x512.py
@@ -0,0 +1,54 @@
_base_ = [
    '../_base_/models/upernet_mae.py', '../_base_/datasets/cag.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
data_preprocessor = dict(size=crop_size)
model = dict(
    data_preprocessor=data_preprocessor,
    pretrained='/workspaces/mmsegmentation-1/converted_model.pth',
    backbone=dict(
        type='MAE',
        img_size=(512, 512),
        patch_size=16,
        embed_dims=768,
        num_layers=12,
        num_heads=12,
        mlp_ratio=4,
        init_values=1.0,
        drop_path_rate=0.1,
        out_indices=[3, 5, 7, 11]),
    neck=dict(embed_dim=768, rescales=[4, 2, 1, 0.5]),
    decode_head=dict(
        in_channels=[768, 768, 768, 768], num_classes=150, channels=768),
    auxiliary_head=dict(in_channels=768, num_classes=150),
    test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(341, 341)))

optim_wrapper = dict(
    _delete_=True,
    type='OptimWrapper',
    optimizer=dict(
        type='AdamW', lr=1e-4, betas=(0.9, 0.999), weight_decay=0.05),
    paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.65),
    constructor='LayerDecayOptimizerConstructor')

param_scheduler = [
    dict(
        type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
    dict(
        type='PolyLR',
        eta_min=0.0,
        power=1.0,
        begin=1500,
        end=160000,
        by_epoch=False,
    )
]

# mixed precision
fp16 = dict(loss_scale='dynamic')

# By default, models are trained on 8 GPUs with 2 images per GPU
train_dataloader = dict(batch_size=2)
val_dataloader = dict(batch_size=1)
test_dataloader = dict(batch_size=1)
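The `converted_model.pth` path above implies the upstream MAE checkpoint was re-keyed for mmseg's `MAE` backbone, but the conversion script itself is not in this diff. A hedged sketch of such a step; the file names and the `blocks. -> layers.` remap are assumptions:

```
import torch

# Load the upstream MAE checkpoint; official releases nest weights
# under a 'model' key.
src = torch.load('mae_pretrain_vit_base.pth', map_location='cpu')
state = src.get('model', src)

# Example remap of upstream parameter names to the backbone's layout.
converted = {k.replace('blocks.', 'layers.'): v for k, v in state.items()}

torch.save({'state_dict': converted}, 'converted_model.pth')
```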
11 changes: 11 additions & 0 deletions configs/upernet/upernet_r101_4xb4-160k_cag-512x512.py
@@ -0,0 +1,11 @@
_base_ = [
    '../_base_/models/upernet_r50.py', '../_base_/datasets/cag.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
data_preprocessor = dict(size=crop_size)
model = dict(
    data_preprocessor=data_preprocessor,
    decode_head=dict(num_classes=2),
    auxiliary_head=dict(num_classes=2),
    pretrained='open-mmlab://resnet101_v1c',
    backbone=dict(depth=101))
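Once training has produced a checkpoint, a quick smoke test of this binary CAG config through `mmseg.apis`; the checkpoint and demo image paths below are placeholders, not files from this PR:

```
from mmseg.apis import inference_model, init_model

config = 'configs/upernet/upernet_r101_4xb4-160k_cag-512x512.py'
checkpoint = 'logs/res-101-upernet-cag/iter_160000.pth'  # assumed path
model = init_model(config, checkpoint, device='cuda:0')

result = inference_model(model, 'demo/demo.png')  # any angiogram frame
print(result.pred_sem_seg.data.shape)  # (1, H, W) tensor of class indices
```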
1 change: 1 addition & 0 deletions docker/Dockerfile
@@ -33,3 +33,4 @@ WORKDIR /mmsegmentation
 ENV FORCE_CUDA="1"
 RUN pip install -r requirements.txt
 RUN pip install --no-cache-dir -e .
+RUN pip install wandb
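With wandb baked into the image, credentials still have to reach the container at runtime; one option, using wandb's standard `WANDB_API_KEY` variable:

```
import os

import wandb

# The key is injected at runtime, e.g. `docker run -e WANDB_API_KEY=...`.
wandb.login(key=os.environ['WANDB_API_KEY'])
```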