From 0cba98c6c6771d83abaab28664c925adca327a8c Mon Sep 17 00:00:00 2001
From: liyinshuo
Date: Sun, 18 Apr 2021 23:24:42 +0800
Subject: [PATCH] Add config files of RDN.

---
 .../rdn/rdn_x2c64b16_g1_1000k_div2k.py       | 124 ++++++++++++++++++
 .../rdn/rdn_x3c64b16_g1_1000k_div2k.py       | 124 ++++++++++++++++++
 .../rdn/rdn_x4c64b16_g1_1000k_div2k.py       | 124 ++++++++++++++++++
 3 files changed, 372 insertions(+)
 create mode 100644 configs/restorers/rdn/rdn_x2c64b16_g1_1000k_div2k.py
 create mode 100644 configs/restorers/rdn/rdn_x3c64b16_g1_1000k_div2k.py
 create mode 100644 configs/restorers/rdn/rdn_x4c64b16_g1_1000k_div2k.py

diff --git a/configs/restorers/rdn/rdn_x2c64b16_g1_1000k_div2k.py b/configs/restorers/rdn/rdn_x2c64b16_g1_1000k_div2k.py
new file mode 100644
index 0000000000..ee7187fe34
--- /dev/null
+++ b/configs/restorers/rdn/rdn_x2c64b16_g1_1000k_div2k.py
@@ -0,0 +1,124 @@
+exp_name = 'rdn_x2c64b16_g1_1000k_div2k'
+
+scale = 2
+# model settings
+model = dict(
+    type='BasicRestorer',
+    generator=dict(
+        type='RDN',
+        in_channels=3,
+        out_channels=3,
+        mid_channels=64,
+        num_blocks=16,
+        upscale_factor=scale),
+    pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))
+# model training and testing settings
+train_cfg = None
+test_cfg = dict(metrics=['PSNR', 'SSIM'], crop_border=scale)
+
+# dataset settings
+train_dataset_type = 'SRAnnotationDataset'
+val_dataset_type = 'SRFolderDataset'
+train_pipeline = [
+    dict(
+        type='LoadImageFromFile',
+        io_backend='disk',
+        key='lq',
+        flag='unchanged'),
+    dict(
+        type='LoadImageFromFile',
+        io_backend='disk',
+        key='gt',
+        flag='unchanged'),
+    dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
+    dict(
+        type='Normalize',
+        keys=['lq', 'gt'],
+        mean=[0, 0, 0],
+        std=[1, 1, 1],
+        to_rgb=True),
+    dict(type='PairedRandomCrop', gt_patch_size=64),
+    dict(
+        type='Flip', keys=['lq', 'gt'], flip_ratio=0.5,
+        direction='horizontal'),
+    dict(type='Flip', keys=['lq', 'gt'], flip_ratio=0.5, direction='vertical'),
+    dict(type='RandomTransposeHW', keys=['lq', 'gt'], transpose_ratio=0.5),
+    dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path']),
+    dict(type='ImageToTensor', keys=['lq', 'gt'])
+]
+test_pipeline = [
+    dict(
+        type='LoadImageFromFile',
+        io_backend='disk',
+        key='lq',
+        flag='unchanged'),
+    dict(
+        type='LoadImageFromFile',
+        io_backend='disk',
+        key='gt',
+        flag='unchanged'),
+    dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
+    dict(
+        type='Normalize',
+        keys=['lq', 'gt'],
+        mean=[0, 0, 0],
+        std=[1, 1, 1],
+        to_rgb=True),
+    dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path']),
+    dict(type='ImageToTensor', keys=['lq', 'gt'])
+]
+
+data = dict(
+    workers_per_gpu=1,
+    train_dataloader=dict(samples_per_gpu=16, drop_last=True),
+    val_dataloader=dict(samples_per_gpu=1),
+    test_dataloader=dict(samples_per_gpu=1),
+    train=dict(
+        type='RepeatDataset',
+        times=1000,
+        dataset=dict(
+            type=train_dataset_type,
+            lq_folder='data/DIV2K/DIV2K_train_LR_bicubic/X2_sub',
+            gt_folder='data/DIV2K/DIV2K_train_HR_sub',
+            ann_file='data/DIV2K/meta_info_DIV2K800sub_GT.txt',
+            pipeline=train_pipeline,
+            scale=scale)),
+    val=dict(
+        type=val_dataset_type,
+        lq_folder='data/val_set5/Set5_bicLRx2',
+        gt_folder='data/val_set5/Set5_mod12',
+        pipeline=test_pipeline,
+        scale=scale,
+        filename_tmpl='{}'),
+    test=dict(
+        type=val_dataset_type,
+        lq_folder='data/val_set5/Set5_bicLRx2',
+        gt_folder='data/val_set5/Set5_mod12',
+        pipeline=test_pipeline,
+        scale=scale,
+        filename_tmpl='{}'))
+
+# optimizer
+optimizers = dict(generator=dict(type='Adam', lr=1e-4, betas=(0.9, 0.999)))
+
+# learning policy
+total_iters = 1000000
+lr_config = dict(
+    policy='Step',
+    by_epoch=False,
+    step=[200000, 400000, 600000, 800000],
+    gamma=0.5)
+
+checkpoint_config = dict(interval=5000, save_optimizer=True, by_epoch=False)
+evaluation = dict(interval=5000, save_image=True, gpu_collect=True)
+log_config = dict(
+    interval=100, hooks=[dict(type='TextLoggerHook', by_epoch=False)])
+visual_config = None
+
+# runtime settings
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = f'./work_dirs/{exp_name}'
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
diff --git a/configs/restorers/rdn/rdn_x3c64b16_g1_1000k_div2k.py b/configs/restorers/rdn/rdn_x3c64b16_g1_1000k_div2k.py
new file mode 100644
index 0000000000..a609cba2fd
--- /dev/null
+++ b/configs/restorers/rdn/rdn_x3c64b16_g1_1000k_div2k.py
@@ -0,0 +1,124 @@
+exp_name = 'rdn_x3c64b16_g1_1000k_div2k'
+
+scale = 3
+# model settings
+model = dict(
+    type='BasicRestorer',
+    generator=dict(
+        type='RDN',
+        in_channels=3,
+        out_channels=3,
+        mid_channels=64,
+        num_blocks=16,
+        upscale_factor=scale),
+    pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))
+# model training and testing settings
+train_cfg = None
+test_cfg = dict(metrics=['PSNR', 'SSIM'], crop_border=scale)
+
+# dataset settings
+train_dataset_type = 'SRAnnotationDataset'
+val_dataset_type = 'SRFolderDataset'
+train_pipeline = [
+    dict(
+        type='LoadImageFromFile',
+        io_backend='disk',
+        key='lq',
+        flag='unchanged'),
+    dict(
+        type='LoadImageFromFile',
+        io_backend='disk',
+        key='gt',
+        flag='unchanged'),
+    dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
+    dict(
+        type='Normalize',
+        keys=['lq', 'gt'],
+        mean=[0, 0, 0],
+        std=[1, 1, 1],
+        to_rgb=True),
+    dict(type='PairedRandomCrop', gt_patch_size=96),
+    dict(
+        type='Flip', keys=['lq', 'gt'], flip_ratio=0.5,
+        direction='horizontal'),
+    dict(type='Flip', keys=['lq', 'gt'], flip_ratio=0.5, direction='vertical'),
+    dict(type='RandomTransposeHW', keys=['lq', 'gt'], transpose_ratio=0.5),
+    dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path']),
+    dict(type='ImageToTensor', keys=['lq', 'gt'])
+]
+test_pipeline = [
+    dict(
+        type='LoadImageFromFile',
+        io_backend='disk',
+        key='lq',
+        flag='unchanged'),
+    dict(
+        type='LoadImageFromFile',
+        io_backend='disk',
+        key='gt',
+        flag='unchanged'),
+    dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
+    dict(
+        type='Normalize',
+        keys=['lq', 'gt'],
+        mean=[0, 0, 0],
+        std=[1, 1, 1],
+        to_rgb=True),
+    dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path']),
+    dict(type='ImageToTensor', keys=['lq', 'gt'])
+]
+
+data = dict(
+    workers_per_gpu=1,
+    train_dataloader=dict(samples_per_gpu=16, drop_last=True),
+    val_dataloader=dict(samples_per_gpu=1),
+    test_dataloader=dict(samples_per_gpu=1),
+    train=dict(
+        type='RepeatDataset',
+        times=1000,
+        dataset=dict(
+            type=train_dataset_type,
+            lq_folder='data/DIV2K/DIV2K_train_LR_bicubic/X3_sub',
+            gt_folder='data/DIV2K/DIV2K_train_HR_sub',
+            ann_file='data/DIV2K/meta_info_DIV2K800sub_GT.txt',
+            pipeline=train_pipeline,
+            scale=scale)),
+    val=dict(
+        type=val_dataset_type,
+        lq_folder='data/val_set5/Set5_bicLRx3',
+        gt_folder='data/val_set5/Set5_mod12',
+        pipeline=test_pipeline,
+        scale=scale,
+        filename_tmpl='{}'),
+    test=dict(
+        type=val_dataset_type,
+        lq_folder='data/val_set5/Set5_bicLRx3',
+        gt_folder='data/val_set5/Set5_mod12',
+        pipeline=test_pipeline,
+        scale=scale,
+        filename_tmpl='{}'))
+
+# optimizer
+optimizers = dict(generator=dict(type='Adam', lr=1e-4, betas=(0.9, 0.999)))
+
+# learning policy
+total_iters = 1000000
+lr_config = dict(
+    policy='Step',
+    by_epoch=False,
+    step=[200000, 400000, 600000, 800000],
+    gamma=0.5)
+
+checkpoint_config = dict(interval=5000, save_optimizer=True, by_epoch=False)
+evaluation = dict(interval=5000, save_image=True, gpu_collect=True)
+log_config = dict(
+    interval=100, hooks=[dict(type='TextLoggerHook', by_epoch=False)])
+visual_config = None
+
+# runtime settings
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = f'./work_dirs/{exp_name}'
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
diff --git a/configs/restorers/rdn/rdn_x4c64b16_g1_1000k_div2k.py b/configs/restorers/rdn/rdn_x4c64b16_g1_1000k_div2k.py
new file mode 100644
index 0000000000..78d5677d1d
--- /dev/null
+++ b/configs/restorers/rdn/rdn_x4c64b16_g1_1000k_div2k.py
@@ -0,0 +1,124 @@
+exp_name = 'rdn_x4c64b16_g1_1000k_div2k'
+
+scale = 4
+# model settings
+model = dict(
+    type='BasicRestorer',
+    generator=dict(
+        type='RDN',
+        in_channels=3,
+        out_channels=3,
+        mid_channels=64,
+        num_blocks=16,
+        upscale_factor=scale),
+    pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))
+# model training and testing settings
+train_cfg = None
+test_cfg = dict(metrics=['PSNR', 'SSIM'], crop_border=scale)
+
+# dataset settings
+train_dataset_type = 'SRAnnotationDataset'
+val_dataset_type = 'SRFolderDataset'
+train_pipeline = [
+    dict(
+        type='LoadImageFromFile',
+        io_backend='disk',
+        key='lq',
+        flag='unchanged'),
+    dict(
+        type='LoadImageFromFile',
+        io_backend='disk',
+        key='gt',
+        flag='unchanged'),
+    dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
+    dict(
+        type='Normalize',
+        keys=['lq', 'gt'],
+        mean=[0, 0, 0],
+        std=[1, 1, 1],
+        to_rgb=True),
+    dict(type='PairedRandomCrop', gt_patch_size=128),
+    dict(
+        type='Flip', keys=['lq', 'gt'], flip_ratio=0.5,
+        direction='horizontal'),
+    dict(type='Flip', keys=['lq', 'gt'], flip_ratio=0.5, direction='vertical'),
+    dict(type='RandomTransposeHW', keys=['lq', 'gt'], transpose_ratio=0.5),
+    dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path']),
+    dict(type='ImageToTensor', keys=['lq', 'gt'])
+]
+test_pipeline = [
+    dict(
+        type='LoadImageFromFile',
+        io_backend='disk',
+        key='lq',
+        flag='unchanged'),
+    dict(
+        type='LoadImageFromFile',
+        io_backend='disk',
+        key='gt',
+        flag='unchanged'),
+    dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
+    dict(
+        type='Normalize',
+        keys=['lq', 'gt'],
+        mean=[0, 0, 0],
+        std=[1, 1, 1],
+        to_rgb=True),
+    dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path']),
+    dict(type='ImageToTensor', keys=['lq', 'gt'])
+]
+
+data = dict(
+    workers_per_gpu=1,
+    train_dataloader=dict(samples_per_gpu=16, drop_last=True),
+    val_dataloader=dict(samples_per_gpu=1),
+    test_dataloader=dict(samples_per_gpu=1),
+    train=dict(
+        type='RepeatDataset',
+        times=1000,
+        dataset=dict(
+            type=train_dataset_type,
+            lq_folder='data/DIV2K/DIV2K_train_LR_bicubic/X4_sub',
+            gt_folder='data/DIV2K/DIV2K_train_HR_sub',
+            ann_file='data/DIV2K/meta_info_DIV2K800sub_GT.txt',
+            pipeline=train_pipeline,
+            scale=scale)),
+    val=dict(
+        type=val_dataset_type,
+        lq_folder='data/val_set5/Set5_bicLRx4',
+        gt_folder='data/val_set5/Set5_mod12',
+        pipeline=test_pipeline,
+        scale=scale,
+        filename_tmpl='{}'),
+    test=dict(
+        type=val_dataset_type,
+        lq_folder='data/val_set5/Set5_bicLRx4',
+        gt_folder='data/val_set5/Set5_mod12',
+        pipeline=test_pipeline,
+        scale=scale,
+        filename_tmpl='{}'))
+
+# optimizer
+optimizers = dict(generator=dict(type='Adam', lr=1e-4, betas=(0.9, 0.999)))
+
+# learning policy
+total_iters = 1000000
+lr_config = dict(
+    policy='Step',
+    by_epoch=False,
+    step=[200000, 400000, 600000, 800000],
+    gamma=0.5)
+
+checkpoint_config = dict(interval=5000, save_optimizer=True, by_epoch=False)
+evaluation = dict(interval=5000, save_image=True, gpu_collect=True)
+log_config = dict(
+    interval=100, hooks=[dict(type='TextLoggerHook', by_epoch=False)])
+visual_config = None
+
+# runtime settings
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = f'./work_dirs/{exp_name}'
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
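
Not part of the patch itself: a minimal sketch of how one of the added configs could be loaded and sanity-checked with mmcv's Config utility before launching training. It assumes mmcv is installed and the script is run from the repository root; the printed attributes correspond to the fields defined in the x2 config above.

# Minimal sketch (assumption: mmcv installed, run from the repo root; not part of the patch).
from mmcv import Config

cfg = Config.fromfile('configs/restorers/rdn/rdn_x2c64b16_g1_1000k_div2k.py')
print(cfg.model.generator)               # RDN: mid_channels=64, num_blocks=16, upscale_factor=2
print(cfg.data.train.dataset.lq_folder)  # data/DIV2K/DIV2K_train_LR_bicubic/X2_sub
print(cfg.total_iters)                   # 1000000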