diff --git a/configs/nafnet/nafnet_c64eb11128mb1db1111_8xb8-lr1e-3-400k_gopro.py b/configs/nafnet/nafnet_c64eb11128mb1db1111_8xb8-lr1e-3-400k_gopro.py
index fcb87e7b80..bf68b89a35 100644
--- a/configs/nafnet/nafnet_c64eb11128mb1db1111_8xb8-lr1e-3-400k_gopro.py
+++ b/configs/nafnet/nafnet_c64eb11128mb1db1111_8xb8-lr1e-3-400k_gopro.py
@@ -4,9 +4,6 @@
 work_dir = f'./work_dirs/{experiment_name}'
 save_dir = './work_dirs/'
 
-# DistributedDataParallel
-model_wrapper_cfg = dict(type='MMSeparateDistributedDataParallel')
-
 # model settings
 model = dict(
     type='BaseEditModel',
diff --git a/configs/nafnet/nafnet_c64eb2248mb12db2222_8xb8-lr1e-3-400k_sidd.py b/configs/nafnet/nafnet_c64eb2248mb12db2222_8xb8-lr1e-3-400k_sidd.py
index 1b12cc0053..5235d3c79a 100644
--- a/configs/nafnet/nafnet_c64eb2248mb12db2222_8xb8-lr1e-3-400k_sidd.py
+++ b/configs/nafnet/nafnet_c64eb2248mb12db2222_8xb8-lr1e-3-400k_sidd.py
@@ -4,9 +4,6 @@
 work_dir = f'./work_dirs/{experiment_name}'
 save_dir = './work_dirs/'
 
-# DistributedDataParallel
-model_wrapper_cfg = dict(type='MMSeparateDistributedDataParallel')
-
 # model settings
 model = dict(
     type='BaseEditModel',
@@ -94,11 +91,9 @@
 
 # optimizer
 optim_wrapper = dict(
-    constructor='MultiOptimWrapperConstructor',
-    generator=dict(
-        type='OptimWrapper',
-        optimizer=dict(
-            type='AdamW', lr=1e-3, weight_decay=1e-3, betas=(0.9, 0.9))))
+    constructor='DefaultOptimWrapperConstructor',
+    type='OptimWrapper',
+    optimizer=dict(type='AdamW', lr=1e-3, weight_decay=1e-3, betas=(0.9, 0.9)))
 
 # learning policy
 param_scheduler = dict(