diff --git a/docs/faq.md b/docs/faq.md
index 59a7bc1cf9..db96da729b 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -54,5 +54,7 @@ data = dict(
         gt_folder='data/val_set5/Set5_mod12',
         pipeline=test_pipeline,
         scale=scale,
-        filename_tmpl='{}'),
+        filename_tmpl='{}')
+
+empty_cache = True  # empty cache in every iteration.
 ```
diff --git a/mmedit/apis/test.py b/mmedit/apis/test.py
index d6be06ac82..c72fb15d43 100644
--- a/mmedit/apis/test.py
+++ b/mmedit/apis/test.py
@@ -63,7 +63,8 @@ def multi_gpu_test(model,
                    gpu_collect=False,
                    save_image=False,
                    save_path=None,
-                   iteration=None):
+                   iteration=None,
+                   empty_cache=False):
     """Test model with multiple gpus.
 
     This method tests model with multiple gpus and collects the results
@@ -82,6 +83,7 @@ def multi_gpu_test(model,
         save_path (str): The path to save image. Default: None.
         iteration (int): Iteration number. It is used for the save
             image name. Default: None.
+        empty_cache (bool): empty cache in every iteration. Default: False.
 
     Returns:
         list: The prediction results.
@@ -105,7 +107,8 @@ def multi_gpu_test(model,
                 iteration=iteration,
                 **data)
         results.append(result)
-
+        if empty_cache:
+            torch.cuda.empty_cache()
         if rank == 0:
             # get batch size
             for _, v in data.items():
diff --git a/tools/test.py b/tools/test.py
index 9a4e361156..3f55e89a36 100644
--- a/tools/test.py
+++ b/tools/test.py
@@ -88,6 +88,7 @@ def main():
     model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
 
     args.save_image = args.save_path is not None
+    empty_cache = cfg.get('empty_cache', False)
     if not distributed:
         _ = load_checkpoint(model, args.checkpoint, map_location='cpu')
         model = MMDataParallel(model, device_ids=[0])
@@ -115,7 +116,8 @@ def main():
             args.tmpdir,
             args.gpu_collect,
             save_path=args.save_path,
-            save_image=args.save_image)
+            save_image=args.save_image,
+            empty_cache=empty_cache)
 
     if rank == 0:
         print('')
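
The patch threads a single config flag end to end: `tools/test.py` reads `empty_cache` from the config via `cfg.get('empty_cache', False)` and forwards it to `multi_gpu_test`, which then calls `torch.cuda.empty_cache()` after every test iteration. Below is a minimal, self-contained sketch of that behavior; `run_test_loop` is a hypothetical stand-in for `multi_gpu_test` (the real function also handles distributed result collection, progress reporting, and image saving):

```python
import torch


def run_test_loop(model, data_loader, empty_cache=False):
    """Hypothetical sketch of the per-iteration cache release.

    When `empty_cache` is True, torch.cuda.empty_cache() runs after
    every batch. It returns memory held in PyTorch's caching allocator
    to the GPU driver, lowering peak reserved memory at the cost of
    slower re-allocation on the next batch; it does not free memory
    still referenced by live tensors.
    """
    model.eval()
    results = []
    for data in data_loader:
        with torch.no_grad():
            result = model(test_mode=True, **data)
        results.append(result)
        if empty_cache:
            # Release cached, currently-unused GPU memory blocks.
            torch.cuda.empty_cache()
    return results
```

To enable the option, set `empty_cache = True` at the top level of the test config, as the `docs/faq.md` hunk shows. The flag trades test speed for a smaller reserved-memory footprint, which helps when evaluating large inputs on memory-constrained GPUs.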