[Enhance] update benchmark scripts #1907

Merged (1 commit, Jun 16, 2023)
114 changes: 114 additions & 0 deletions .dev_scripts/inference_benchmark.sh
@@ -0,0 +1,114 @@
python demo/download_inference_resources.py

# Text-to-Image
python demo/mmagic_inference_demo.py \
--model-name stable_diffusion \
--text "A panda is having dinner at KFC" \
--result-out-dir demo_text2image_stable_diffusion_res.png

python demo/mmagic_inference_demo.py \
--model-name controlnet \
--model-setting 1 \
--text "Room with blue walls and a yellow ceiling." \
--control 'https://user-images.githubusercontent.com/28132635/230297033-4f5c32df-365c-4cf4-8e4f-1b76a4cbb0b7.png' \
--result-out-dir demo_text2image_controlnet_canny_res.png

python demo/mmagic_inference_demo.py \
--model-name controlnet \
--model-setting 2 \
--text "masterpiece, best quality, sky, black hair, skirt, sailor collar, looking at viewer, short hair, building, bangs, neckerchief, long sleeves, cloudy sky, power lines, shirt, cityscape, pleated skirt, scenery, blunt bangs, city, night, black sailor collar, closed mouth" \
--control 'https://user-images.githubusercontent.com/28132635/230380893-2eae68af-d610-4f7f-aa68-c2f22c2abf7e.png' \
--result-out-dir demo_text2image_controlnet_pose_res.png

python demo/mmagic_inference_demo.py \
--model-name controlnet \
--model-setting 3 \
--text "black house, blue sky" \
--control 'https://github-production-user-asset-6210df.s3.amazonaws.com/49083766/243599897-553a4c46-c61d-46df-b820-59a49aaf6678.png' \
--result-out-dir demo_text2image_controlnet_seg_res.png

# Conditional GANs
python demo/mmagic_inference_demo.py \
--model-name biggan \
--model-setting 3 \
--label 1 \
--result-out-dir demo_conditional_biggan_res.jpg

# Unconditional GANs
python demo/mmagic_inference_demo.py \
--model-name styleganv1 \
--result-out-dir demo_unconditional_styleganv1_res.jpg

# Image Translation
python demo/mmagic_inference_demo.py \
--model-name pix2pix \
--img ./resources/input/translation/gt_mask_0.png \
--result-out-dir ./resources/output/translation/demo_translation_pix2pix_res.png

# Inpainting
python demo/mmagic_inference_demo.py \
--model-name deepfillv2 \
--img ./resources/input/inpainting/celeba_test.png \
--mask ./resources/input/inpainting/bbox_mask.png \
--result-out-dir ./resources/output/inpainting/demo_inpainting_deepfillv2_res.

# Matting
python demo/mmagic_inference_demo.py \
--model-name gca \
--img ./resources/input/matting/GT05.jpg \
--trimap ./resources/input/matting/GT05_trimap.jpg \
--result-out-dir ./resources/output/matting/demo_matting_gca_res.png

# Image Restoration
python demo/mmagic_inference_demo.py \
--model-name nafnet \
--img ./resources/input/restoration/0901x2.png \
--result-out-dir ./resources/output/restoration/demo_restoration_nafnet_res.png

# Image Super-resolution
python demo/mmagic_inference_demo.py \
--model-name esrgan \
--img ./resources/input/restoration/0901x2.png \
--result-out-dir ./resources/output/restoration/demo_restoration_esrgan_res.png

python demo/mmagic_inference_demo.py \
--model-name ttsr \
--img ./resources/input/restoration/000001.png \
--ref ./resources/input/restoration/000001.png \
--result-out-dir ./resources/output/restoration/demo_restoration_ttsr_res.png

# Video Super-Resolution
python demo/mmagic_inference_demo.py \
--model-name basicvsr \
--video ./resources/input/video_restoration/QUuC4vJs_000084_000094_400x320.mp4 \
--result-out-dir ./resources/output/video_restoration/demo_video_restoration_basicvsr_res.mp4

python demo/mmagic_inference_demo.py \
--model-name edvr \
--extra-parameters window_size=5 \
--video ./resources/input/video_restoration/QUuC4vJs_000084_000094_400x320.mp4 \
--result-out-dir ./resources/output/video_restoration/demo_video_restoration_edvr_res.mp4

python demo/mmagic_inference_demo.py \
--model-name tdan \
--model-setting 2 \
--extra-parameters window_size=5 \
--video ./resources/input/video_restoration/QUuC4vJs_000084_000094_400x320.mp4 \
--result-out-dir ./resources/output/video_restoration/demo_video_restoration_tdan_res.mp4

# Video interpolation
python demo/mmagic_inference_demo.py \
--model-name flavr \
--video ./resources/input/video_interpolation/b-3LLDhc4EU_000000_000010.mp4 \
--result-out-dir ./resources/output/video_interpolation/demo_video_interpolation_flavr_res.mp4

# Image Colorization
python demo/mmagic_inference_demo.py \
--model-name inst_colorization \
--img https://github-production-user-asset-6210df.s3.amazonaws.com/49083766/245713512-de973677-2be8-4915-911f-fab90bb17c40.jpg \
--result-out-dir demo_colorization_res.png

# 3D-aware Generation
python demo/mmagic_inference_demo.py \
--model-name eg3d \
--result-out-dir ./resources/output/eg3d-output
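
The script above is meant to be run end to end. A minimal usage sketch follows; the repository-root working directory and an environment with mmagic and the demo dependencies installed are my assumptions, not something stated in the diff.

# Assumed usage: run from the mmagic repository root with mmagic installed.
# The first command downloads the demo inputs; each subsequent
# mmagic_inference_demo.py call exercises one task/model pair.
bash .dev_scripts/inference_benchmark.sh

# To time a single case, e.g. the Stable Diffusion text-to-image demo
# (the time wrapper is illustrative, not part of the script):
time python demo/mmagic_inference_demo.py \
    --model-name stable_diffusion \
    --text "A panda is having dinner at KFC" \
    --result-out-dir demo_text2image_stable_diffusion_res.png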
32 changes: 23 additions & 9 deletions .dev_scripts/test_benchmark.yml
@@ -1,4 +1,18 @@
 cases:
+  - name: controlnet-1xb1-fill50k
+    params:
+      config: configs/controlnet/controlnet-1xb1-fill50k.py
+      cpus_per_node: 16
+      gpus: 1
+      gpus_per_node: 1
+
+  - name: dreambooth
+    params:
+      config: configs/dreambooth/dreambooth.py
+      cpus_per_node: 16
+      gpus: 1
+      gpus_per_node: 1
+
   - name: basicvsr-pp_c64n7_8xb1-600k_reds4
     params:
       checkpoint: basicvsr_plusplus_c64n7_8x1_600k_reds4_20210217-db622b2f.pth
@@ -44,20 +58,20 @@ cases:
       metrics:
         FID-50k: 3.992
 
-  - name: deepfillv1_256x256_8x2_places
+  - name: deepfillv2_8xb2_celeba-256x256
     params:
-      checkpoint: deepfillv1_256x256_8x2_places_20200619-c00a0e21.pth
-      checkpoint_url: https://download.openmmlab.com/mmediting/inpainting/deepfillv1/deepfillv1_256x256_8x2_places_20200619-c00a0e21.pth
-      config: configs/deepfillv1/deepfillv1_4xb4_celeba-256x256.py
+      checkpoint: deepfillv2_256x256_8x2_celeba_20200619-c96e5f12.pth
+      checkpoint_url: https://download.openmmlab.com/mmediting/inpainting/deepfillv2/deepfillv2_256x256_8x2_celeba_20200619-c96e5f12.pth
+      config: configs/deepfillv2/deepfillv2_8xb2_celeba-256x256.py
       cpus_per_node: 16
       gpus: 8
       gpus_per_node: 8
     results:
-      dataset: Places365-val
+      dataset: CelebA-val
       metrics:
-        PSNR: 23.429
-        SSIM: 0.862
-        l1 error: 11.019
+        PSNR: 25.721
+        SSIM: 0.871
+        l1 error: 5.411
 
   - name: realesrnet_c64b23g32_12x4_lr2e-4_1000k_df2k_ost
     params:
@@ -78,6 +92,6 @@ default_floating_range: 1.0
 model_floating_ranges: {}
 partition: mm_lol
 repo: mmagic
-branch: dev-1.x
+branch: test
 task_type: test
 third_part_libs: []
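
Each entry under cases: pins a config, an optional checkpoint (with its download URL), the CPU/GPU allocation, and the reference metrics a run is compared against (presumably within the file's default_floating_range). As a hedged illustration only, a manual equivalent of the new deepfillv2 test case might look like the following; the tools/test.py invocation assumes the usual OpenMMLab convention, since the benchmark runner's actual launch command is not part of this diff.

# Hypothetical manual run of the deepfillv2 case (assumed tools/test.py convention):
wget https://download.openmmlab.com/mmediting/inpainting/deepfillv2/deepfillv2_256x256_8x2_celeba_20200619-c96e5f12.pth
python tools/test.py \
    configs/deepfillv2/deepfillv2_8xb2_celeba-256x256.py \
    deepfillv2_256x256_8x2_celeba_20200619-c96e5f12.pth
# Reference results from the case above (CelebA-val):
# PSNR 25.721, SSIM 0.871, l1 error 5.411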
32 changes: 23 additions & 9 deletions .dev_scripts/train_benchmark.yml
@@ -1,4 +1,18 @@
 cases:
+  - name: controlnet-1xb1-fill50k
+    params:
+      config: configs/controlnet/controlnet-1xb1-fill50k.py
+      cpus_per_node: 16
+      gpus: 1
+      gpus_per_node: 1
+
+  - name: dreambooth
+    params:
+      config: configs/dreambooth/dreambooth.py
+      cpus_per_node: 16
+      gpus: 1
+      gpus_per_node: 1
+
   - name: basicvsr-pp_c64n7_8xb1-600k_reds4
     params:
       checkpoint: basicvsr_plusplus_c64n7_8x1_600k_reds4_20210217-db622b2f.pth
@@ -44,20 +58,20 @@ cases:
      metrics:
         FID-50k: 3.992
 
-  - name: deepfillv1_256x256_8x2_places
+  - name: deepfillv2_8xb2_celeba-256x256
     params:
-      checkpoint: deepfillv1_256x256_8x2_places_20200619-c00a0e21.pth
-      checkpoint_url: https://download.openmmlab.com/mmediting/inpainting/deepfillv1/deepfillv1_256x256_8x2_places_20200619-c00a0e21.pth
-      config: configs/deepfillv1/deepfillv1_4xb4_celeba-256x256.py
+      checkpoint: deepfillv2_256x256_8x2_celeba_20200619-c96e5f12.pth
+      checkpoint_url: https://download.openmmlab.com/mmediting/inpainting/deepfillv2/deepfillv2_256x256_8x2_celeba_20200619-c96e5f12.pth
+      config: configs/deepfillv2/deepfillv2_8xb2_celeba-256x256.py
       cpus_per_node: 16
       gpus: 8
       gpus_per_node: 8
     results:
-      dataset: Places365-val
+      dataset: CelebA-val
       metrics:
-        PSNR: 23.429
-        SSIM: 0.862
-        l1 error: 11.019
+        PSNR: 25.721
+        SSIM: 0.871
+        l1 error: 5.411
 
   - name: realesrnet_c64b23g32_12x4_lr2e-4_1000k_df2k_ost
     params:
@@ -78,6 +92,6 @@ default_floating_range: 1.0
 model_floating_ranges: {}
 partition: mm_lol
 repo: mmagic
-branch: dev-1.x
+branch: test
 task_type: train
 third_part_libs: []
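
The training file mirrors the test file; the two newly added cases (controlnet-1xb1-fill50k and dreambooth) only declare a config and a single-GPU allocation, with no reference metrics. A rough sketch of running them by hand, assuming the standard tools/train.py entry point rather than the benchmark runner (whose launch command is not shown in this PR):

# Hypothetical single-GPU equivalents of the two new train cases:
python tools/train.py configs/controlnet/controlnet-1xb1-fill50k.py
python tools/train.py configs/dreambooth/dreambooth.py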