Fix keyerror guidance scale (openvinotoolkit#1165)
Fix a KeyError raised by the stable-diffusion benchmark when a prompt does not provide guidance_scale:

[ INFO ] Traceback (most recent call last):
  File "/home/guozhong/openvino.genai_2024_5/openvino.genai/tools/llm_bench/benchmark.py", line 202, in main
    iter_data_list, pretrain_time, iter_timestamp = CASE_TO_BENCH[model_args['use_case']](
  File "/home/guozhong/openvino.genai_2024_5/openvino.genai/tools/llm_bench/task/image_generation.py", line 198, in run_image_generation_benchmark
    image_gen_fn(image_param, num, prompt_idx_list[image_id], pipe, args, iter_data_list, proc_id, mem_consumption)
  File "/home/guozhong/openvino.genai_2024_5/openvino.genai/tools/llm_bench/task/image_generation.py", line 50, in run_image_generation
    f'steps={input_args["num_inference_steps"]}, width={input_args["width"]}, height={input_args["height"]}, guidance_scale={input_args["guidance_scale"]}'
KeyError: 'guidance_scale'
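
The failure mode is simple: a prompt in stable-diffusion.jsonl may omit guidance_scale, so the dict returned by collects_input_args has no such key and the unconditional f-string lookup raises. A minimal standalone sketch of the bug and the guard used in the fix (the contents of input_args here are illustrative, not the real output of collects_input_args):

# Minimal sketch; input_args is illustrative, not the real dict
# built by collects_input_args().
input_args = {"num_inference_steps": 30, "width": 256, "height": 256}  # guidance_scale omitted

# Old code: unconditional key access raises for prompts without the field.
try:
    log_line = f'guidance_scale={input_args["guidance_scale"]}'
except KeyError as err:
    print(f"KeyError: {err}")  # KeyError: 'guidance_scale'

# New code: append the field only when the prompt supplied it.
out_str = f"steps={input_args['num_inference_steps']}, width={input_args['width']}, height={input_args['height']}"
if 'guidance_scale' in input_args:
    out_str += f", guidance_scale={input_args['guidance_scale']}"
print(out_str)  # steps=30, width=256, height=256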

---------

Co-authored-by: Ilya Lavrenov <[email protected]>
wgzintel and ilya-lavrenov authored Nov 7, 2024
1 parent 4051f45 commit 7ae9763
Showing 3 changed files with 17 additions and 13 deletions.
8 changes: 4 additions & 4 deletions tools/llm_bench/llm_bench_utils/gen_output_data.py
@@ -25,10 +25,10 @@ def gen_iterate_data(
     iter_data['generation_time'] = gen_time
     iter_data['latency'] = latency
     iter_data['result_md5'] = res_md5
-    iter_data['first_token_latency'] = ''
-    iter_data['other_tokens_avg_latency'] = ''
-    iter_data['first_token_infer_latency'] = ''
-    iter_data['other_tokens_infer_avg_latency'] = ''
+    iter_data['first_token_latency'] = -1
+    iter_data['other_tokens_avg_latency'] = -1
+    iter_data['first_token_infer_latency'] = -1
+    iter_data['other_tokens_infer_avg_latency'] = -1
     iter_data['max_rss_mem_consumption'] = max_rss_mem
     iter_data['max_shared_mem_consumption'] = max_shared_mem
     iter_data['max_uss_mem_consumption'] = max_uss_mem
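Changing the placeholder for the token-latency fields from '' to -1 keeps the columns numeric, so downstream aggregation can filter on a sentinel instead of special-casing empty strings. A hedged sketch of such a consumer (the helper is hypothetical; only the field name comes from the diff):

def mean_first_token_latency(iter_data_list):
    # Skip iterations where the latency was never measured
    # (gen_iterate_data now stores the sentinel -1 instead of '').
    values = [d['first_token_latency'] for d in iter_data_list
              if d['first_token_latency'] != -1]
    return sum(values) / len(values) if values else -1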
3 changes: 2 additions & 1 deletion tools/llm_bench/prompts/stable-diffusion.jsonl
@@ -1 +1,2 @@
{"steps":"30", "width":"256", "height":"256", "guidance_scale":"1.0", "prompt": "side profile centered painted portrait, Gandhi rolling a blunt, Gloomhaven, matte painting concept art, art nouveau, 8K HD Resolution, beautifully background"}
{"steps":"30", "width":"256", "height":"256", "guidance_scale":"1.0", "prompt": "side profile centered painted portrait, Gandhi rolling a blunt, Gloomhaven, matte painting concept art, art nouveau, 8K HD Resolution, beautifully background"}
{"prompt": "side profile centered painted portrait, Gandhi rolling a blunt, Gloomhaven, matte painting concept art, art nouveau, 8K HD Resolution, beautifully background"}
19 changes: 11 additions & 8 deletions tools/llm_bench/task/image_generation.py
@@ -45,10 +45,11 @@ def run_image_generation(image_param, num, image_id, pipe, args, iter_data_list,
     set_seed(args['seed'])
     input_text = image_param['prompt']
     input_args = collects_input_args(image_param, args['model_type'], args['model_name'])
-    log.info(
-        f"[{'warm-up' if num == 0 else num}][P{image_id}] Input params: Batch_size={args['batch_size']}, "
-        f'steps={input_args["num_inference_steps"]}, width={input_args["width"]}, height={input_args["height"]}, guidance_scale={input_args["guidance_scale"]}'
-    )
+    out_str = f"Input params: Batch_size={args['batch_size']}, " \
+              f"steps={input_args['num_inference_steps']}, width={input_args['width']}, height={input_args['height']}"
+    if 'guidance_scale' in input_args:
+        out_str += f", guidance_scale={input_args['guidance_scale']}"
+    log.info(f"[{'warm-up' if num == 0 else num}][P{image_id}] {out_str}")

     result_md5_list = []
     max_rss_mem_consumption = ''
@@ -107,10 +108,12 @@ def run_image_generation_genai(image_param, num, image_id, pipe, args, iter_data
     set_seed(args['seed'])
     input_text = image_param['prompt']
     input_args = collects_input_args(image_param, args['model_type'], args['model_name'])
-    log.info(
-        f"[{'warm-up' if num == 0 else num}][P{image_id}] Input params: Batch_size={args['batch_size']}, "
-        f'steps={input_args["num_inference_steps"]}, width={input_args["width"]}, height={input_args["height"]}, guidance_scale={input_args["guidance_scale"]}'
-    )
+    out_str = f"Input params: Batch_size={args['batch_size']}, " \
+              f"steps={input_args['num_inference_steps']}, width={input_args['width']}, height={input_args['height']}"
+    if 'guidance_scale' in input_args:
+        out_str += f", guidance_scale={input_args['guidance_scale']}"
+    log.info(f"[{'warm-up' if num == 0 else num}][P{image_id}] {out_str}")

     result_md5_list = []
     max_rss_mem_consumption = ''
     max_uss_mem_consumption = ''
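
The guard is now duplicated verbatim in run_image_generation and run_image_generation_genai; if more optional parameters appear, a shared helper would keep the two paths in sync (a sketch only, not part of this commit):

def format_input_params(batch_size, input_args):
    # Hypothetical helper: builds the same log fragment both benchmark
    # paths currently assemble inline.
    out_str = f"Input params: Batch_size={batch_size}, " \
              f"steps={input_args['num_inference_steps']}, width={input_args['width']}, height={input_args['height']}"
    if 'guidance_scale' in input_args:
        out_str += f", guidance_scale={input_args['guidance_scale']}"
    return out_str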
