diff --git a/tools/llm_bench/llm_bench_utils/gen_output_data.py b/tools/llm_bench/llm_bench_utils/gen_output_data.py index 99dc74740c..3b7c668c82 100644 --- a/tools/llm_bench/llm_bench_utils/gen_output_data.py +++ b/tools/llm_bench/llm_bench_utils/gen_output_data.py @@ -25,10 +25,10 @@ def gen_iterate_data( iter_data['generation_time'] = gen_time iter_data['latency'] = latency iter_data['result_md5'] = res_md5 - iter_data['first_token_latency'] = '' - iter_data['other_tokens_avg_latency'] = '' - iter_data['first_token_infer_latency'] = '' - iter_data['other_tokens_infer_avg_latency'] = '' + iter_data['first_token_latency'] = -1 + iter_data['other_tokens_avg_latency'] = -1 + iter_data['first_token_infer_latency'] = -1 + iter_data['other_tokens_infer_avg_latency'] = -1 iter_data['max_rss_mem_consumption'] = max_rss_mem iter_data['max_shared_mem_consumption'] = max_shared_mem iter_data['max_uss_mem_consumption'] = max_uss_mem diff --git a/tools/llm_bench/prompts/stable-diffusion.jsonl b/tools/llm_bench/prompts/stable-diffusion.jsonl index 59c23064a5..1dda44e0b4 100644 --- a/tools/llm_bench/prompts/stable-diffusion.jsonl +++ b/tools/llm_bench/prompts/stable-diffusion.jsonl @@ -1 +1,2 @@ -{"steps":"30", "width":"256", "height":"256", "guidance_scale":"1.0", "prompt": "side profile centered painted portrait, Gandhi rolling a blunt, Gloomhaven, matte painting concept art, art nouveau, 8K HD Resolution, beautifully background"} \ No newline at end of file +{"steps":"30", "width":"256", "height":"256", "guidance_scale":"1.0", "prompt": "side profile centered painted portrait, Gandhi rolling a blunt, Gloomhaven, matte painting concept art, art nouveau, 8K HD Resolution, beautifully background"} +{"prompt": "side profile centered painted portrait, Gandhi rolling a blunt, Gloomhaven, matte painting concept art, art nouveau, 8K HD Resolution, beautifully background"} diff --git a/tools/llm_bench/task/image_generation.py b/tools/llm_bench/task/image_generation.py index 
678b2dd43b..b6260568bf 100644 --- a/tools/llm_bench/task/image_generation.py +++ b/tools/llm_bench/task/image_generation.py @@ -45,10 +45,11 @@ def run_image_generation(image_param, num, image_id, pipe, args, iter_data_list, set_seed(args['seed']) input_text = image_param['prompt'] input_args = collects_input_args(image_param, args['model_type'], args['model_name']) - log.info( - f"[{'warm-up' if num == 0 else num}][P{image_id}] Input params: Batch_size={args['batch_size']}, " - f'steps={input_args["num_inference_steps"]}, width={input_args["width"]}, height={input_args["height"]}, guidance_scale={input_args["guidance_scale"]}' - ) + out_str = f"Input params: Batch_size={args['batch_size']}, " \ + f"steps={input_args['num_inference_steps']}, width={input_args['width']}, height={input_args['height']}" + if 'guidance_scale' in input_args: + out_str += f", guidance_scale={input_args['guidance_scale']}" + log.info(f"[{'warm-up' if num == 0 else num}][P{image_id}] {out_str}") result_md5_list = [] max_rss_mem_consumption = '' @@ -107,10 +108,12 @@ def run_image_generation_genai(image_param, num, image_id, pipe, args, iter_data set_seed(args['seed']) input_text = image_param['prompt'] input_args = collects_input_args(image_param, args['model_type'], args['model_name']) - log.info( - f"[{'warm-up' if num == 0 else num}][P{image_id}] Input params: Batch_size={args['batch_size']}, " - f'steps={input_args["num_inference_steps"]}, width={input_args["width"]}, height={input_args["height"]}, guidance_scale={input_args["guidance_scale"]}' - ) + out_str = f"Input params: Batch_size={args['batch_size']}, " \ + f"steps={input_args['num_inference_steps']}, width={input_args['width']}, height={input_args['height']}" + if 'guidance_scale' in input_args: + out_str += f", guidance_scale={input_args['guidance_scale']}" + log.info(f"[{'warm-up' if num == 0 else num}][P{image_id}] {out_str}") + result_md5_list = [] max_rss_mem_consumption = '' max_uss_mem_consumption = ''