Skip to content

Commit

Permalink
Enable changing the batch size of bert_large_1_10_ipex and revise the evaluation code (intel#1278)
Browse files Browse the repository at this point in the history
  • Loading branch information
XuhuiRen authored Sep 23, 2022
1 parent 5ca1723 commit c98c428
Show file tree
Hide file tree
Showing 3 changed files with 8 additions and 5 deletions.
6 changes: 3 additions & 3 deletions examples/.config/model_params_pytorch.json
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@
"input_model": "",
"yaml": "conf.yaml",
"strategy": "basic",
"batchsize": 8,
"batch_size": 8,
"new_benchmark": false
},
"bert_large_ipex":{
Expand All @@ -78,7 +78,7 @@
"input_model": "",
"yaml": "conf.yaml",
"strategy": "basic",
"batchsize": 8,
"batch_size": 8,
"new_benchmark": false
},
"bert_large_1_10_ipex":{
Expand All @@ -87,7 +87,7 @@
"input_model": "/tf_dataset/pytorch/bert_large_ipex",
"yaml": "conf.yaml",
"strategy": "basic",
"batchsize": 8,
"batch_size": 8,
"new_benchmark": false
},
"resnet50_gpu": {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,7 @@ function run_benchmark {
--max_seq_length 384 \
--no_cuda \
--output_dir $tuned_checkpoint \
--per_gpu_eval_batch_size $batch_size \
$mode_cmd \
${extra_cmd}
fi
Expand All @@ -86,6 +87,7 @@ function run_benchmark {
--max_seq_length 384 \
--no_cuda \
--output_dir $tuned_checkpoint \
--per_gpu_eval_batch_size $batch_size \
$mode_cmd \
${extra_cmd}
fi
Expand All @@ -102,6 +104,7 @@ function run_benchmark {
--doc_stride 128 \
--no_cuda \
--output_dir $tuned_checkpoint \
--per_gpu_eval_batch_size $batch_size \
$mode_cmd \
${extra_cmd}
fi
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -369,8 +369,8 @@ def benchmark_evaluate(args, model, eval_dataloader):
for it, batch in enumerate(eval_dataloader):
if epoch * steps_per_epoch + it >= total_steps:
throughput = args.eval_batch_size * args.perf_run_iters / total_time
print('Batch size = %d' % 1)
print('Latency: %.3f ms' % (throughput / 10**6))
print('Batch size = %d' % args.eval_batch_size)
print('Latency: %.3f ms' % (10**3 / throughput))
print("Throughput: {:.3f} sentence/s".format(throughput))
break
with torch.no_grad():
Expand Down

0 comments on commit c98c428

Please sign in to comment.