enhance benchmark (#604)
Signed-off-by: Xin He <[email protected]>
xin3he authored Mar 9, 2023
1 parent a5d055a commit b445adb
Showing 1 changed file with 22 additions and 11 deletions.
33 changes: 22 additions & 11 deletions neural_compressor/benchmark.py
@@ -37,6 +37,7 @@
 from .utils import logger
 from .conf.pythonic_config import Config
 from .config import BenchmarkConfig
+from .utils.utility import Statistics


 def set_env_var(env_var, value, overwrite_existing=False):
@@ -162,13 +163,11 @@ def __init__(self, conf):
         if self.conf.usr_cfg.model.framework != 'NA':
             self.framework = self.conf.usr_cfg.model.framework.lower()

-    def __call__(self):
+    def __call__(self, raw_cmd=None):
         """Directly call a Benchmark object.

         Args:
-            model: Get the model
-            b_dataloader: Set dataloader for benchmarking
-            b_func: Eval function for benchmark
+            raw_cmd: raw command used for benchmark
         """
         cfg = self.conf.usr_cfg
         assert cfg.evaluation is not None, 'benchmark evaluation filed should not be None...'
@@ -181,7 +180,9 @@ def __call__(self):
         logger.info("Start to run Benchmark.")
         if os.environ.get('NC_ENV_CONF') == 'True':
             return self.run_instance()
-        self.config_instance()
+        if raw_cmd is None:
+            raw_cmd = sys.executable + ' ' + ' '.join(sys.argv)
+        self.config_instance(raw_cmd)
         self.summary_benchmark()
         return None

@@ -204,16 +205,26 @@ def summary_benchmark(self):
                         throughput_l.append(float(throughput.group(1))) if throughput and throughput.group(1) else None
             assert len(latency_l)==len(throughput_l)==num_of_instance, \
                 "Multiple instance benchmark failed with some instance!"
-            logger.info("\n\nMultiple instance benchmark summary: ")
-            logger.info("Latency average: {:.3f} ms".format(sum(latency_l)/len(latency_l)))
-            logger.info("Throughput sum: {:.3f} images/sec".format(sum(throughput_l)))
+
+            output_data = [
+                ["Latency average [second/sample]", "{:.3f}".format(sum(latency_l)/len(latency_l))],
+                ["Throughput sum [samples/second]", "{:.3f}".format(sum(throughput_l))]
+            ]
+            logger.info("********************************************")
+            Statistics(
+                output_data,
+                header='Multiple Instance Benchmark Summary',
+                field_names=["Items", "Result"]).print_stat()
         else:
             # (TODO) should add summary after win32 benchmark has log
             pass

-    def config_instance(self):
-        """Configure the multi-instance commands and trigger benchmark with sub process."""
-        raw_cmd = sys.executable + ' ' + ' '.join(sys.argv)
+    def config_instance(self, raw_cmd):
+        """Configure the multi-instance commands and trigger benchmark with sub process.
+
+        Args:
+            raw_cmd: raw command used for benchmark
+        """
         multi_instance_cmd = ''
         num_of_instance = int(os.environ.get('NUM_OF_INSTANCE'))
         cores_per_instance = int(os.environ.get('CORES_PER_INSTANCE'))
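For context on the new raw_cmd argument: when no command is supplied, __call__ now rebuilds the command line of the current process and passes it to config_instance for the multi-instance launch, instead of config_instance deriving it itself. A minimal sketch of that default, with the explicit-override call shown only as hypothetical usage (the script name and config path are invented, not from this commit):

    import sys

    # Default raw_cmd: reproduce exactly how the current benchmark process was launched.
    default_raw_cmd = sys.executable + ' ' + ' '.join(sys.argv)
    print(default_raw_cmd)

    # Hypothetical usage after this change (names are illustrative):
    #   from neural_compressor.benchmark import Benchmark
    #   benchmark = Benchmark(conf)
    #   benchmark()                                                       # falls back to the default above
    #   benchmark(raw_cmd='python run_benchmark.py --config=conf.yaml')   # explicit command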

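The summary itself is unchanged in substance: per-instance latency and throughput values are still parsed from each instance's log, averaged and summed respectively, but the result is now rendered as a table through the shared Statistics utility rather than plain log lines. A small standalone sketch of that aggregation, with invented per-instance numbers:

    # Illustrative per-instance results (invented values, one entry per instance).
    latency_l = [0.031, 0.032, 0.030]
    throughput_l = [32.1, 31.5, 32.8]

    output_data = [
        ["Latency average [second/sample]", "{:.3f}".format(sum(latency_l) / len(latency_l))],
        ["Throughput sum [samples/second]", "{:.3f}".format(sum(throughput_l))],
    ]

    # With neural_compressor installed, the same table the commit prints:
    #   from neural_compressor.utils.utility import Statistics
    #   Statistics(output_data,
    #              header='Multiple Instance Benchmark Summary',
    #              field_names=["Items", "Result"]).print_stat()
    for item, result in output_data:
        print("{:<35} {}".format(item, result))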