Skip to content

Commit

Permalink
Merge pull request #432 from GATEOverflow/mlperf-inference
Browse files Browse the repository at this point in the history
Fixes for latest MLPerf inference changes
  • Loading branch information
arjunsuresh authored Oct 28, 2024
2 parents fe7ec44 + 1a18996 commit cc3bd61
Show file tree
Hide file tree
Showing 2 changed files with 11 additions and 5 deletions.
2 changes: 1 addition & 1 deletion script/app-mlperf-inference/customize.py
Original file line number Diff line number Diff line change
Expand Up @@ -242,7 +242,7 @@ def postprocess(i):
if os.path.exists(env['CM_MLPERF_USER_CONF']):
shutil.copy(env['CM_MLPERF_USER_CONF'], 'user.conf')

result, valid, power_result = mlperf_utils.get_result_from_log(env['CM_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode)
result, valid, power_result = mlperf_utils.get_result_from_log(env['CM_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode, env.get('CM_MLPERF_INFERENCE_SOURCE_VERSION'))
power = None
power_efficiency = None
if power_result:
Expand Down
14 changes: 10 additions & 4 deletions script/get-mlperf-inference-utils/mlperf_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from log_parser import MLPerfLog


def get_result_from_log(version, model, scenario, result_path, mode):
def get_result_from_log(version, model, scenario, result_path, mode, inference_src_version = None):

config = checker.Config(
version,
Expand All @@ -20,7 +20,14 @@ def get_result_from_log(version, model, scenario, result_path, mode):
valid = {}
if mode == "performance":
has_power = os.path.exists(os.path.join(result_path, "..", "power"))
result_ = checker.get_performance_metric(config, mlperf_model, result_path, scenario, None, None, has_power)
version_tuple = None
if inference_src_version:
version_tuple = tuple(map(int, inference_src_version.split('.')))

if version_tuple and version_tuple >= (4,1,22):
result_ = checker.get_performance_metric(config, mlperf_model, result_path, scenario)
else:
result_ = checker.get_performance_metric(config, mlperf_model, result_path, scenario, None, None, has_power)
mlperf_log = MLPerfLog(os.path.join(result_path, "mlperf_log_detail.txt"))
if (
"result_validity" not in mlperf_log.get_keys()
Expand Down Expand Up @@ -133,7 +140,7 @@ def get_accuracy_metric(config, model, path):

return is_valid, acc_results, acc_targets, acc_limits

def get_result_string(version, model, scenario, result_path, has_power, sub_res, division="open", system_json=None, model_precision="fp32"):
def get_result_string(version, model, scenario, result_path, has_power, sub_res, division="open", system_json=None, model_precision="fp32", inference_src_version = None):

config = checker.Config(
version,
Expand All @@ -152,7 +159,6 @@ def get_result_string(version, model, scenario, result_path, has_power, sub_res,
inferred = False
result = {}

inference_src_version = os.environ.get('CM_MLPERF_INFERENCE_SOURCE_VERSION', '')
version_tuple = None
if inference_src_version:
version_tuple = tuple(map(int, inference_src_version.split('.')))
Expand Down

0 comments on commit cc3bd61

Please sign in to comment.