From 7cd929d1601e183c8c5e0952688708dff82bf80e Mon Sep 17 00:00:00 2001
From: Submandarine
Date: Fri, 22 Nov 2024 22:18:26 -0500
Subject: [PATCH 1/3] reindent all python files

---
 automation/cache/module.py | 6 +-
 automation/cache/module_misc.py | 2 +-
 automation/cfg/module.py | 30 +-
 automation/challenge/module.py | 2 +-
 automation/contributor/module.py | 12 +-
 automation/data/module.py | 2 +-
 automation/docker/module.py | 2 +-
 automation/docs/module.py | 2 +-
 automation/experiment/module.py | 192 ++++++-------
 automation/project/module.py | 2 +-
 automation/report/module.py | 2 +-
 automation/script/module.py | 180 ++++++------
 automation/script/module_help.py | 8 +-
 automation/script/module_misc.py | 206 +++++++-------
 .../script/template-python/customize.py | 2 +-
 .../script/template-pytorch/customize.py | 2 +-
 automation/utils/module.py | 86 +++---
 automation/utils/module_cfg.py | 64 ++---
 .../customize.py | 2 +-
 .../src/onnx_classify.py | 4 +-
 .../src/pytorch_classify_preprocessed.py | 12 +-
 .../src/classify.py | 30 +-
 .../app-loadgen-generic-python/customize.py | 10 +-
 .../src/backend_pytorch.py | 21 +-
 script/app-loadgen-generic-python/src/main.py | 12 +-
 .../app-loadgen-generic-python/src/utils.py | 2 +-
 script/app-mlperf-inference-amd/customize.py | 2 +-
 .../customize.py | 2 +-
 .../app-mlperf-inference-intel/customize.py | 6 +-
 .../customize.py | 6 +-
 .../customize.py | 8 +-
 .../app-mlperf-inference-nvidia/customize.py | 30 +-
 .../customize.py | 6 +-
 .../app-mlperf-inference-redhat/customize.py | 2 +-
 .../app-mlperf-inference/build_dockerfiles.py | 17 +-
 script/app-mlperf-inference/customize.py | 90 +++---
 .../app-mlperf-training-nvidia/customize.py | 2 +-
 .../customize.py | 2 +-
 .../customize.py | 2 +-
 script/benchmark-program-mlperf/customize.py | 6 +-
 script/benchmark-program/customize.py | 6 +-
 script/build-docker-image/customize.py | 2 +-
 script/build-dockerfile/customize.py | 40 +--
 script/calibrate-model-for.qaic/customize.py | 1 -
 script/compile-model-for.qaic/customize.py | 2 +-
 script/convert-csv-to-md/customize.py | 2 +-
 script/convert-csv-to-md/process.py | 2 +-
 .../customize.py | 4 +-
 script/create-custom-cache-entry/customize.py | 2 +-
 .../customize.py | 2 +-
 .../customize.py | 2 +-
 script/destroy-terraform/customize.py | 2 -
 script/detect-cpu/customize.py | 14 +-
 script/detect-sudo/customize.py | 6 +-
 script/download-file/customize.py | 14 +-
 script/dump-pip-freeze/customize.py | 8 +-
 script/extract-file/customize.py | 14 +-
 .../customize.py | 46 +--
 .../customize.py | 2 +-
 .../generate-mlperf-tiny-report/customize.py | 8 +-
 .../customize.py | 2 +-
 script/generate-nvidia-engine/customize.py | 2 +-
 script/get-android-sdk/customize.py | 26 +-
 script/get-aocl/customize.py | 2 -
 script/get-aria2/customize.py | 4 +-
 script/get-aws-cli/customize.py | 2 +-
 script/get-bazel/customize.py | 2 +-
 script/get-blis/customize.py | 2 -
 script/get-cl/customize.py | 16 +-
 script/get-cmake/customize.py | 2 +-
 script/get-conda/customize.py | 2 +-
 script/get-cuda-devices/customize.py | 10 +-
 script/get-cuda-devices/detect.py | 1 -
 script/get-cuda/customize.py | 4 +-
 script/get-cudnn/customize.py | 158 +++++------
 script/get-dataset-coco/customize.py | 32 +--
 script/get-dataset-coco2014/customize.py | 2 +-
 .../get-dataset-imagenet-helper/customize.py | 4 +-
 script/get-dataset-imagenet-val/customize.py | 1 -
 .../customize.py | 4 +-
 .../customize.py | 4 +-
 .../generate-test-dataset.py | 2 +-
 .../customize.py | 2 +-
 .../customize.py | 8 +-
 script/get-dlrm/customize.py | 1 -
 script/get-docker/customize.py | 2 +-
 .../get-generic-python-lib/detect-version.py | 2 +-
 script/get-generic-sys-util/customize.py | 16 +-
 script/get-git-repo/customize.py | 2 +-
 script/get-github-cli/customize.py | 16 +-
 script/get-go/customize.py | 2 +-
 script/get-google-saxml/customize.py | 4 +-
 script/get-google-test/customize.py | 4 +-
 script/get-ipol-src/customize.py | 8 +-
 script/get-java/customize.py | 64 ++---
 script/get-javac/customize.py | 68 ++---
 script/get-llvm/customize.py | 2 +-
 .../get-ml-model-3d-unet-kits19/customize.py | 4 +-
 .../customize.py | 1 -
 .../customize.py | 4 +-
 script/get-ml-model-gptj/convert_gptj_ckpt.py | 252 ++++++++--------
 .../download_model.py | 128 ++++-----
 script/get-ml-model-mobilenet/customize.py | 4 +-
 .../get-ml-model-neuralmagic-zoo/customize.py | 6 +-
 .../nvidia_patch_retinanet_efficientnms.py | 2 +-
 script/get-ml-model-retinanet/customize.py | 1 -
 .../node-precision-info.py | 2 +-
 script/get-ml-model-rnnt/customize.py | 4 +-
 script/get-ml-model-tiny-resnet/customize.py | 1 -
 script/get-mlperf-inference-src/customize.py | 14 +-
 .../customize.py | 12 +-
 .../get_memory_info.py | 3 +-
 .../mlperf_utils.py | 8 +-
 script/get-nvidia-mitten/customize.py | 4 +-
 script/get-onnxruntime-prebuilt/customize.py | 14 +-
 script/get-platform-details/customize.py | 6 +-
 .../src/generic_preprocess.py | 2 +-
 .../preprocess_object_detection_dataset.py | 1 -
 .../customize.py | 4 +-
 .../preprocess.py | 2 +-
 .../nvidia_preprocess.py | 2 +-
 script/get-python3/customize.py | 2 +-
 script/get-qaic-software-kit/customize.py | 2 +-
 script/get-rclone-config/customize.py | 2 +-
 script/get-rclone/customize.py | 10 +-
 script/get-rocm-devices/customize.py | 10 +-
 script/get-rocm/customize.py | 2 +-
 script/get-sys-utils-cm/customize.py | 16 +-
 script/get-sys-utils-min/customize.py | 14 +-
 script/get-tensorrt/customize.py | 130 ++++-----
 script/get-terraform/customize.py | 2 +-
 script/get-tvm-model/customize.py | 6 +-
 script/get-tvm-model/process.py | 34 +--
 script/gui/app.py | 14 +-
 script/gui/customize.py | 4 +-
 script/gui/graph.py | 100 +++----
 script/gui/misc.py | 53 ++--
 script/gui/playground.py | 8 +-
 script/gui/playground_apps.py | 2 +-
 script/gui/playground_beta.py | 8 +-
 script/gui/playground_challenges.py | 78 ++---
 .../gui/playground_challenges_with_prizes.py | 46 +--
 script/gui/playground_contributors.py | 16 +-
 script/gui/playground_howtorun.py | 46 +--
 script/gui/playground_install.py | 38 +--
 script/gui/playground_reports.py | 4 +-
 script/gui/playground_reproduce.py | 52 ++--
 script/gui/playground_scripts.py | 64 ++---
 script/gui/script.py | 268 +++++++++---------
 .../customize.py | 10 +-
 .../customize.py | 28 +-
 .../customize.py | 12 +-
 script/install-bazel/customize.py | 14 +-
 script/install-cmake-prebuilt/customize.py | 26 +-
 .../customize.py | 2 +-
 script/install-llvm-prebuilt/customize.py | 132 ++++-----
 .../customize.py | 2 +-
 script/install-python-venv/customize.py | 2 +-
 .../customize.py | 2 +-
 script/launch-benchmark/customize.py | 173 ++++++-----
 .../prepare-training-data-bert/customize.py | 2 +-
 .../prepare-training-data-resnet/customize.py | 2 +-
 .../customize.py | 2 +-
 script/print-any-text/customize.py | 3 +-
 script/print-croissant-desc/code.py | 4 +-
 script/process-ae-users/code.py | 4 +-
 script/process-mlperf-accuracy/customize.py | 15 +-
 script/publish-results-to-dashboard/code.py | 6 +-
 script/remote-run-commands/customize.py | 2 -
 script/run-all-mlperf-models/customize.py | 2 +-
 script/run-docker-container/customize.py | 12 +-
script/run-mlperf-inference-app/customize.py | 70 ++--- .../run-mlperf-inference-app/run_mobilenet.py | 3 - .../customize.py | 4 +- script/run-mlperf-power-client/customize.py | 4 +- .../customize.py | 2 +- script/run-vllm-server/customize.py | 2 +- script/runtime-system-infos/customize.py | 6 +- script/set-sqlite-dir/code.py | 1 - script/set-venv/customize.py | 2 +- script/test-cm-core/src/script/test_docker.py | 16 +- .../src/tutorials/test_tutorial_tvm.py | 1 - .../src/tutorials/test_tutorial_tvm_pip_ge.py | 1 - .../src/tutorials/test_tutorial_tvm_pip_vm.py | 1 - script/test-debug/python/main.py | 1 - setup.py | 16 +- tests/script/check.py | 1 - tests/script/test_docker.py | 10 +- tests/tutorials/test_tutorial_tvm.py | 1 - tests/tutorials/test_tutorial_tvm_pip_ge.py | 1 - tests/tutorials/test_tutorial_tvm_pip_vm.py | 1 - 191 files changed, 1909 insertions(+), 1944 deletions(-) diff --git a/automation/cache/module.py b/automation/cache/module.py index 0f0f2be75f..ac2d141131 100644 --- a/automation/cache/module.py +++ b/automation/cache/module.py @@ -18,7 +18,7 @@ def test(self, i): Test automation Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console @@ -57,7 +57,7 @@ def show(self, i): Show cache Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console @@ -77,7 +77,7 @@ def show(self, i): # Check parsed automation if 'parsed_automation' not in i: - return {'return':1, 'error':'automation is not specified'} + return {'return':1, 'error':'automation is not specified'} console = i.get('out') == 'con' diff --git a/automation/cache/module_misc.py b/automation/cache/module_misc.py index cc4a6ac31b..cfb0e8e84d 100644 --- a/automation/cache/module_misc.py +++ b/automation/cache/module_misc.py @@ -1,6 +1,6 @@ import os from cmind import utils - + ############################################################ def copy_to_remote(i): diff --git a/automation/cfg/module.py b/automation/cfg/module.py index f970c7bb26..9dea0683ab 100644 --- a/automation/cfg/module.py +++ b/automation/cfg/module.py @@ -22,7 +22,7 @@ def test(self, i): Test automation Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console @@ -61,7 +61,7 @@ def xset(self, i): Set keys in configuration Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console @@ -105,16 +105,16 @@ def check_to_delete(d): check_to_delete(v) else: if k.endswith('-'): - if k[:-1] in d: - del(d[k[:-1]]) - del(d[k]) + if k[:-1] in d: + del(d[k[:-1]]) + del(d[k]) else: - vsl = str(v).lower() - if vsl == 'none': v = None - elif vsl == 'false': v = False - elif vsl == 'true': v = True + vsl = str(v).lower() + if vsl == 'none': v = None + elif vsl == 'false': v = False + elif vsl == 'true': v = True - d[k]=v + d[k]=v utils.merge_dicts({'dict1':config, 'dict2':new_config, 'append_lists':True, 'append_unique':True}) @@ -136,7 +136,7 @@ def load(self, i): Load configuration Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console @@ -160,7 +160,7 @@ def load(self, i): def _find_cfg_artifact(self, i): """ Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console @@ -188,7 +188,7 @@ def _find_cfg_artifact(self, i): artifact = i.get('artifact', '') - if artifact == '': + if artifact == '': ii['artifact'] = 'default' tags = ii.get('tags', '') @@ -200,11 +200,11 @@ def _find_cfg_artifact(self, i): ii['tags'] = tags automation = ii['automation'] - if automation!='.' 
and ',' not in automation: + if automation!='.' and ',' not in automation: ii['automation'] = automation + ',' + self.meta['uid'] # Add placeholder (use common action) - + ii['action']='find' ii['out']='' ii['common']=True # Avoid recursion - use internal CM add function to add the script artifact diff --git a/automation/challenge/module.py b/automation/challenge/module.py index be8d6e7b1d..c6e81eed4c 100644 --- a/automation/challenge/module.py +++ b/automation/challenge/module.py @@ -18,7 +18,7 @@ def test(self, i): Test automation Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console diff --git a/automation/contributor/module.py b/automation/contributor/module.py index 82807638f8..89fee477f0 100644 --- a/automation/contributor/module.py +++ b/automation/contributor/module.py @@ -18,7 +18,7 @@ def test(self, i): Test automation Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console @@ -57,7 +57,7 @@ def add(self, i): Add CM script Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console @@ -97,9 +97,9 @@ def add(self, i): if r['return']>0: return r elif r['return']==0 and len(r['list'])>0: return {'return':1, 'error':'CM artifact with name {} already exists in {}'.format(name, r['list'][0].path)} - + meta = i.get('meta',{}) - + # Prepare meta org = meta.get('organization','') if org=='': @@ -128,7 +128,7 @@ def add(self, i): i['action'] = 'add' i['automation'] = self_automation i['artifact'] = artifact - + i['meta'] = meta print ('') @@ -139,7 +139,7 @@ def add(self, i): path = r['path'] path2 = os.path.dirname(path) - + print ('') print ('Please go to {}, add your directory to Git, commit and create PR:'.format(path2)) print ('') diff --git a/automation/data/module.py b/automation/data/module.py index be8d6e7b1d..c6e81eed4c 100644 --- a/automation/data/module.py +++ b/automation/data/module.py @@ -18,7 +18,7 @@ def test(self, i): Test automation Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console diff --git a/automation/docker/module.py b/automation/docker/module.py index aaf0f7802c..9e5339bd0a 100644 --- a/automation/docker/module.py +++ b/automation/docker/module.py @@ -18,7 +18,7 @@ def test(self, i): Test automation Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console diff --git a/automation/docs/module.py b/automation/docs/module.py index be8d6e7b1d..c6e81eed4c 100644 --- a/automation/docs/module.py +++ b/automation/docs/module.py @@ -18,7 +18,7 @@ def test(self, i): Test automation Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console diff --git a/automation/experiment/module.py b/automation/experiment/module.py index 57fa6f6458..1ea8d43022 100644 --- a/automation/experiment/module.py +++ b/automation/experiment/module.py @@ -1,4 +1,4 @@ -# Universal experiment automation to support universal benchmarking +# Universal experiment automation to support universal benchmarking # and optimization of apps and systems # # Written by Grigori Fursin @@ -19,7 +19,7 @@ class CAutomation(Automation): CM_RESULT_FILE = 'cm-result.json' CM_INPUT_FILE = 'cm-input.json' CM_OUTPUT_FILE = 'cm-output.json' - + ############################################################ def __init__(self, cmind, automation_file): super().__init__(cmind, __file__) @@ -30,7 +30,7 @@ def test(self, i): Test automation Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console @@ -63,16 +63,16 @@ def test(self, i): 
return {'return':0} - - - + + + ############################################################ def run(self, i): """ Run experiment Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console @@ -80,8 +80,8 @@ def run(self, i): (tags) (str): experiment tags separated by comma (dir) (str): force recording into a specific directory - - + + (script) (str): find and run CM script by name (s) @@ -110,11 +110,11 @@ def run(self, i): # Find or add artifact based on repo/alias/tags r = self._find_or_add_artifact(i) if r['return']>0: return r - + experiment = r['experiment'] - + console = i.get('out','')=='con' - + # Print experiment folder experiment_path = experiment.path @@ -140,7 +140,7 @@ def run(self, i): print ('Select experiment:') datetimes = sorted(datetimes) - + num = 0 print ('') for d in datetimes: @@ -163,7 +163,7 @@ def run(self, i): datetime = datetimes[selection] - + if datetime!='': experiment_path2 = os.path.join(experiment_path, datetime) else: @@ -207,10 +207,10 @@ def run(self, i): r = utils.save_json(file_name=experiment_input_file, meta=ii_copy) if r['return']>0: return r - + # Prepare run command cmd = '' - + unparsed = i.get('unparsed_cmd', []) if len(unparsed)>0: for u in unparsed: @@ -229,32 +229,32 @@ def run(self, i): # Prepare exploration # Note that from Python 3.7, dictionaries are ordered so we can define order for exploration in json/yaml # ${{XYZ}} ${{ABC(range(1,2,3))}} - + # Extract exploration expressions from {{VAR{expression}}} explore = i.get('explore', {}) j = 1 k = 0 while j>=0: - j = cmd.find('}}}', k) - if j>=0: - k = j+1 + j = cmd.find('}}}', k) + if j>=0: + k = j+1 - l = cmd.rfind('{{',0, j) + l = cmd.rfind('{{',0, j) - if l>=0: - l2 = cmd.find('{', l+2, j) - if l2>=0: - k = l2+1 + if l>=0: + l2 = cmd.find('{', l+2, j) + if l2>=0: + k = l2+1 - var = cmd[l+2:l2] - expr = cmd[l2+1:j] + var = cmd[l+2:l2] + expr = cmd[l2+1:j] - explore[var] = expr + explore[var] = expr + + cmd = cmd[:l2]+ cmd[j+1:] - cmd = cmd[:l2]+ cmd[j+1:] - # Separate Design Space Exploration into var and range explore_keys=[] explore_dimensions=[] @@ -281,9 +281,9 @@ def run(self, i): ii_copy = copy.deepcopy(ii) for dimensions in steps: - + step += 1 - + print ('================================================================') print ('Experiment step: {} out of {}'.format(step, num_steps)) @@ -330,9 +330,9 @@ def run(self, i): # Prepare and run experiment in a given placeholder directory os.chdir(experiment_path3) - + ii['env'] = env - + # Change only in CMD env_local={'CD':cur_dir, 'CM_EXPERIMENT_STEP':str(step), @@ -340,38 +340,38 @@ def run(self, i): 'CM_EXPERIMENT_PATH2':experiment_path2, 'CM_EXPERIMENT_PATH3':experiment_path3} - + # Update {{}} in CMD cmd_step = cmd - + j = 1 k = 0 while j>=0: - j = cmd_step.find('{{', k) - if j>=0: - k = j - l = cmd_step.find('}}',j+2) - if l>=0: - var = cmd_step[j+2:l] + j = cmd_step.find('{{', k) + if j>=0: + k = j + l = cmd_step.find('}}',j+2) + if l>=0: + var = cmd_step[j+2:l] - # Such vars must be in env - if var not in env and var not in env_local: - return {'return':1, 'error':'key "{}" is not in env during exploration'.format(var)} + # Such vars must be in env + if var not in env and var not in env_local: + return {'return':1, 'error':'key "{}" is not in env during exploration'.format(var)} - if var in env: - value = env[var] - else: - value = env_local[var] + if var in env: + value = env[var] + else: + value = env_local[var] - cmd_step = cmd_step[:j] + str(value) + cmd_step[l+2:] + cmd_step = cmd_step[:j] + 
str(value) + cmd_step[l+2:] ii['command'] = cmd_step - + print ('Generated CMD:') print ('') print (cmd_step) print ('') - + # Prepare experiment step input experiment_step_input_file = os.path.join(experiment_path3, self.CM_INPUT_FILE) @@ -401,7 +401,7 @@ def run(self, i): result = flatten_result except: pass - + # Add extra info result['uid'] = uid result['iso_datetime'] = current_datetime @@ -420,46 +420,46 @@ def run(self, i): r = utils.save_json(file_name=experiment_result_file, meta = all_results) if r['return']>0: return r - + rr = {'return':0, 'experiment_path':experiment_path, 'experiment_path2':experiment_path2} - + return rr - - + + ############################################################ def rerun(self, i): """ Rerun experiment - + cm run experiment --rerun=True ... """ i['rerun']=True return self.run(i) - - - - - - - - - - - - + + + + + + + + + + + + ############################################################ def replay(self, i): """ Replay experiment Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console @@ -485,11 +485,11 @@ def replay(self, i): i['fail_if_not_found']=True r = self._find_or_add_artifact(i) if r['return']>0: return r - + experiment = r['experiment'] - + console = i.get('out','')=='con' - + # Print experiment folder experiment_path = experiment.path @@ -510,7 +510,7 @@ def replay(self, i): if len(datetimes)==0: return {'return':1, 'error':'experiment(s) not found in {}'.format(experiment_path)} - + # Check datetime directory found_result = {} @@ -524,7 +524,7 @@ def replay(self, i): datetime = d experiment_path2 = os.path.join(experiment_path, datetime) break - + if len(found_result)==0: return {'return':1, 'error':'couldn\'t find result with UID {} in {}'.format(uid, experiment_path)} @@ -536,7 +536,7 @@ def replay(self, i): print ('Available experiments:') datetimes = sorted(datetimes) - + num = 0 print ('') for d in datetimes: @@ -545,7 +545,7 @@ def replay(self, i): if not console: return {'return':1, 'error':'more than 1 experiment found.\nPlease use "cm run experiment --dir={date and time}"'} - + print ('') x=input('Make your selection or press Enter for 0: ') @@ -558,7 +558,7 @@ def replay(self, i): selection = 0 datetime = datetimes[selection] - + # Final path to experiment experiment_path2 = os.path.join(experiment_path, datetime) @@ -581,7 +581,7 @@ def replay(self, i): print ('Available Unique IDs of results:') results = sorted(results, key=lambda x: x.get('uid','')) - + num = 0 print ('') for r in results: @@ -604,7 +604,7 @@ def replay(self, i): found_result = results[selection] uid = found_result['uid'] - + # Final info if console: print ('') @@ -629,21 +629,21 @@ def replay(self, i): if tags!='': tags+=',' tags+='replay' cm_input['tags'] = tags - + if console: print ('') print ('Experiment input:') print ('') print (json.dumps(cm_input, indent=2)) print ('') - + # Run experiment again r = self.cmind.access(cm_input) if r['return']>0: return r # TBA - validate experiment, etc ... 
- - + + return {'return':0} @@ -653,7 +653,7 @@ def _find_or_add_artifact(self, i): Find or add experiment artifact (reused in run and reply) Args: - (CM input dict): + (CM input dict): (fail_if_not_found) (bool) - if True, fail if experiment is not found @@ -666,7 +666,7 @@ def _find_or_add_artifact(self, i): * (error) (str): error string if return>0 experiment (CM artifact class): Experiment artifact - + """ console = i.get('out','')=='con' @@ -692,7 +692,7 @@ def _find_or_add_artifact(self, i): print ('More than 1 experiment artifact found:') lst = sorted(lst, key=lambda x: x.path) - + num = 0 print ('') for e in lst: @@ -702,7 +702,7 @@ def _find_or_add_artifact(self, i): if not console: return {'return':1, 'error':'more than 1 experiment artifact found.\nPlease use "cm run experiment {name}" or "cm run experiment --tags={tags separated by comma}"'} - + print ('') x=input('Make your selection or press Enter for 0: ') @@ -719,10 +719,10 @@ def _find_or_add_artifact(self, i): elif len(lst)==1: experiment = lst[0] else: - # Create new entry + # Create new entry if i.get('fail_if_not_found',False): return {'return':1, 'error':'experiment not found'} - + ii = copy.deepcopy(ii_copy) ii['action']='add' r = self.cmind.access(ii) @@ -740,7 +740,7 @@ def _find_or_add_artifact(self, i): return {'return':1, 'error':'created experiment artifact with UID {} but can\'t find it - weird'.format(experiment_uid)} experiment = lst[0] - + return {'return':0, 'experiment':experiment} ############################################################ @@ -749,7 +749,7 @@ def _find_uid(self, i): Find experiment result with a given UID Args: - (CM input dict): + (CM input dict): path (str): path to experiment artifact datetime (str): sub-path to experiment @@ -766,7 +766,7 @@ def _find_uid(self, i): path_to_file (str): path to experiment result file meta (dict): complete list of all results result (dict): result dictionary with a given UID - + """ path = i['path'] @@ -802,8 +802,8 @@ def flatten_dict(d, flat_dict = {}, prefix = ''): v = d[k] if type(v) is dict: - flatten_dict(v, flat_dict, prefix+k+'.') + flatten_dict(v, flat_dict, prefix+k+'.') else: - flat_dict[prefix+k] = v + flat_dict[prefix+k] = v return flat_dict diff --git a/automation/project/module.py b/automation/project/module.py index be8d6e7b1d..c6e81eed4c 100644 --- a/automation/project/module.py +++ b/automation/project/module.py @@ -18,7 +18,7 @@ def test(self, i): Test automation Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console diff --git a/automation/report/module.py b/automation/report/module.py index be8d6e7b1d..c6e81eed4c 100644 --- a/automation/report/module.py +++ b/automation/report/module.py @@ -18,7 +18,7 @@ def test(self, i): Test automation Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console diff --git a/automation/script/module.py b/automation/script/module.py index 84572e146b..7bc4afb558 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -728,14 +728,14 @@ def _run(self, i): # Prune cache list with the selected script if len(list_of_found_scripts) > 0: - script_artifact_uid = list_of_found_scripts[select_script].meta['uid'] + script_artifact_uid = list_of_found_scripts[select_script].meta['uid'] - new_cache_list = [] - for cache_entry in cache_list: - if cache_entry.meta['associated_script_artifact_uid'] == script_artifact_uid: - new_cache_list.append(cache_entry) + new_cache_list = [] + for cache_entry in cache_list: + if 
cache_entry.meta['associated_script_artifact_uid'] == script_artifact_uid: + new_cache_list.append(cache_entry) - cache_list = new_cache_list + cache_list = new_cache_list # Here a specific script is found and meta obtained # Set some useful local variables @@ -1531,9 +1531,9 @@ def _run(self, i): cached_tags.append('version-' + r['version']) if len(r.get('add_extra_cache_tags',[]))>0: - for t in r['add_extra_cache_tags']: - if t not in cached_tags: - cached_tags.append(t) + for t in r['add_extra_cache_tags']: + if t not in cached_tags: + cached_tags.append(t) if print_env: @@ -1721,7 +1721,7 @@ def _run(self, i): if len(script_prefix)>0: env_script.insert(where_to_add, '\n') for x in reversed(script_prefix): - env_script.insert(where_to_add, x) + env_script.insert(where_to_add, x) if shell: x=['cmd', '.', '','.bat',''] if os_info['platform'] == 'windows' else ['bash', ' ""', '"','.sh','. ./'] @@ -1749,7 +1749,7 @@ def _run(self, i): os.system(x) if not version and detected_version: - version = detected_version + version = detected_version # Add detected or forced version to the CM script run time state # to aggregate all resolved versions and dump them at the end @@ -1804,17 +1804,17 @@ def _run(self, i): docker_template_path = os.path.join(self.path, 'docker_repro_example') if os.path.isdir(docker_template_path): try: - shutil.copytree(docker_template_path, repro_dir, dirs_exist_ok=True) + shutil.copytree(docker_template_path, repro_dir, dirs_exist_ok=True) except Exception as e: - pass + pass docker_container = self._get_docker_container(cmd, run_state) try: - with open (os.path.join(repro_dir, 'ubuntu-23.04.Dockerfile'), 'a+') as f: - f.write(docker_container) + with open (os.path.join(repro_dir, 'ubuntu-23.04.Dockerfile'), 'a+') as f: + f.write(docker_container) except: - pass + pass if print_readme: with open('README-cm.md', 'w') as f: @@ -1838,7 +1838,7 @@ def _run(self, i): if repro_prefix !='': with open(repro_prefix+'-README-cm.md', 'w', encoding='utf-8') as f: - f.write(readme) + f.write(readme) dump_repro(repro_prefix, rr, run_state) @@ -1956,7 +1956,7 @@ def _update_state_from_variations(self, i, meta, variation_tags, variations, env run_state['variation_groups'] = variation_groups # Add variation(s) if specified in the "tags" input prefixed by _ - # If there is only 1 default variation, then just use it or substitute from CMD + # If there is only 1 default variation, then just use it or substitute from CMD default_variation = meta.get('default_variation', '') @@ -2368,7 +2368,7 @@ def search(self, i): for script in r['list']: # This should not be logging since the output can be consumed by other external tools and scripts # logging.info(script.path) - print (script.path) + print (script.path) # Finalize output r['script_tags'] = script_tags @@ -2415,7 +2415,7 @@ def test(self, i): # Check parsed automation if 'parsed_automation' not in i: - return {'return':1, 'error':'automation is not specified'} + return {'return':1, 'error':'automation is not specified'} console = i.get('out') == 'con' @@ -2461,7 +2461,7 @@ def test(self, i): run_inputs = run_inputs[index_index-1:] else: run_inputs = [ run_inputs[input_index - 1] ] - + for run_input in run_inputs: if test_input_id: if run_input.get('id', '') != test_input_id: @@ -2648,11 +2648,11 @@ def add(self, i): script_name = '' if 'script_name' in i: - script_name = i.get('script_name','').strip() - del(i['script_name']) + script_name = i.get('script_name','').strip() + del(i['script_name']) - if script_name != '' and not 
os.path.isfile(script_name): - return {'return':1, 'error':'file {} not found'.format(script_name)} + if script_name != '' and not os.path.isfile(script_name): + return {'return':1, 'error':'file {} not found'.format(script_name)} # Move tags from input to meta of the newly created script artifact tags_list = utils.convert_tags_to_list(i) @@ -2678,10 +2678,10 @@ def add(self, i): template = i.get('template','') if template == '': - if i.get('python', False): - template = 'python' - elif i.get('pytorch', False): - template = 'pytorch' + if i.get('python', False): + template = 'python' + elif i.get('pytorch', False): + template = 'pytorch' if template!='': template_dir += '-'+template @@ -3475,9 +3475,9 @@ def find_file_in_paths(self, i): if not duplicate: skip = False if hook!=None: - r=hook({'file':f}) - if r['return']>0: return r - skip = r['skip'] + r=hook({'file':f}) + if r['return']>0: return r + skip = r['skip'] if not skip: found_files.append(f) @@ -3515,9 +3515,9 @@ def find_file_in_paths(self, i): if not duplicate: skip = False if hook!=None: - r=hook({'file':f}) - if r['return']>0: return r - skip = r['skip'] + r=hook({'file':f}) + if r['return']>0: return r + skip = r['skip'] if not skip: found_files.append(f) @@ -3561,27 +3561,27 @@ def find_file_in_paths(self, i): run_script_input['recursion_spaces'] = recursion_spaces if rx['return']>0: - if rx['return'] != 2: - return rx + if rx['return'] != 2: + return rx else: - # Version was detected - detected_version = rx.get('version','') - - if detected_version != '': - if detected_version == -1: - logging.info(recursion_spaces + ' SKIPPED due to incompatibility ...') - else: - ry = check_version_constraints({'detected_version': detected_version, - 'version': version, - 'version_min': version_min, - 'version_max': version_max, - 'cmind':self.cmind}) - if ry['return']>0: return ry - - if not ry['skip']: - found_files_with_good_version.append(path_to_file) - else: - logging.info(recursion_spaces + ' SKIPPED due to version constraints ...') + # Version was detected + detected_version = rx.get('version','') + + if detected_version != '': + if detected_version == -1: + logging.info(recursion_spaces + ' SKIPPED due to incompatibility ...') + else: + ry = check_version_constraints({'detected_version': detected_version, + 'version': version, + 'version_min': version_min, + 'version_max': version_max, + 'cmind':self.cmind}) + if ry['return']>0: return ry + + if not ry['skip']: + found_files_with_good_version.append(path_to_file) + else: + logging.info(recursion_spaces + ' SKIPPED due to version constraints ...') found_files = found_files_with_good_version @@ -3669,19 +3669,19 @@ def detect_version_using_script(self, i): run_script_input['recursion_spaces'] = recursion_spaces if rx['return'] == 0: - # Version was detected - detected_version = rx.get('version','') + # Version was detected + detected_version = rx.get('version','') - if detected_version != '': - ry = check_version_constraints({'detected_version': detected_version, - 'version': version, - 'version_min': version_min, - 'version_max': version_max, - 'cmind':self.cmind}) - if ry['return']>0: return ry + if detected_version != '': + ry = check_version_constraints({'detected_version': detected_version, + 'version': version, + 'version_min': version_min, + 'version_max': version_max, + 'cmind':self.cmind}) + if ry['return']>0: return ry - if not ry['skip']: - return {'return':0, 'detected_version':detected_version} + if not ry['skip']: + return {'return':0, 
'detected_version':detected_version} return {'return':16, 'error':'version was not detected'} @@ -3878,14 +3878,14 @@ def find_file_deep(self, i): p2 = os.path.join(p, f) if os.path.isdir(p2): - r = self.find_file_deep({'paths':[p2], 'file_name': file_name, 'restrict_paths':restrict_paths}) - if r['return']>0: return r + r = self.find_file_deep({'paths':[p2], 'file_name': file_name, 'restrict_paths':restrict_paths}) + if r['return']>0: return r - found_paths += r['found_paths'] + found_paths += r['found_paths'] else: - if f == file_name: - found_paths.append(p) - break + if f == file_name: + found_paths.append(p) + break if len(found_paths) > 0 and len(restrict_paths) > 0: filtered_found_paths = [] @@ -3982,12 +3982,12 @@ def parse_version(self, i): match_text = match_text, fail_if_no_match = 'version was not detected') if r['return']>0: - if r.get('string','')!='': - r['error'] += ' ({})'.format(r['string']) - return r + if r.get('string','')!='': + r['error'] += ' ({})'.format(r['string']) + return r string = r['string'] - + if r['match'].lastindex and r['match'].lastindex >= group_number: version = r['match'].group(group_number) else: @@ -4196,7 +4196,7 @@ def docker(self, i): (docker_shm_size) (docker_extra_run_args) - + Returns: (CM return dict): @@ -4255,10 +4255,10 @@ def clean_some_tmp_files(self, i): cur_work_dir = env.get('CM_TMP_CURRENT_SCRIPT_WORK_PATH','') if cur_work_dir !='' and os.path.isdir(cur_work_dir): - for x in ['tmp-run.bat', 'tmp-state.json']: - xx = os.path.join(cur_work_dir, x) - if os.path.isfile(xx): - os.remove(xx) + for x in ['tmp-run.bat', 'tmp-state.json']: + xx = os.path.join(cur_work_dir, x) + if os.path.isfile(xx): + os.remove(xx) return {'return':0} @@ -4777,15 +4777,15 @@ def prepare_and_run_script_with_postprocessing(i, postprocess="postprocess"): # Check if print files when error print_files = meta.get('print_files_if_script_error', []) if len(print_files)>0: - for pr in print_files: - if os.path.isfile(pr): - r = utils.load_txt(file_name = pr) - if r['return'] == 0: - logging.info("========================================================") - logging.info("Print file {}:".format(pr)) - logging.info("") - logging.info(r['string']) - logging.info("") + for pr in print_files: + if os.path.isfile(pr): + r = utils.load_txt(file_name = pr) + if r['return'] == 0: + logging.info("========================================================") + logging.info("Print file {}:".format(pr)) + logging.info("") + logging.info(r['string']) + logging.info("") # Check where to report errors and failures diff --git a/automation/script/module_help.py b/automation/script/module_help.py index e27d756877..5094207d5d 100644 --- a/automation/script/module_help.py +++ b/automation/script/module_help.py @@ -1,6 +1,6 @@ import os from cmind import utils - + # Pring help about script def print_help(i): @@ -44,8 +44,8 @@ def print_help(i): v = input_description[k] if v.get('sort',0)>0: sorted_keys.append(k) - - + + print ('') print ('Available flags (Python API dict keys):') print ('') @@ -94,7 +94,7 @@ def print_help(i): r = {'return':0} - if skip_delayed_help: + if skip_delayed_help: r['skip_delayed_help'] = True return r diff --git a/automation/script/module_misc.py b/automation/script/module_misc.py index eab3909bf3..fe6e6aacd8 100644 --- a/automation/script/module_misc.py +++ b/automation/script/module_misc.py @@ -1,6 +1,6 @@ import os from cmind import utils - + # Meta deps def process_deps(self_module, meta, meta_url, md_script_readme, key, extra_space='', skip_from_meta=False, 
skip_if_empty=False): @@ -32,7 +32,7 @@ def process_deps(self_module, meta, meta_url, md_script_readme, key, extra_space y.append(extra_space+' * {}:
\n`{}`'.format(k2, str(conditions))) if len(names)>0: - y.append(extra_space+' * CM names: `--adr.'+str(names)+'...`') + y.append(extra_space+' * CM names: `--adr.'+str(names)+'...`') # Attempt to find related CM scripts @@ -79,7 +79,7 @@ def doc(i): Add CM automation. Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console @@ -130,7 +130,7 @@ def doc(i): # Search for automations in repos lst = [] - + for repo in list_of_repos: parsed_artifact[1] = ('',repo) if utils.is_cm_uid(repo) else (repo,'') ii['parsed_artifact'] = parsed_artifact @@ -229,12 +229,12 @@ def doc(i): readme_about = '' if os.path.isfile(path_readme_about): r = utils.load_txt(path_readme_about, split = True) - if r['return']>0: return - + if r['return']>0: return + s = r['string'] readme_about = r['list'] - + ####################################################################### # Start automatically generated README md_script_readme = [ @@ -243,7 +243,7 @@ def doc(i): # '{{CM_README_TOC}}', # '', # '', - 'Automatically generated README for this automation recipe: **{}**'.format(meta['alias']), + 'Automatically generated README for this automation recipe: **{}**'.format(meta['alias']), ] @@ -268,19 +268,19 @@ def doc(i): # md_script_readme.append('*'+name+'*') # md_script_readme.append('') - - - + + + if os.path.isfile(path_readme): r = utils.load_txt(path_readme, split = True) - if r['return']>0: return - + if r['return']>0: return + s = r['string'] readme = r['list'] if not 'automatically generated' in s.lower(): found_path_readme_extra = True - + # Attempt to rename to README-extra.md if os.path.isfile(path_readme_extra): return {'return':1, 'error':'README.md is not auto-generated and README-extra.md already exists - can\'t rename'} @@ -291,17 +291,17 @@ def doc(i): os.chdir(path) os.system('git add README-extra.md') os.chdir(cur_dir) - + if category!='': md_script_readme.append('') md_script_readme.append('Category: **{}**'.format(category)) - + md_script_readme.append('') md_script_readme.append('License: **Apache 2.0**') - + md_script_readme.append('') if developers == '': @@ -312,9 +312,9 @@ def doc(i): x = '* [{}]({})'.format(alias, url) if name !='': x+=' *('+name+')*' toc.append(x) - - + + cm_readme_extra = '[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name={},{}) ] '.format(alias, uid) if os.path.isfile(path_readme_extra): @@ -346,7 +346,7 @@ def doc(i): ] toc_readme.append(x) - + # if category != '': # x = 'Category' # md_script_readme.append('___') @@ -369,7 +369,7 @@ def doc(i): md_script.append(x) md_script_readme.append(x) - + x = '* GitHub directory for this script: *[GitHub]({})*'.format(url) md_script.append(x) md_script_readme.append(x) @@ -402,7 +402,7 @@ def doc(i): input_mapping_pointer="[--input_flags]" else: input_mapping_pointer='' - + cli_all_tags = '`cm run script --tags={}`'.format(','.join(tags)) cli_all_tags3 = '`cm run script --tags={}{} {}`'.format(','.join(tags), variation_pointer, input_mapping_pointer) x = '* CM CLI with all tags: {}*'.format(cli_all_tags) @@ -444,7 +444,7 @@ def doc(i): - + md_script.append('') # md_script_readme.append('') @@ -473,8 +473,8 @@ def doc(i): # '' ] - - + + # Add usage x1 = 'Reuse this script in your project' x1a = 'Install MLCommons CM automation meta-framework' @@ -517,7 +517,7 @@ def doc(i): # '3. {}'.format(cli_uid), ''] - + x = ' and check the [Gettings Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.' 
if len(variation_keys)>0: md_script_readme += ['* *See the list of `variations` [here](#variations)'+x+'*', @@ -558,10 +558,10 @@ def doc(i): x = '```python\nr=cm.access({... , "'+key0+'":...}\n```' md_script_readme.append(x) - - - - + + + + md_script_readme += ['#### '+x3, '', '
', @@ -607,16 +607,16 @@ def doc(i): toc_readme.append(' '+x3) toc_readme.append(' '+x3a) toc_readme.append(' '+x4) - + x = 'Customization' md_script_readme.append('___') md_script_readme.append('### '+x) md_script_readme.append('') toc_readme.append(x) - - - + + + if len(variation_keys)>0: # x = 'Variation groups' # md_script_readme.append('___') @@ -630,7 +630,7 @@ def doc(i): # Normally should not use anymore. Should use default:true inside individual variations. default_variation = meta.get('default_variation','') - + for variation_key in sorted(variation_keys): variation = variations[variation_key] @@ -638,7 +638,7 @@ def doc(i): if alias!='': aliases = variation_alias.get(alias, []) - if variation_key not in aliases: + if variation_key not in aliases: aliases.append(variation_key) variation_alias[alias]=aliases @@ -651,7 +651,7 @@ def doc(i): # Check outdated if default_variation == variation_key: default = True - + extra1 = '' extra2 = '' if default: @@ -662,7 +662,7 @@ def doc(i): md_var = [] - + md_var.append('* {}`_{}`{}'.format(extra1, variation_key, extra2)) variation_md[variation_key] = md_var @@ -681,7 +681,7 @@ def doc(i): variation_groups[group].append(variation_key) - + x = 'Variations' md_script_readme.append('') md_script_readme.append('#### '+x) @@ -691,7 +691,7 @@ def doc(i): for variation in sorted(variation_groups): if variation not in variation_groups_order: variation_groups_order.append(variation) - + for group_key in variation_groups_order: md_script_readme.append('') @@ -714,7 +714,7 @@ def doc(i): aliases = variation_alias.get(variation_key,[]) aliases2 = ['_'+v for v in aliases] - + if len(aliases)>0: xmd.append(' - Aliases: `{}`'.format(','.join(aliases2))) @@ -726,8 +726,8 @@ def doc(i): xmd.append(' - Workflow:') for dep in ['deps', 'prehook_deps', 'posthook_deps', 'post_deps']: - process_deps(self_module, variation, meta_url, xmd, dep, ' ', True, True) - + process_deps(self_module, variation, meta_url, xmd, dep, ' ', True, True) + for x in xmd: md_script_readme.append(' '+x) @@ -745,7 +745,7 @@ def doc(i): md_script_readme.append('') md_script_readme.append('') toc_readme.append(' '+x) - + for v in vvc: vv = ['_'+x for x in v] md_script_readme.append('* `'+','.join(vv)+'`') @@ -760,7 +760,7 @@ def doc(i): md_script_readme.append('`{}`'.format(','.join(dv))) - + # Check if has valid_variation_combinations vvc = meta.get('valid_variation_combinations', []) if len(vvc)>0: @@ -771,15 +771,15 @@ def doc(i): md_script_readme.append('') md_script_readme.append('') toc_readme.append(' '+x) - + for v in vvc: vv = ['_'+x for x in v] md_script_readme.append('* `'+','.join(vv)+'`') - - + + # Check input flags if input_mapping and len(input_mapping)>0: x = 'Script flags mapped to environment' @@ -807,8 +807,8 @@ def doc(i): md_script_readme.append('') md_script_readme.append('
') md_script_readme.append('') - - + + # Default environment default_env = meta.get('default_env',{}) @@ -831,13 +831,13 @@ def doc(i): md_script_readme.append('') md_script_readme.append('') md_script_readme.append('') - - - - + + + + if len(version_keys)>0 or default_version!='': x = 'Versions' # md_script_readme.append('___') @@ -853,7 +853,7 @@ def doc(i): md_script_readme.append('* `{}`'.format(version)) - + # Add workflow x = 'Dependencies on other CM scripts' md_script_readme += ['___', @@ -878,7 +878,7 @@ def doc(i): r = utils.load_txt(path_customize, split=True) if r['return']>0: return r - + customize = r['string'] customize_l = r['list'] @@ -895,18 +895,18 @@ def doc(i): # if 'def postprocess' in l: # found_postprocess = True # else: - j = l.find(' env[') - if j>=0: - j1 = l.find(']', j+4) - if j1>=0: - j2 = l.find('=',j1+1) - if j2>=0: - key2 = l[j+5:j1].strip() - key=key2[1:-1] - - if key.startswith('CM_') and 'TMP' not in key and key not in found_output_env: - found_output_env.append(key) - + j = l.find(' env[') + if j>=0: + j1 = l.find(']', j+4) + if j1>=0: + j2 = l.find('=',j1+1) + if j2>=0: + key2 = l[j+5:j1].strip() + key=key2[1:-1] + + if key.startswith('CM_') and 'TMP' not in key and key not in found_output_env: + found_output_env.append(key) + process_deps(self_module, meta, meta_url, md_script_readme, 'deps') x = '' @@ -917,7 +917,7 @@ def doc(i): md_script_readme.append((' 1. '+x+'Run "preprocess" function from {}'+x).format(y)) process_deps(self_module, meta, meta_url, md_script_readme, 'prehook_deps') - + # Check scripts files = os.listdir(path) x = '' @@ -930,7 +930,7 @@ def doc(i): md_script_readme.append((' 1. '+x+'Run native script if exists'+x).format(y)) md_script_readme += y - + process_deps(self_module, meta, meta_url, md_script_readme, 'posthook_deps') x = '' @@ -943,7 +943,7 @@ def doc(i): process_deps(self_module, meta, meta_url, md_script_readme, 'post_deps') # md_script_readme.append('') md_script_readme.append('') - + # New environment new_env_keys = meta.get('new_env_keys',[]) @@ -977,7 +977,7 @@ def doc(i): if add: found_output_env_filtered.append(key) - + x = 'New environment keys auto-detected from customize' md_script_readme.append('#### '+x) toc_readme.append(x) @@ -1017,7 +1017,7 @@ def doc(i): s = s.replace('{{CM_README_EXTRA}}', cm_readme_extra) # s = s.replace('{{CM_SEE_README_EXTRA}}', cm_see_readme_extra) s = s.replace('{{CM_README_TOC}}', toc_readme_string) - + r = utils.save_txt(path_readme, s) if r['return']>0: return r @@ -1054,7 +1054,7 @@ def doc(i): category_link = category.lower().replace(' ','-').replace('/','') toc_category_string += '* [{}](#{})\n'.format(category, category_link) - + # Load template r = utils.load_txt(os.path.join(self_module.path, template_file)) if r['return']>0: return r @@ -1087,12 +1087,12 @@ def doc(i): # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # This function takes in a host path and returns the absolute path on host and the container -# If mounts is passed, the function appends the host path and the container path to mounts in the form "host_path:container_path" +# If mounts is passed, the function appends the host path and the container path to mounts in the form "host_path:container_path" def update_path_for_docker(path, mounts=None, force_path_target=''): path_orig = '' path_target = '' - + if path!='': # and (os.path.isfile(path) or os.path.isdir(path)): path = os.path.abspath(path) @@ -1101,7 +1101,7 @@ def update_path_for_docker(path, mounts=None, force_path_target=''): if os.name == 
'nt': from pathlib import PureWindowsPath, PurePosixPath - + x = PureWindowsPath(path_orig) path_target = str(PurePosixPath('/', *x.parts[1:])) @@ -1125,22 +1125,22 @@ def update_path_for_docker(path, mounts=None, force_path_target=''): if to_add: mounts.append(x) - + return (path_orig, path_target) ############################################################ def process_inputs(i): import copy - + i_run_cmd_arc = i['run_cmd_arc'] docker_settings = i['docker_settings'] mounts = i['mounts'] - + # Check if need to update/map/mount inputs and env i_run_cmd = copy.deepcopy(i_run_cmd_arc) - + def get_value_using_key_with_dots(d, k): v = None j = k.find('.') @@ -1167,7 +1167,7 @@ def get_value_using_key_with_dots(d, k): v = d.get(k) return v, d, k - + docker_input_paths = docker_settings.get('input_paths',[]) if len(i_run_cmd)>0: for k in docker_input_paths: @@ -1180,7 +1180,7 @@ def get_value_using_key_with_dots(d, k): if path_target!='': i_run_cmd2[k2] = path_target - + return {'return':0, 'run_cmd':i_run_cmd} @@ -1240,7 +1240,7 @@ def regenerate_script_cmd(i): skip_input_for_fake_run = docker_settings.get('skip_input_for_fake_run', []) add_quotes_to_keys = docker_settings.get('add_quotes_to_keys', []) - + def rebuild_flags(i_run_cmd, fake_run, skip_input_for_fake_run, add_quotes_to_keys, key_prefix): @@ -1253,7 +1253,7 @@ def rebuild_flags(i_run_cmd, fake_run, skip_input_for_fake_run, add_quotes_to_ke tags_position = keys.index('tags') del(keys[tags_position]) keys = ['tags']+keys - + for k in keys: # Assemble long key if dictionary long_key = key_prefix @@ -1261,12 +1261,12 @@ def rebuild_flags(i_run_cmd, fake_run, skip_input_for_fake_run, add_quotes_to_ke long_key+=k if fake_run and long_key in skip_input_for_fake_run: - continue + continue v = i_run_cmd[k] - + q = '\\"' if long_key in add_quotes_to_keys else '' - + if type(v)==dict: run_cmd += rebuild_flags(v, fake_run, skip_input_for_fake_run, add_quotes_to_keys, long_key) elif type(v)==list: @@ -1296,10 +1296,10 @@ def aux_search(i): inp = i['input'] repos = inp.get('repos','') -# Grigori Fursin remarked on 20240412 because this line prevents +# Grigori Fursin remarked on 20240412 because this line prevents # from searching for scripts in other public or private repositories. # Not sure why we enforce just 2 repositories -# +# # if repos == '': repos='internal,a4705959af8e447a' parsed_artifact = inp.get('parsed_artifact',[]) @@ -1537,7 +1537,7 @@ def dockerfile(i): env['CM_DOCKER_PRE_RUN_COMMANDS'] = docker_run_final_cmds docker_path = i.get('docker_path', '').strip() - if docker_path == '': + if docker_path == '': docker_path = script_path dockerfile_path = os.path.join(docker_path, 'dockerfiles', dockerfilename_suffix +'.Dockerfile') @@ -1675,7 +1675,7 @@ def docker(i): if type(i.get('docker', None)) == dict: # Grigori started cleaning and refactoring this code on 20240929 - # + # # 1. 
use --docker dictionary instead of --docker_{keys} if utils.compare_versions(current_cm_version, '2.3.8.1') >= 0: @@ -1729,13 +1729,13 @@ def docker(i): if type(docker_cfg) == bool or str(docker_cfg).lower() in ['true','yes']: docker_cfg= '' - r = self_module.cmind.access({'action':'select_cfg', - 'automation':'utils,dc2743f8450541e3', - 'tags':'basic,docker,configurations', - 'title':'docker', + r = self_module.cmind.access({'action':'select_cfg', + 'automation':'utils,dc2743f8450541e3', + 'tags':'basic,docker,configurations', + 'title':'docker', 'alias':docker_cfg, 'uid':docker_cfg_uid}) - if r['return'] > 0: + if r['return'] > 0: if r['return'] == 16: return {'return':1, 'error':'Docker configuration {} was not found'.format(docker_cfg)} return r @@ -1924,7 +1924,7 @@ def docker(i): for tmp_value in tmp_values: if tmp_value in env: host_env_key = tmp_value - new_host_mount = get_host_path(env[tmp_value]) + new_host_mount = get_host_path(env[tmp_value]) else:# we skip those mounts mounts[index] = None skip = True @@ -1950,12 +1950,12 @@ def docker(i): mounts[index] = new_host_mount+":"+new_container_mount if host_env_key: container_env_string += " --env.{}={} ".format(host_env_key, container_env_key) - + for v in docker_input_mapping: if docker_input_mapping[v] == host_env_key: - i[v] = container_env_key + i[v] = container_env_key i_run_cmd[v] = container_env_key - + mounts = list(filter(lambda item: item is not None, mounts)) mount_string = "" if len(mounts)==0 else ",".join(mounts) @@ -1998,13 +1998,13 @@ def docker(i): cm_repo=i.get('docker_cm_repo', docker_settings.get('cm_repo', 'mlcommons@cm4mlops')) docker_path = i.get('docker_path', '').strip() - if docker_path == '': + if docker_path == '': docker_path = script_path dockerfile_path = os.path.join(docker_path, 'dockerfiles', dockerfilename_suffix +'.Dockerfile') # Skips docker run cmd and gives an interactive shell to the user - docker_skip_run_cmd = i.get('docker_skip_run_cmd', docker_settings.get('skip_run_cmd', False)) + docker_skip_run_cmd = i.get('docker_skip_run_cmd', docker_settings.get('skip_run_cmd', False)) docker_pre_run_cmds = i.get('docker_pre_run_cmds', []) + docker_settings.get('pre_run_cmds', []) diff --git a/automation/script/template-python/customize.py b/automation/script/template-python/customize.py index 10214b87df..1982792527 100644 --- a/automation/script/template-python/customize.py +++ b/automation/script/template-python/customize.py @@ -17,7 +17,7 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') print (' ENV CM_VAR1: {}'.format(env.get('CM_VAR1',''))) - + return {'return':0} def postprocess(i): diff --git a/automation/script/template-pytorch/customize.py b/automation/script/template-pytorch/customize.py index 10214b87df..1982792527 100644 --- a/automation/script/template-pytorch/customize.py +++ b/automation/script/template-pytorch/customize.py @@ -17,7 +17,7 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') print (' ENV CM_VAR1: {}'.format(env.get('CM_VAR1',''))) - + return {'return':0} def postprocess(i): diff --git a/automation/utils/module.py b/automation/utils/module.py index df4898410d..1af7e600dc 100644 --- a/automation/utils/module.py +++ b/automation/utils/module.py @@ -18,7 +18,7 @@ def test(self, i): Test automation Args: - (CM input dict): + (CM input dict): (out) (str): if 'con', output to console @@ -56,7 +56,7 @@ def get_host_os_info(self, i): """ Get some host platform name (currently windows or linux) and OS bits - Args: + Args: (CM input dict): 
(bits) (str): force host platform bits @@ -267,7 +267,7 @@ def unzip_file(self, i): """ Unzip file - Args: + Args: (CM input dict): filename (str): explicit file name @@ -292,7 +292,7 @@ def unzip_file(self, i): console = i.get('out') == 'con' - # Attempt to read cmr.json + # Attempt to read cmr.json file_name_handle = open(file_name, 'rb') file_name_zip = zipfile.ZipFile(file_name_handle) @@ -345,7 +345,7 @@ def compare_versions(self, i): """ Compare versions - Args: + Args: version1 (str): version 1 version2 (str): version 2 @@ -393,7 +393,7 @@ def json2yaml(self, i): """ Convert JSON file to YAML - Args: + Args: input (str): input file (.json) (output) (str): output file (.yaml) @@ -431,7 +431,7 @@ def yaml2json(self, i): """ Convert YAML file to JSON - Args: + Args: input (str): input file (.yaml) (output) (str): output file (.json) @@ -469,7 +469,7 @@ def sort_json(self, i): """ Sort JSON file - Args: + Args: input (str): input file (.json) (output) (str): output file @@ -506,7 +506,7 @@ def dos2unix(self, i): """ Convert DOS file to UNIX (remove \r) - Args: + Args: input (str): input file (.txt) (output) (str): output file @@ -543,7 +543,7 @@ def replace_string_in_file(self, i): """ Convert DOS file to UNIX (remove \r) - Args: + Args: input (str): input file (.txt) (output) (str): output file @@ -562,7 +562,7 @@ def replace_string_in_file(self, i): input_file = i.get('input', '') if input_file == '': return {'return':1, 'error':'please specify --input={txt file}'} - + string = i.get('string', '') if string == '': return {'return':1, 'error':'please specify --string={string to replace}'} @@ -570,12 +570,12 @@ def replace_string_in_file(self, i): replacement = i.get('replacement', '') if replacement == '': return {'return':1, 'error':'please specify --replacement={string to replace}'} - + output_file = i.get('output','') if output_file=='': output_file = input_file - + r = utils.load_txt(input_file, check_if_exists = True) if r['return']>0: return r @@ -593,7 +593,7 @@ def create_toc_from_md(self, i): """ Convert DOS file to UNIX (remove \r) - Args: + Args: input (str): input file (.md) (output) (str): output file (input+'.toc) @@ -608,12 +608,12 @@ def create_toc_from_md(self, i): input_file = i.get('input', '') if input_file == '': return {'return':1, 'error':'please specify --input={txt file}'} - + output_file = i.get('output','') if output_file=='': output_file = input_file + '.toc' - + r = utils.load_txt(input_file, check_if_exists = True) if r['return']>0: return r @@ -645,12 +645,12 @@ def create_toc_from_md(self, i): x = x.replace(z, '') y = ' '*(2*(j-1)) + '* ['+title+'](#'+x+')' - + toc.append(y) toc.append('') toc.append('') - + r = utils.save_txt(output_file, '\n'.join(toc)+'\n') if r['return']>0: return r @@ -661,7 +661,7 @@ def copy_to_clipboard(self, i): """ Copy string to a clipboard - Args: + Args: string (str): string to copy to a clipboard (add_quotes) (bool): add quotes to the string in a clipboard @@ -725,13 +725,13 @@ def copy_to_clipboard(self, i): warning = format(e) rr = {'return':0} - + if failed: if not i.get('skip_fail',False): return {'return':1, 'error':warning} - rr['warning']=warning - + rr['warning']=warning + return rr ############################################################################## @@ -739,7 +739,7 @@ def list_files_recursively(self, i): """ List files and concatenate into string separate by comma - Args: + Args: Returns: (CM return dict): @@ -772,19 +772,19 @@ def generate_secret(self, i): """ Generate secret for web apps - Args: + 
Args: Returns: (CM return dict): secret (str): secret - + * return (int): return code == 0 if no error and >0 if error * (error) (str): error string if return>0 """ import secrets - s = secrets.token_urlsafe(16) + s = secrets.token_urlsafe(16) print (s) @@ -795,19 +795,19 @@ def detect_tags_in_artifact(self, i): """ Detect if there are tags in an artifact name (spaces) and update input - Args: + Args: input (dict) : original input Returns: (CM return dict): - + * return (int): return code == 0 if no error and >0 if error * (error) (str): error string if return>0 """ inp = i['input'] - + artifact = inp.get('artifact','') if artifact == '.': del(inp['artifact']) @@ -824,12 +824,12 @@ def prune_input(self, i): """ Leave only input keys and remove the rest (to regenerate CM commands) - Args: + Args: input (dict) : original input - (extra_keys_starts_with) (list): remove keys that starts + (extra_keys_starts_with) (list): remove keys that starts with the ones from this list - + Returns: (CM return dict): @@ -840,10 +840,10 @@ def prune_input(self, i): """ import copy - + inp = i['input'] extra_keys = i.get('extra_keys_starts_with',[]) - + i_run_cmd_arc = copy.deepcopy(inp) for k in inp: remove = False @@ -855,7 +855,7 @@ def prune_input(self, i): remove = True break - if remove: + if remove: del(i_run_cmd_arc[k]) return {'return':0, 'new_input':i_run_cmd_arc} @@ -894,7 +894,7 @@ def system(self, i): Run system command and redirect output to string. Args: - (CM input dict): + (CM input dict): * cmd (str): command line * (path) (str): go to this directory and return back to current @@ -921,7 +921,7 @@ def system(self, i): path = i.get('path','') if path!='' and os.path.isdir(path): cur_dir = os.getcwd() - os.chdir(path) + os.chdir(path) if i.get('stdout','')!='': fn1=i['stdout'] @@ -947,7 +947,7 @@ def system(self, i): std = '' stdout = '' stderr = '' - + if os.path.isfile(fn1): r = utils.load_txt(file_name = fn1, remove_after_read = fn1_delete) if r['return'] == 0: stdout = r['string'].strip() @@ -1004,7 +1004,7 @@ def select_cfg(self, i): """ i['self_module'] = self - + return utils.call_internal_module(self, __file__, 'module_cfg', 'select_cfg', i) ############################################################ @@ -1027,7 +1027,7 @@ def print_yaml(self, i): filename = i.get('file', '') if filename == '': return {'return':1, 'error':'please specify --file={YAML file}'} - + r = utils.load_yaml(filename,check_if_exists = True) if r['return']>0: return r @@ -1035,7 +1035,7 @@ def print_yaml(self, i): import json print (json.dumps(meta, indent=2)) - + return {'return':0} ############################################################ @@ -1058,7 +1058,7 @@ def print_json(self, i): filename = i.get('file', '') if filename == '': return {'return':1, 'error':'please specify --file={JSON file}'} - + r = utils.load_json(filename,check_if_exists = True) if r['return']>0: return r @@ -1066,5 +1066,5 @@ def print_json(self, i): import json print (json.dumps(meta, indent=2)) - + return {'return':0} diff --git a/automation/utils/module_cfg.py b/automation/utils/module_cfg.py index 04ab0a9ad1..9e58d6ab47 100644 --- a/automation/utils/module_cfg.py +++ b/automation/utils/module_cfg.py @@ -33,25 +33,25 @@ def load_cfg(i): prune_meta_key_uid = prune.get('meta_key_uid', '') prune_uid = prune.get('uid', '') prune_list = prune.get('list',[]) - + # Checking individual files inside CM entry selection = [] - + if i.get('skip_files', False): for l in lst: - meta = l.meta - full_path = l.path - - meta['full_path']=full_path - 
- add = True - - if prune_key!='' and prune_key_uid!='': - if prune_key_uid not in meta.get(prune_key, []): - add = False - - if add: - selection.append(meta) + meta = l.meta + full_path = l.path + + meta['full_path']=full_path + + add = True + + if prune_key!='' and prune_key_uid!='': + if prune_key_uid not in meta.get(prune_key, []): + add = False + + if add: + selection.append(meta) else: for l in lst: path = l.path @@ -59,14 +59,14 @@ def load_cfg(i): main_meta = l.meta skip = False - + if prune_meta_key!='' and prune_meta_key_uid!='': if prune_meta_key_uid not in main_meta.get(prune_meta_key, []): skip = True - + if skip: continue - + all_tags = main_meta.get('tags',[]) files = os.listdir(path) @@ -101,7 +101,7 @@ def load_cfg(i): r = process_base(meta, full_path) if r['return']>0: return r meta = r['meta'] - + uid = meta['uid'] # Check pruning @@ -110,7 +110,7 @@ def load_cfg(i): if len(prune)>0: if prune_uid!='' and uid != prune_uid: add = False - + if add and len(prune_list)>0 and uid not in prune_list: add = False @@ -121,7 +121,7 @@ def load_cfg(i): meta['full_path']=full_path add_all_tags = copy.deepcopy(all_tags) - + name = meta.get('name','') if name=='': name = ' '.join(meta.get('tags',[])) @@ -134,7 +134,7 @@ def load_cfg(i): add_all_tags += [v.lower() for v in name.split(' ')] else: add_all_tags += file_tags.split(',') - + meta['all_tags']=add_all_tags meta['main_meta']=main_meta @@ -154,17 +154,17 @@ def process_base(meta, full_path): filename = _base full_path_base = os.path.dirname(full_path) - + if not filename.endswith('.yaml') and not filename.endswith('.json'): return {'return':1, 'error':'_base file {} in {} must be .yaml or .json'.format(filename, full_path)} - + if ':' in _base: x = _base.split(':') name = x[0] full_path_base = base_path.get(name, '') if full_path_base == '': - + # Find artifact r = cmind.access({'action':'find', 'automation':'cfg', @@ -174,21 +174,21 @@ def process_base(meta, full_path): lst = r['list'] if len(lst)==0: - if not os.path.isfile(path): + if not os.path.isfile(path): return {'return':1, 'error':'_base artifact {} not found in {}'.format(name, full_path)} full_path_base = lst[0].path - + base_path[name] = full_path_base - + filename = x[1] - + # Load base path = os.path.join(full_path_base, filename) - if not os.path.isfile(path): + if not os.path.isfile(path): return {'return':1, 'error':'_base file {} not found in {}'.format(filename, full_path)} - + if path in base_path_meta: base = copy.deepcopy(base_path_meta[path]) else: @@ -233,7 +233,7 @@ def select_cfg(i): uid = i.get('uid', '') title = i.get('title', '') - # Check if alias is not provided + # Check if alias is not provided r = self_module.cmind.access({'action':'find', 'automation':'cfg', 'tags':'basic,docker,configurations'}) if r['return'] > 0: return r diff --git a/script/app-image-classification-onnx-py/customize.py b/script/app-image-classification-onnx-py/customize.py index 0b2c7c0a47..1009beb13b 100644 --- a/script/app-image-classification-onnx-py/customize.py +++ b/script/app-image-classification-onnx-py/customize.py @@ -45,7 +45,7 @@ def postprocess(i): except Exception as e: print ('CM warning: {}'.format(e)) - + try: import yaml with open(fyaml, 'w', encoding='utf-8') as f: diff --git a/script/app-image-classification-onnx-py/src/onnx_classify.py b/script/app-image-classification-onnx-py/src/onnx_classify.py index 00baaab149..8057c39b5a 100644 --- a/script/app-image-classification-onnx-py/src/onnx_classify.py +++ 
b/script/app-image-classification-onnx-py/src/onnx_classify.py @@ -141,7 +141,7 @@ def load_a_batch(batch_filenames): batch_predictions = sess.run([output_layer_name], {input_layer_name: batch_data})[0] cm_status = {'classifications':[]} - + print ('') top_classification = '' for in_batch_idx in range(batch_size): @@ -169,4 +169,4 @@ def load_a_batch(batch_filenames): # Record cm_status to embedded it into CM workflows with open('tmp-run-state.json', 'w') as cm_file: - cm_file.write(json.dumps({'cm_app_image_classification_onnx_py':cm_status}, sort_keys=True, indent=2)) + cm_file.write(json.dumps({'cm_app_image_classification_onnx_py':cm_status}, sort_keys=True, indent=2)) diff --git a/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py b/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py index f3ee0b587d..1afe3271b8 100644 --- a/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py +++ b/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py @@ -84,7 +84,7 @@ def main(): normalize_data_bool=True subtract_mean_bool=False - + from PIL import Image def load_and_resize_image(image_filepath, height, width): @@ -115,13 +115,13 @@ def load_and_resize_image(image_filepath, height, width): return nchw_data BATCH_COUNT=1 - - + + for batch_index in range(BATCH_COUNT): batch_number = batch_index+1 if FULL_REPORT or (batch_number % 10 == 0): print("\nBatch {} of {}".format(batch_number, BATCH_COUNT)) - + begin_time = time.time() if image_path=='': @@ -172,10 +172,10 @@ def load_and_resize_image(image_filepath, height, width): for class_idx in top5_indices: print("\t{}\t{}\t{}".format(class_idx, softmax_vector[class_idx], labels[class_idx])) print("") - + test_time = time.time() - test_time_begin - + if BATCH_COUNT > 1: avg_classification_time = (total_classification_time - first_classification_time) / (images_loaded - BATCH_SIZE) else: diff --git a/script/app-image-classification-tvm-onnx-py/src/classify.py b/script/app-image-classification-tvm-onnx-py/src/classify.py index 0eb299f2df..1dbea81431 100644 --- a/script/app-image-classification-tvm-onnx-py/src/classify.py +++ b/script/app-image-classification-tvm-onnx-py/src/classify.py @@ -44,24 +44,24 @@ def resize_with_aspectratio(img, out_height, out_width, scale=87.5, inter_pol=cv # returns list of pairs (prob, class_index) def get_top5(all_probs): - probs_with_classes = [] + probs_with_classes = [] - for class_index in range(len(all_probs)): - prob = all_probs[class_index] - probs_with_classes.append((prob, class_index)) + for class_index in range(len(all_probs)): + prob = all_probs[class_index] + probs_with_classes.append((prob, class_index)) - sorted_probs = sorted(probs_with_classes, key = lambda pair: pair[0], reverse=True) - return sorted_probs[0:5] + sorted_probs = sorted(probs_with_classes, key = lambda pair: pair[0], reverse=True) + return sorted_probs[0:5] def run_case(dtype, image, target): - # Check image + # Check image import os import json import sys STAT_REPEAT=os.environ.get('STAT_REPEAT','') if STAT_REPEAT=='' or STAT_REPEAT==None: - STAT_REPEAT=10 + STAT_REPEAT=10 STAT_REPEAT=int(STAT_REPEAT) # FGG: set model files via CM env @@ -137,10 +137,10 @@ def run_case(dtype, image, target): # Init TVM # TBD: add tvm platform selector if os.environ.get('USE_CUDA','')=='yes': - # TVM package must be built with CUDA enabled - ctx = tvm.cuda(0) + # TVM package must be built with CUDA enabled + ctx = tvm.cuda(0) else: - ctx = tvm.cpu(0) + ctx = 
tvm.cpu(0) tvm_ctx = ctx build_conf = {'relay.backend.use_auto_scheduler': False} @@ -236,8 +236,8 @@ def run_case(dtype, image, target): else: - inp={inputs[0]:np.array([img], dtype=np.float32)} - output=sess.run(outputs, inp) + inp={inputs[0]:np.array([img], dtype=np.float32)} + output=sess.run(outputs, inp) @@ -262,7 +262,7 @@ def run_case(dtype, image, target): } with open('tmp-ck-timer.json', 'w') as ck_results_file: - json.dump(ck_results, ck_results_file, indent=2, sort_keys=True) + json.dump(ck_results, ck_results_file, indent=2, sort_keys=True) return @@ -287,6 +287,6 @@ def run_case(dtype, image, target): dtype='float32' if os.environ.get('CM_TVM_DTYPE','')!='': - dtype=os.environ['CM_TVM_DTYPE'] + dtype=os.environ['CM_TVM_DTYPE'] run_case(dtype, args.image, args.target) diff --git a/script/app-loadgen-generic-python/customize.py b/script/app-loadgen-generic-python/customize.py index c8810dcd7b..5d67a3da13 100644 --- a/script/app-loadgen-generic-python/customize.py +++ b/script/app-loadgen-generic-python/customize.py @@ -52,7 +52,7 @@ def preprocess(i): if env.get('CM_ML_MODEL_CODE_WITH_PATH', '') != '': run_opts +=" --model_code "+env['CM_ML_MODEL_CODE_WITH_PATH'] - + if env.get('CM_ML_MODEL_CFG_WITH_PATH', '') != '': run_opts +=" --model_cfg "+env['CM_ML_MODEL_CFG_WITH_PATH'] else: @@ -76,7 +76,7 @@ def preprocess(i): if env.get('CM_ML_MODEL_SAMPLE_WITH_PATH', '') != '': run_opts +=" --model_sample_pickle "+env['CM_ML_MODEL_SAMPLE_WITH_PATH'] - + # Add path to file model weights at the end of command line run_opts += ' '+env['CM_ML_MODEL_FILE_WITH_PATH'] @@ -85,16 +85,16 @@ def preprocess(i): print ('') print ('Assembled flags: {}'.format(run_opts)) - print ('') + print ('') return {'return':0} def postprocess(i): env = i['env'] - + tfile = env.get('CM_APP_LOADGEN_GENERIC_PYTHON_TMP_CFG_FILE', '') - + if tfile!='' and os.path.isfile(tfile): os.remove(tfile) diff --git a/script/app-loadgen-generic-python/src/backend_pytorch.py b/script/app-loadgen-generic-python/src/backend_pytorch.py index 1fef350b44..fbfd63bd31 100644 --- a/script/app-loadgen-generic-python/src/backend_pytorch.py +++ b/script/app-loadgen-generic-python/src/backend_pytorch.py @@ -22,16 +22,16 @@ def __init__(self, session): self.session = session def predict(self, input: ModelInput): - + print ('') utils.print_host_memory_use('Host memory used') - + print ('Running inference ...') with torch.no_grad(): output = self.session(input) - + utils.print_host_memory_use('Host memory used') - + return output @@ -67,7 +67,7 @@ def create(self) -> Model: raise Exception('Error: CUDA is forced but not available or installed in PyTorch!') else: raise Exception('Error: execution provider is unknown ({})!'.format(self.execution_provider)) - + checkpoint = torch.load(self.model_path, map_location=torch.device(torch_provider)) if self.model_code == '': @@ -79,8 +79,8 @@ def create(self) -> Model: # Load sample import pickle with open (self.model_sample_pickle, 'rb') as handle: - self.input_sample = pickle.load(handle) - + self.input_sample = pickle.load(handle) + # Check if has CM connector cm_model_module = os.path.join(self.model_code, 'cmc.py') if not os.path.isfile(cm_model_module): @@ -89,7 +89,7 @@ def create(self) -> Model: print ('') print ('Collective Mind Connector for the model found: {}'.format(cm_model_module)) - + # Load CM interface for the model import sys sys.path.insert(0, self.model_code) @@ -99,7 +99,7 @@ def create(self) -> Model: # Init model if len(self.model_cfg)>0: print ('Model cfg: 
{}'.format(self.model_cfg)) - + r = model_module.model_init(checkpoint, self.model_cfg) if r['return']>0: raise Exception('Error: {}'.format(r['error'])) @@ -108,7 +108,7 @@ def create(self) -> Model: if torch_provider=='cuda': model.cuda() - + model.eval() return XModel(model) @@ -123,4 +123,3 @@ def __init__(self, model_factory: XModelFactory): def sample(self, id_: int) -> ModelInput: input = self.input_sample return input - diff --git a/script/app-loadgen-generic-python/src/main.py b/script/app-loadgen-generic-python/src/main.py index 0055ecaf2f..692293b78e 100644 --- a/script/app-loadgen-generic-python/src/main.py +++ b/script/app-loadgen-generic-python/src/main.py @@ -37,9 +37,9 @@ def main( loadgen_expected_qps: float, loadgen_duration_sec: float ): - + print ('=====================================================================') - + if backend == 'onnxruntime': from backend_onnxruntime import XModelFactory from backend_onnxruntime import XModelInputSampler @@ -56,7 +56,7 @@ def main( with open(model_cfg) as mc: model_cfg_dict = json.load(mc) - + model_factory = XModelFactory( model_path, execution_provider, @@ -67,9 +67,9 @@ def main( model_cfg_dict, model_sample_pickle ) - + model_dataset = XModelInputSampler(model_factory) - + runner: ModelRunner = None if runner_name == "inline": runner = ModelRunnerInline(model_factory) @@ -164,7 +164,7 @@ def main( if __name__ == "__main__": print ('') - + logging.basicConfig( level=logging.DEBUG, format="%(asctime)s %(levelname)s %(threadName)s - %(name)s %(funcName)s: %(message)s", diff --git a/script/app-loadgen-generic-python/src/utils.py b/script/app-loadgen-generic-python/src/utils.py index 8c182650c5..1fc04b0cfa 100644 --- a/script/app-loadgen-generic-python/src/utils.py +++ b/script/app-loadgen-generic-python/src/utils.py @@ -10,7 +10,7 @@ def print_host_memory_use(text=''): memoryUse = python_process.memory_info()[0] if text == '': text = 'host memory use' - + print('{}: {} MB'.format(text, int(memoryUse/1000000))) return diff --git a/script/app-mlperf-inference-amd/customize.py b/script/app-mlperf-inference-amd/customize.py index 87819e2e25..7e8b96587d 100644 --- a/script/app-mlperf-inference-amd/customize.py +++ b/script/app-mlperf-inference-amd/customize.py @@ -29,7 +29,7 @@ def preprocess(i): env['CM_RUN_CMD'] = "bash run-llama2.sh " else: return {'return':1, 'error':'Model {} not supported'.format(env['CM_MODEL'])} - + return {'return':0} #return {'return':1, 'error': 'Run command needs to be tested'} diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py b/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py index ebd588c9f2..3445631dd3 100644 --- a/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py +++ b/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py @@ -34,7 +34,7 @@ def preprocess(i): if '+CPLUS_INCLUDE_PATH' not in env: env['+CPLUS_INCLUDE_PATH'] = [] - env['+CPLUS_INCLUDE_PATH'].append(os.path.join(script_path, "inc")) + env['+CPLUS_INCLUDE_PATH'].append(os.path.join(script_path, "inc")) env['+C_INCLUDE_PATH'].append(os.path.join(script_path, "inc")) # TODO: get cuda path ugly fix diff --git a/script/app-mlperf-inference-intel/customize.py b/script/app-mlperf-inference-intel/customize.py index cd3328eadc..ef02276990 100644 --- a/script/app-mlperf-inference-intel/customize.py +++ b/script/app-mlperf-inference-intel/customize.py @@ -59,7 +59,7 @@ def preprocess(i): if 'CM_MLPERF_USER_CONF' not in env: env['CM_MLPERF_USER_CONF'] = 
os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") - + loadgen_mode = env['CM_MLPERF_LOADGEN_MODE'] env['CONDA_PREFIX'] = env['CM_CONDA_PREFIX'] @@ -136,8 +136,8 @@ def preprocess(i): elif env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "run_harness": print(f"Harness Root: {harness_root}") if env.get('CM_MLPERF_LOADGEN_MODE', '') == "compliance": - audit_path = env['CM_MLPERF_INFERENCE_AUDIT_PATH'] - shutil.copy(audit_path, env['CM_RUN_DIR']) + audit_path = env['CM_MLPERF_INFERENCE_AUDIT_PATH'] + shutil.copy(audit_path, env['CM_RUN_DIR']) if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy": env['LOADGEN_MODE'] = 'Accuracy' diff --git a/script/app-mlperf-inference-mlcommons-cpp/customize.py b/script/app-mlperf-inference-mlcommons-cpp/customize.py index ebe8cf7d97..0f4d74cba0 100644 --- a/script/app-mlperf-inference-mlcommons-cpp/customize.py +++ b/script/app-mlperf-inference-mlcommons-cpp/customize.py @@ -15,10 +15,10 @@ def preprocess(i): print ('WARNING: this script was not thoroughly tested on Windows and compilation may fail - please help us test and improve it!') print ('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') # # Currently support only LLVM on Windows -# print ('# Forcing LLVM on Windows') +# print ('# Forcing LLVM on Windows') # r = automation.update_deps({'deps':meta['post_deps'], 'update_deps':{'compile-program': {'adr':{'compiler':{'tags':'llvm'}}}}}) # if r['return']>0: return r - + env = i['env'] if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": @@ -46,7 +46,7 @@ def preprocess(i): if '+CPLUS_INCLUDE_PATH' not in env: env['+CPLUS_INCLUDE_PATH'] = [] - env['+CPLUS_INCLUDE_PATH'].append(os.path.join(script_path, "inc")) + env['+CPLUS_INCLUDE_PATH'].append(os.path.join(script_path, "inc")) env['+C_INCLUDE_PATH'].append(os.path.join(script_path, "inc")) if env['CM_MLPERF_DEVICE'] == 'gpu': diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py index 9ae17695ed..fc16ba0ff3 100644 --- a/script/app-mlperf-inference-mlcommons-python/customize.py +++ b/script/app-mlperf-inference-mlcommons-python/customize.py @@ -14,7 +14,7 @@ def preprocess(i): if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": return {'return':0} - if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": + if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": return {'return':0} if env.get('CM_MLPERF_POWER','') == "yes": @@ -154,7 +154,7 @@ def preprocess(i): if env.get('CM_MLPERF_OUTPUT_DIR', '') == '': env['CM_MLPERF_OUTPUT_DIR'] = os.getcwd() - mlperf_implementation = env.get('CM_MLPERF_IMPLEMENTATION', 'reference') + mlperf_implementation = env.get('CM_MLPERF_IMPLEMENTATION', 'reference') cmd, run_dir = get_run_cmd(os_info, env, scenario_extra_options, mode_extra_options, dataset_options, mlperf_implementation) if env.get('CM_NETWORK_LOADGEN', '') == "lon": @@ -233,7 +233,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, mode_extra_optio env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR'] - + extra_options = " --output "+ env['CM_MLPERF_OUTPUT_DIR'] +" --model-name resnet50 --dataset " + env['CM_MLPERF_VISION_DATASET_OPTION'] + ' --max-batchsize ' + env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \ " --dataset-path "+env['CM_DATASET_PREPROCESSED_PATH']+" --model "+env['MODEL_FILE'] + \ " --preprocessed_dir "+env['CM_DATASET_PREPROCESSED_PATH'] @@ -336,7 +336,7 @@ def get_run_cmd_reference(os_info, env, 
scenario_extra_options, mode_extra_optio cmd = cmd.replace("--count", "--total-sample-count") cmd = cmd.replace("--max-batchsize", "--batch-size") - + elif "mixtral-8x7b" in env['CM_MODEL']: env['RUN_DIR'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "mixtral-8x7b") backend = env['CM_MLPERF_BACKEND'] diff --git a/script/app-mlperf-inference-nvidia/customize.py b/script/app-mlperf-inference-nvidia/customize.py index 01f90b57bc..354fb4afdb 100644 --- a/script/app-mlperf-inference-nvidia/customize.py +++ b/script/app-mlperf-inference-nvidia/customize.py @@ -43,7 +43,7 @@ def preprocess(i): model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'ResNet50', 'resnet50_v1.onnx') if not os.path.exists(os.path.dirname(model_path)): - cmds.append(f"mkdir -p {os.path.dirname(model_path)}") + cmds.append(f"mkdir -p {os.path.dirname(model_path)}") if not os.path.exists(model_path): cmds.append(f"ln -sf {env['CM_ML_MODEL_FILE_WITH_PATH']} {model_path}") @@ -59,7 +59,7 @@ def preprocess(i): vocab_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'bert', 'vocab.txt') if not os.path.exists(os.path.dirname(fp32_model_path)): - cmds.append(f"mkdir -p {os.path.dirname(fp32_model_path)}") + cmds.append(f"mkdir -p {os.path.dirname(fp32_model_path)}") if not os.path.exists(fp32_model_path): cmds.append(f"ln -sf {env['CM_ML_MODEL_BERT_LARGE_FP32_PATH']} {fp32_model_path}") @@ -104,7 +104,7 @@ def preprocess(i): target_data_path_base_dir = os.path.dirname(target_data_path) if not os.path.exists(target_data_path_base_dir): cmds.append(f"mkdir -p {target_data_path_base_dir}") - + inference_cases_json_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'KiTS19', 'inference_cases.json') calibration_cases_json_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'data', 'KiTS19', 'calibration_cases.json') @@ -134,7 +134,7 @@ def preprocess(i): model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'dlrm', 'tb00_40M.pt') if not os.path.exists(os.path.dirname(model_path)): - cmds.append(f"mkdir -p {os.path.dirname(model_path)}") + cmds.append(f"mkdir -p {os.path.dirname(model_path)}") if not os.path.exists(model_path): cmds.append(f"ln -sf {env['CM_ML_MODEL_FILE_WITH_PATH']} {model_path}") @@ -192,9 +192,9 @@ def preprocess(i): vocab_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'bert', 'vocab.txt') if not os.path.exists(os.path.dirname(fp32_model_path)): - cmds.append(f"mkdir -p {os.path.dirname(fp32_model_path)}") + cmds.append(f"mkdir -p {os.path.dirname(fp32_model_path)}") if not os.path.exists(os.path.dirname(fp8_model_path)): - cmds.append(f"mkdir -p {os.path.dirname(fp8_model_path)}") + cmds.append(f"mkdir -p {os.path.dirname(fp8_model_path)}") if not os.path.exists(fp32_model_path): env['CM_REQUIRE_GPTJ_MODEL_DOWNLOAD'] = 'yes' # download via prehook_deps @@ -203,7 +203,7 @@ def preprocess(i): model_name = "gptj" model_path = fp8_model_path - + elif "llama2" in env["CM_MODEL"]: # path to which the data file is present target_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'open_orca') @@ -221,8 +221,8 @@ def preprocess(i): cmds.append(f"mkdir {target_data_path}") cmds.append(f"ln -sf {env['CM_NVIDIA_LLAMA_DATASET_FILE_PATH']} {target_data_file_path}") - - + + model_name = "llama2-70b" model_path = fp8_model_path @@ -230,10 +230,10 @@ def preprocess(i): if make_command == "download_model": if not os.path.exists(model_path): if "llama2" in env['CM_MODEL']: - if not os.path.exists(os.path.join(model_path, 'config.json')): - return 
{'return': 1, 'error': f'Quantised model absent - did not detect config.json in path {model_path}'} + if not os.path.exists(os.path.join(model_path, 'config.json')): + return {'return': 1, 'error': f'Quantised model absent - did not detect config.json in path {model_path}'} else: - cmds.append(f"make download_model BENCHMARKS='{model_name}'") + cmds.append(f"make download_model BENCHMARKS='{model_name}'") elif "stable-diffusion" in env['CM_MODEL']: folders = ["clip1", "clip2", "unetxl", "vae"] for folder in folders: @@ -255,14 +255,14 @@ def preprocess(i): cmds.append(f"rm -rf {os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'rnnt_dev_clean_500_raw')}") cmds.append(f"rm -rf {os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'rnnt_train_clean_512_wav')}") if "llama2" in env["CM_MODEL"]: - # Preprocessing script in the inference results repo is not checking whether the preprocessed + # Preprocessing script in the inference results repo is not checking whether the preprocessed # file is already there, so we are handling it here. target_preprocessed_data_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'open_orca', 'input_ids_padded.npy') if not os.path.exists(target_preprocessed_data_path): cmds.append(f"make preprocess_data BENCHMARKS='{model_name}'") else: cmds.append(f"make preprocess_data BENCHMARKS='{model_name}'") - + else: scenario=scenario.lower() @@ -472,7 +472,7 @@ def preprocess(i): enable_sort = env.get('CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT') if enable_sort and enable_sort.lower() not in [ "no", "false", "0" ]: run_config += f" --enable_sort" - + sdxl_server_batcher_time_limit = env.get('CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT') if sdxl_server_batcher_time_limit: run_config += f" --sdxl_batcher_time_limit {sdxl_server_batcher_time_limit}" diff --git a/script/app-mlperf-inference-qualcomm/customize.py b/script/app-mlperf-inference-qualcomm/customize.py index fc858d9539..e99e538dd0 100644 --- a/script/app-mlperf-inference-qualcomm/customize.py +++ b/script/app-mlperf-inference-qualcomm/customize.py @@ -87,7 +87,7 @@ def preprocess(i): env['+ CXXFLAGS'].append("-DNETWORK_DIVISION=1") elif env.get('CM_BENCHMARK', '') == 'NETWORK_BERT_CLIENT': #source_files.append(os.path.join(kilt_root, "benchmarks", "network", "bert", "client", "pack.cpp")) - #env['+CPLUS_INCLUDE_PATH'].append(kilt_root) + #env['+CPLUS_INCLUDE_PATH'].append(kilt_root) #source_files.append(os.path.join(kilt_root, "benchmarks", "network", "bert", "client", "client.cpp")) env['+ CXXFLAGS'].append("-DNETWORK_DIVISION") elif env.get('CM_BENCHMARK', '') == 'STANDALONE_BERT': @@ -107,7 +107,7 @@ def preprocess(i): #source_files.append(env['CM_QAIC_API_SRC_FILE']) - env['+CPLUS_INCLUDE_PATH'].append(kilt_root) + env['+CPLUS_INCLUDE_PATH'].append(kilt_root) env['+C_INCLUDE_PATH'].append(kilt_root) if env['CM_MLPERF_DEVICE'] == 'gpu': @@ -163,7 +163,7 @@ def preprocess(i): if 'CM_MLPERF_USER_CONF' not in env: env['CM_MLPERF_USER_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") - + env['loadgen_mlperf_conf_path'] = env['CM_MLPERF_CONF']# to LOADGEN_MLPERF_CONF env['loadgen_user_conf_path'] = env['CM_MLPERF_USER_CONF']# to LOADGEN_USER_CONF env['loadgen_scenario'] = env['CM_MLPERF_LOADGEN_SCENARIO'] diff --git a/script/app-mlperf-inference-redhat/customize.py b/script/app-mlperf-inference-redhat/customize.py index 3737101114..522bafcb3a 100644 --- a/script/app-mlperf-inference-redhat/customize.py +++ 
b/script/app-mlperf-inference-redhat/customize.py @@ -57,7 +57,7 @@ def get_run_cmd(model, i): run_dir = os.path.join(env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], "open", submitter, "code", "gptj-99") return {'return': 0, 'run_cmd': run_cmd, 'run_dir': run_dir} - + if "llama2" in model: scenario = env['CM_MLPERF_LOADGEN_SCENARIO'] device = env['CM_MLPERF_DEVICE'] diff --git a/script/app-mlperf-inference/build_dockerfiles.py b/script/app-mlperf-inference/build_dockerfiles.py index 10579d33ea..4f5e7603ad 100644 --- a/script/app-mlperf-inference/build_dockerfiles.py +++ b/script/app-mlperf-inference/build_dockerfiles.py @@ -55,12 +55,12 @@ variation_string=",_"+model+",_"+backend+",_"+device+",_"+implementation file_name_ext = "_" + implementation + "_" + backend+"_"+device dockerfile_path = os.path.join(current_file_path,'dockerfiles', model, _os +'_'+version+ file_name_ext +'.Dockerfile') - cm_input = {'action': 'run', + cm_input = {'action': 'run', 'automation': 'script', 'tags': 'app,mlperf,inference,generic'+variation_string, - 'adr': {'compiler': + 'adr': {'compiler': {'tags': 'gcc'}, - 'inference-src': + 'inference-src': {'tags': '_octoml'}, 'openimages-preprocessed': {'tags': '_50'} @@ -75,11 +75,11 @@ comments = [ "#RUN " + dep for dep in print_deps ] comments.append("") comments.append("# Run CM workflow for MLPerf inference") - cm_docker_input = {'action': 'run', - 'automation': 'script', - 'tags': 'build,dockerfile', - 'docker_os': _os, - 'docker_os_version': version, + cm_docker_input = {'action': 'run', + 'automation': 'script', + 'tags': 'build,dockerfile', + 'docker_os': _os, + 'docker_os_version': version, 'file_path': dockerfile_path, 'comments': comments, 'run_cmd': 'cm run script --tags=app,mlperf,inference,generic'+variation_string+' --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml', @@ -95,4 +95,3 @@ print ('') print ("Dockerfile generated at " + dockerfile_path) - diff --git a/script/app-mlperf-inference/customize.py b/script/app-mlperf-inference/customize.py index 993e25a756..4178a1c506 100644 --- a/script/app-mlperf-inference/customize.py +++ b/script/app-mlperf-inference/customize.py @@ -86,43 +86,43 @@ def postprocess(i): model_full_name = env.get('CM_ML_MODEL_FULL_NAME', model) if mode == "accuracy" or mode== "compliance" and env['CM_MLPERF_LOADGEN_COMPLIANCE_TEST'] == "TEST01": - if model == "resnet50": - accuracy_filename = "accuracy-imagenet.py" - accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", \ - accuracy_filename) - dataset_args = " --imagenet-val-file " + \ - os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") - accuracy_log_file_option_name = " --mlperf-accuracy-file " - datatype_option = " --dtype "+env['CM_IMAGENET_ACCURACY_DTYPE'] - - elif model == "retinanet": - accuracy_filename = "accuracy-openimages.py" - accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", \ - accuracy_filename) - dataset_args = " --openimages-dir " + os.getcwd() #just to make the script happy - accuracy_log_file_option_name = " --mlperf-accuracy-file " - datatype_option = "" - - elif 'bert' in model: - accuracy_filename = "accuracy-squad.py" - accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_BERT_PATH'], accuracy_filename) - dataset_args = " --val_data '" + env['CM_DATASET_SQUAD_VAL_PATH'] + "' --vocab_file '" + env['CM_DATASET_SQUAD_VOCAB_PATH'] + "' --out_file predictions.json " - accuracy_log_file_option_name = " --log_file " - datatype_option = " 
--output_dtype "+env['CM_SQUAD_ACCURACY_DTYPE'] - - elif 'stable-diffusion-xl' in model: - pass #No compliance check for now - elif 'gpt' in model: - pass #No compliance check for now - elif 'llama2-70b' in model: - pass #No compliance check for now - elif 'mixtral-8x7b' in model: - pass #No compliance check for now - else: - pass # Not giving an error now. But accuracy paths need to be done for other benchmarks which may need the non-determinism test - #return {'return': 1, 'error': f'Accuracy paths not done for model {model}'} + if model == "resnet50": + accuracy_filename = "accuracy-imagenet.py" + accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", \ + accuracy_filename) + dataset_args = " --imagenet-val-file " + \ + os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") + accuracy_log_file_option_name = " --mlperf-accuracy-file " + datatype_option = " --dtype "+env['CM_IMAGENET_ACCURACY_DTYPE'] + + elif model == "retinanet": + accuracy_filename = "accuracy-openimages.py" + accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", \ + accuracy_filename) + dataset_args = " --openimages-dir " + os.getcwd() #just to make the script happy + accuracy_log_file_option_name = " --mlperf-accuracy-file " + datatype_option = "" + + elif 'bert' in model: + accuracy_filename = "accuracy-squad.py" + accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_BERT_PATH'], accuracy_filename) + dataset_args = " --val_data '" + env['CM_DATASET_SQUAD_VAL_PATH'] + "' --vocab_file '" + env['CM_DATASET_SQUAD_VOCAB_PATH'] + "' --out_file predictions.json " + accuracy_log_file_option_name = " --log_file " + datatype_option = " --output_dtype "+env['CM_SQUAD_ACCURACY_DTYPE'] + + elif 'stable-diffusion-xl' in model: + pass #No compliance check for now + elif 'gpt' in model: + pass #No compliance check for now + elif 'llama2-70b' in model: + pass #No compliance check for now + elif 'mixtral-8x7b' in model: + pass #No compliance check for now + else: + pass # Not giving an error now. 
But accuracy paths need to be done for other benchmarks which may need the non-determinism test + #return {'return': 1, 'error': f'Accuracy paths not done for model {model}'} scenario = env['CM_MLPERF_LOADGEN_SCENARIO'] - + if not state.get('cm-mlperf-inference-results'): state['cm-mlperf-inference-results'] = {} if not state.get('cm-mlperf-inference-results-last'): @@ -213,7 +213,7 @@ def postprocess(i): if env.get("CM_MLPERF_PRINT_SUMMARY", "").lower() not in [ "no", "0", "false"]: print("\n") print(mlperf_log_summary) - + with open ("measurements.json", "w") as fp: json.dump(measurements, fp, indent=2) @@ -243,10 +243,10 @@ def postprocess(i): if os.path.exists(env['CM_MLPERF_CONF']): shutil.copy(env['CM_MLPERF_CONF'], 'mlperf.conf') - + if os.path.exists(env['CM_MLPERF_USER_CONF']): shutil.copy(env['CM_MLPERF_USER_CONF'], 'user.conf') - + result, valid, power_result = mlperf_utils.get_result_from_log(env['CM_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode, env.get('CM_MLPERF_INFERENCE_SOURCE_VERSION')) power = None power_efficiency = None @@ -274,7 +274,7 @@ def postprocess(i): # Record basic host info host_info = { "os_version":platform.platform(), - "cpu_version":platform.processor(), + "cpu_version":platform.processor(), "python_version":sys.version, "cm_version":cm.__version__ } @@ -310,7 +310,7 @@ def postprocess(i): with open ("cm-host-info.json", "w") as fp: fp.write(json.dumps(host_info, indent=2)+'\n') - + # Prepare README if "cmd" in inp: cmd = "cm run script \\\n\t"+" \\\n\t".join(inp['cmd']) @@ -323,19 +323,19 @@ def postprocess(i): readme_init+= "*Check [CM MLPerf docs](https://docs.mlcommons.org/inference) for more details.*\n\n" - readme_body = "## Host platform\n\n* OS version: {}\n* CPU version: {}\n* Python version: {}\n* MLCommons CM version: {}\n\n".format(platform.platform(), + readme_body = "## Host platform\n\n* OS version: {}\n* CPU version: {}\n* Python version: {}\n* MLCommons CM version: {}\n\n".format(platform.platform(), platform.processor(), sys.version, cm.__version__) x = repo_name if repo_hash!='': x+=' --checkout='+str(repo_hash) - + readme_body += "## CM Run Command\n\nSee [CM installation guide](https://docs.mlcommons.org/inference/install/).\n\n"+ \ "```bash\npip install -U cmind\n\ncm rm cache -f\n\ncm pull repo {}\n\n{}\n```".format(x, xcmd) readme_body += "\n*Note that if you want to use the [latest automation recipes](https://docs.mlcommons.org/inference) for MLPerf (CM scripts),\n"+ \ " you should simply reload {} without checkout and clean CM cache as follows:*\n\n".format(repo_name) + \ "```bash\ncm rm repo {}\ncm pull repo {}\ncm rm cache -f\n\n```".format(repo_name, repo_name) - + extra_readme_init = '' extra_readme_body = '' if env.get('CM_MLPERF_README', '') == "yes": @@ -478,7 +478,7 @@ def postprocess(i): ''' #print(f"{sys_utilisation_log['timestamp'][0]} {power_begin_time}") #print(sys_utilisation_log['timestamp'][0]>=power_begin_time) - filtered_log = sys_utilisation_log[(sys_utilisation_log['timestamp'] >= power_begin_time) & + filtered_log = sys_utilisation_log[(sys_utilisation_log['timestamp'] >= power_begin_time) & (sys_utilisation_log['timestamp'] <= power_end_time)] #print(filtered_log) # Calculate average of cpu_utilisation and used_memory_gb diff --git a/script/app-mlperf-training-nvidia/customize.py b/script/app-mlperf-training-nvidia/customize.py index 3c5fdf6d8a..7163c8d04f 100644 --- a/script/app-mlperf-training-nvidia/customize.py +++ b/script/app-mlperf-training-nvidia/customize.py @@ -14,7 +14,7 @@ def 
preprocess(i): if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": return {'return':0} - if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": + if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": return {'return':0} if env.get('CM_MLPERF_POWER','') == "yes": diff --git a/script/app-mlperf-training-reference/customize.py b/script/app-mlperf-training-reference/customize.py index f7c77bc55a..55f12fb47c 100644 --- a/script/app-mlperf-training-reference/customize.py +++ b/script/app-mlperf-training-reference/customize.py @@ -14,7 +14,7 @@ def preprocess(i): if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": return {'return':0} - if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": + if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": return {'return':0} if env.get('CM_MLPERF_POWER','') == "yes": diff --git a/script/benchmark-any-mlperf-inference-implementation/customize.py b/script/benchmark-any-mlperf-inference-implementation/customize.py index ae6462118b..ae034f55d5 100644 --- a/script/benchmark-any-mlperf-inference-implementation/customize.py +++ b/script/benchmark-any-mlperf-inference-implementation/customize.py @@ -139,7 +139,7 @@ def preprocess(i): r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':run_file_name}) if r['return']>0: return r - + return {'return':0} def assemble_tflite_cmds(cmds): diff --git a/script/benchmark-program-mlperf/customize.py b/script/benchmark-program-mlperf/customize.py index 4ca4f70df5..c3236fb325 100644 --- a/script/benchmark-program-mlperf/customize.py +++ b/script/benchmark-program-mlperf/customize.py @@ -14,9 +14,9 @@ def postprocess(i): env['CM_MLPERF_RUN_CMD'] = env.get('CM_RUN_CMD') - + if env.get('CM_MLPERF_POWER', '') == "yes": - + if env.get('CM_MLPERF_SHORT_RANGING_RUN', '') != 'no': # Write '0' to the count.txt file in CM_RUN_DIR count_file = os.path.join(env.get('CM_RUN_DIR', ''), 'count.txt') @@ -34,7 +34,7 @@ def postprocess(i): export CM_MLPERF_USER_CONF="${CM_MLPERF_RANGING_USER_CONF}"; else export CM_MLPERF_USER_CONF="${CM_MLPERF_TESTING_USER_CONF}"; -fi && +fi && """ + env.get('CM_RUN_CMD', '').strip() else: env['CM_MLPERF_RUN_CMD'] = r""" diff --git a/script/benchmark-program/customize.py b/script/benchmark-program/customize.py index 6b5cb6ebfe..2e051607e6 100644 --- a/script/benchmark-program/customize.py +++ b/script/benchmark-program/customize.py @@ -50,7 +50,7 @@ def preprocess(i): # additional arguments and tags for measuring system informations(only if 'CM_PROFILE_NVIDIA_POWER' is 'on') if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on": env['CM_SYS_UTILISATION_SCRIPT_TAGS'] = '' - # this section is for selecting the variation + # this section is for selecting the variation if env.get('CM_MLPERF_DEVICE', '') == "gpu": env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ',_cuda' elif env.get('CM_MLPERF_DEVICE', '') == "cpu": @@ -59,8 +59,8 @@ def preprocess(i): env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ' --log_dir=\'' + logs_dir + '\'' # specify the logs directory if env.get('CM_SYSTEM_INFO_MEASUREMENT_INTERVAL', '') != '': # specifying the interval in which the system information should be measured env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ' --interval=\"' + env['CM_SYSTEM_INFO_MEASUREMENT_INTERVAL'] + '\"' - - # generate the pre run cmd - recording runtime system infos + + # generate the pre run cmd - recording runtime system infos pre_run_cmd = "" if env.get('CM_PRE_RUN_CMD_EXTERNAL', '') != '': diff --git a/script/build-docker-image/customize.py b/script/build-docker-image/customize.py index 
a3a3bc8df7..a533deaab9 100644 --- a/script/build-docker-image/customize.py +++ b/script/build-docker-image/customize.py @@ -125,7 +125,7 @@ def postprocess(i): r = os.system(PCMD) print ('') - if r>0: + if r>0: return {'return':1, 'error':'pushing to Docker Hub failed'} return {'return':0} diff --git a/script/build-dockerfile/customize.py b/script/build-dockerfile/customize.py index da22e56c58..4bbfe0e572 100644 --- a/script/build-dockerfile/customize.py +++ b/script/build-dockerfile/customize.py @@ -9,7 +9,7 @@ def preprocess(i): os_info = i['os_info'] env = i['env'] - + if env["CM_DOCKER_OS"] not in [ "ubuntu", "rhel", "arch" ]: return {'return': 1, 'error': f"Specified docker OS: {env['CM_DOCKER_OS']}. Currently only ubuntu, rhel and arch are supported in CM docker"} @@ -46,7 +46,7 @@ def preprocess(i): arg=arg+"="+str(default_env[env_]) #build_args.append(arg) #input_args.append("--"+input_+"="+"$"+env_) - + if "CM_DOCKER_OS_VERSION" not in env: env["CM_DOCKER_OS_VERSION"] = "20.04" @@ -58,34 +58,34 @@ def preprocess(i): if env.get("CM_REPO_PATH", "") != "": use_copy_repo = True cm_repo_path = os.path.abspath(env["CM_REPO_PATH"]) - + if not os.path.exists(cm_repo_path): return {'return': 1, 'error': f"Specified CM_REPO_PATH does not exist: {cm_repo_path}"} - + cmr_yml_path = os.path.join(cm_repo_path, "cmr.yaml") if not os.path.isfile(cmr_yml_path): return {'return': 1, 'error': f"cmr.yaml not found in CM_REPO_PATH: {cm_repo_path}"} - + # Define the build context directory (where the Dockerfile will be) build_context_dir = os.path.dirname(env.get('CM_DOCKERFILE_WITH_PATH', os.path.join(os.getcwd(), "Dockerfile"))) os.makedirs(build_context_dir, exist_ok=True) - + # Create cm_repo directory relative to the build context repo_build_context_path = os.path.join(build_context_dir, "cm_repo") - + # Remove existing directory if it exists if os.path.exists(repo_build_context_path): shutil.rmtree(repo_build_context_path) - + try: print(f"Copying repository from {cm_repo_path} to {repo_build_context_path}") shutil.copytree(cm_repo_path, repo_build_context_path) except Exception as e: return {'return': 1, 'error': f"Failed to copy repository to build context: {str(e)}"} - + if not os.path.isdir(repo_build_context_path): return {'return': 1, 'error': f"Repository was not successfully copied to {repo_build_context_path}"} - + # (Optional) Verify the copy if not os.path.isdir(repo_build_context_path): return {'return': 1, 'error': f"cm_repo was not successfully copied to the build context at {repo_build_context_path}"} @@ -96,7 +96,7 @@ def preprocess(i): else: # CM_REPO_PATH is not set; use cm pull repo as before use_copy_repo = False - + if env.get("CM_MLOPS_REPO", "") != "": cm_mlops_repo = env["CM_MLOPS_REPO"] # the below pattern matches both the HTTPS and SSH git link formats @@ -114,7 +114,7 @@ def preprocess(i): cm_mlops_repo = "mlcommons@cm4mlops" cm_mlops_repo_branch_string = f" --branch={env['CM_MLOPS_REPO_BRANCH']}" - + if env.get('CM_DOCKERFILE_WITH_PATH', '') == '': env['CM_DOCKERFILE_WITH_PATH'] = os.path.join(os.getcwd(), "Dockerfile") @@ -189,7 +189,7 @@ def preprocess(i): if env['CM_DOCKER_OS'] == "ubuntu": if int(env['CM_DOCKER_OS_VERSION'].split('.')[0]) >= 23: if "--break-system-packages" not in env.get('CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS', ''): - env['CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS'] = " --break-system-packages" + env['CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS'] = " --break-system-packages" pip_extra_flags = env.get('CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS', '') @@ -244,17 +244,17 @@ def 
preprocess(i): if use_copy_repo: docker_repo_dest = "/home/cmuser/CM/repos/mlcommons@cm4mlops" f.write(f'COPY --chown=cmuser:cm {relative_repo_path} {docker_repo_dest}' + EOL) - + f.write(EOL + '# Register CM repository' + EOL) f.write('RUN cm pull repo --url={} --quiet'.format(docker_repo_dest) + EOL) f.write(EOL) - - + + else: # Use cm pull repo as before x = env.get('CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO','') if x!='': x=' '+x - + f.write('RUN cm pull repo ' + cm_mlops_repo + cm_mlops_repo_branch_string + x + EOL) # Check extra repositories @@ -288,12 +288,12 @@ def preprocess(i): skip_extra = True else: if str(env.get('CM_DOCKER_NOT_PULL_UPDATE', 'False')).lower() not in ["yes", "1", "true"]: - env['CM_DOCKER_RUN_CMD'] += "cm pull repo && " + env['CM_DOCKER_RUN_CMD'] += "cm pull repo && " env['CM_DOCKER_RUN_CMD'] += "cm run script --tags=" + env['CM_DOCKER_RUN_SCRIPT_TAGS']+ ' --quiet' else: if str(env.get('CM_DOCKER_NOT_PULL_UPDATE', 'False')).lower() not in ["yes", "1", "true"]: env['CM_DOCKER_RUN_CMD']="cm pull repo && " + env['CM_DOCKER_RUN_CMD'] - + print(env['CM_DOCKER_RUN_CMD']) fake_run = env.get("CM_DOCKER_FAKE_RUN_OPTION"," --fake_run") + dockerfile_env_input_string fake_run = fake_run + " --fake_deps" if env.get('CM_DOCKER_FAKE_DEPS') else fake_run @@ -317,7 +317,7 @@ def preprocess(i): if '--quiet' not in x: x+=' --quiet ' x+=EOL - + f.write(x) diff --git a/script/calibrate-model-for.qaic/customize.py b/script/calibrate-model-for.qaic/customize.py index 62c4dbdbae..b6e91c4335 100644 --- a/script/calibrate-model-for.qaic/customize.py +++ b/script/calibrate-model-for.qaic/customize.py @@ -201,4 +201,3 @@ def get_scale_offset(min_val, max_val): scale = total_range/256.0 offset = round(-min_val / scale) return scale, offset - diff --git a/script/compile-model-for.qaic/customize.py b/script/compile-model-for.qaic/customize.py index 1e178f1897..7644821c5c 100644 --- a/script/compile-model-for.qaic/customize.py +++ b/script/compile-model-for.qaic/customize.py @@ -19,7 +19,7 @@ def preprocess(i): if r['return'] > 0: return r cmd = r['cmd'] - + print("Compiling from "+ os.getcwd()) env['CM_QAIC_MODEL_FINAL_COMPILATION_CMD'] = cmd diff --git a/script/convert-csv-to-md/customize.py b/script/convert-csv-to-md/customize.py index 8181e9437f..cbcbdaf386 100644 --- a/script/convert-csv-to-md/customize.py +++ b/script/convert-csv-to-md/customize.py @@ -17,7 +17,7 @@ def preprocess(i): md_file = env.get('CM_MD_FILE', '') process_file = os.path.join(i['run_script_input']['path'], "process.py") - env['CM_RUN_CMD'] = '{} {} {} {} '.format(env["CM_PYTHON_BIN_WITH_PATH"], process_file, csv_file, md_file) + env['CM_RUN_CMD'] = '{} {} {} {} '.format(env["CM_PYTHON_BIN_WITH_PATH"], process_file, csv_file, md_file) return {'return':0} diff --git a/script/convert-csv-to-md/process.py b/script/convert-csv-to-md/process.py index e010441d3e..1a563cdabb 100644 --- a/script/convert-csv-to-md/process.py +++ b/script/convert-csv-to-md/process.py @@ -7,4 +7,4 @@ df=pd.read_csv(csv_file, engine='python') with open(md_file, "w") as md: - df.to_markdown(buf=md) + df.to_markdown(buf=md) diff --git a/script/convert-ml-model-huggingface-to-onnx/customize.py b/script/convert-ml-model-huggingface-to-onnx/customize.py index e02a1fb6af..49c588fc38 100644 --- a/script/convert-ml-model-huggingface-to-onnx/customize.py +++ b/script/convert-ml-model-huggingface-to-onnx/customize.py @@ -9,7 +9,7 @@ def preprocess(i): if env.get("CM_MODEL_HUGG_PATH","") == "": return {'return': 1, 'error': 'CM_MODEL_HUGG_PATH is not set'} - + 
automation = i['automation'] cm = automation.cmind @@ -23,4 +23,4 @@ def postprocess(i): env = i['env'] env['HUGGINGFACE_ONNX_FILE_PATH'] = os.path.join(os.getcwd(),"model.onnx") - return {'return':0} \ No newline at end of file + return {'return':0} diff --git a/script/create-custom-cache-entry/customize.py b/script/create-custom-cache-entry/customize.py index 8d2d31db32..ddfbe05a5d 100644 --- a/script/create-custom-cache-entry/customize.py +++ b/script/create-custom-cache-entry/customize.py @@ -26,7 +26,7 @@ def postprocess(i): if not os.path.isdir(path): os.makedirs(path) else: - path = os.getcwd() + path = os.getcwd() x = '' env_key = env.get('CM_CUSTOM_CACHE_ENTRY_ENV_KEY', '') diff --git a/script/create-fpgaconvnet-app-tinyml/customize.py b/script/create-fpgaconvnet-app-tinyml/customize.py index c139e3a476..8a70d706de 100644 --- a/script/create-fpgaconvnet-app-tinyml/customize.py +++ b/script/create-fpgaconvnet-app-tinyml/customize.py @@ -34,5 +34,5 @@ def postprocess(i): print(f"JSON configuration file for {network} created at {json_location}") else: return {'return':1, 'error': "JSON configuration file generation failed"} - + return {'return':0} diff --git a/script/create-fpgaconvnet-config-tinyml/customize.py b/script/create-fpgaconvnet-config-tinyml/customize.py index 8590890bb9..6489bb7f16 100644 --- a/script/create-fpgaconvnet-config-tinyml/customize.py +++ b/script/create-fpgaconvnet-config-tinyml/customize.py @@ -49,5 +49,5 @@ def postprocess(i): env['CM_TINY_FPGACONVNET_CONFIG_FILE_' + network_env_name + '_PATH'] = json_location env['CM_GET_DEPENDENT_CACHED_PATH'] = json_location - + return {'return':0} diff --git a/script/destroy-terraform/customize.py b/script/destroy-terraform/customize.py index b10640e6d3..fd604e38d0 100644 --- a/script/destroy-terraform/customize.py +++ b/script/destroy-terraform/customize.py @@ -14,5 +14,3 @@ def preprocess(i): def postprocess(i): return {'return':0} - - diff --git a/script/detect-cpu/customize.py b/script/detect-cpu/customize.py index 7a5586667a..56a753af3b 100644 --- a/script/detect-cpu/customize.py +++ b/script/detect-cpu/customize.py @@ -21,13 +21,13 @@ def postprocess(i): automation = i['automation'] logger = automation.cmind.logger - + if os_info['platform'] == 'windows': sys = [] sys1 = [] cpu = [] cpu1 = [] - + import csv try: @@ -47,7 +47,7 @@ def postprocess(i): for k in range(0, len(s)): x[keys[k]]=s[k] - sys.append(x) + sys.append(x) if j==1: sys1 = x @@ -76,20 +76,20 @@ def postprocess(i): for k in range(0, len(s)): x[keys[k]]=s[k] - cpu.append(x) + cpu.append(x) if j==2: cpu1 = x j+=1 - + except Exception as e: logger.warning ('WARNING: problem processing file {} ({})!'.format(f, format(e))) pass - + state['host_device_raw_info']={'sys':sys, 'sys1':sys1, 'cpu':cpu, 'cpu1':cpu1} - + logger.warning ('WARNING: need to unify system and cpu output on Windows') return {'return':0} diff --git a/script/detect-sudo/customize.py b/script/detect-sudo/customize.py index 1019cbf8e3..7ad623ece5 100644 --- a/script/detect-sudo/customize.py +++ b/script/detect-sudo/customize.py @@ -66,10 +66,10 @@ def prompt_retry(timeout=10, default_retry=False): return default_retry # Automatically use the default in non-interactive terminals print(f"Timeout occurred. Do you want to try again? 
(y/n): ", end='', flush=True) - + # Use select to wait for user input with a timeout ready, _, _ = select.select([sys.stdin], [], [], timeout) - + if ready: answer = sys.stdin.readline().strip().lower() if answer in ['y', 'n']: @@ -117,7 +117,7 @@ def prompt_sudo(): text=True, stderr=subprocess.STDOUT, timeout=15 # Capture the command output - ) + ) else: r = subprocess.check_output( ['sudo', '-S', 'echo'] , diff --git a/script/download-file/customize.py b/script/download-file/customize.py index 4f21ce9f81..2a21e88e61 100644 --- a/script/download-file/customize.py +++ b/script/download-file/customize.py @@ -9,18 +9,18 @@ def escape_special_chars(text, tool=None): for char in special_chars: text = text.replace(char, f'^{char}') - - #handle URL special cases + + #handle URL special cases if tool != "rclone": text = text.replace('%', "%%") - + return text def preprocess(i): os_info = i['os_info'] env = i['env'] - + # env to be passed to the subprocess subprocess_env = os.environ.copy() subprocess_env['PATH'] += os.pathsep + os.pathsep.join(env.get('+PATH', '')) @@ -63,7 +63,7 @@ def preprocess(i): print ('Using local file: {}'.format(filepath)) else: url = env.get('CM_DOWNLOAD_URL','') - + if url=='': return {'return':1, 'error': 'please specify URL using --url={URL} or --env.CM_DOWNLOAD_URL={URL}'} @@ -152,9 +152,9 @@ def preprocess(i): if url == '': break print(f"Download from {oldurl} failed, trying from {url}") - + if r['return']>0: return r - + env['CM_DOWNLOAD_CMD'] = "" env['CM_DOWNLOAD_FILENAME'] = r['filename'] diff --git a/script/dump-pip-freeze/customize.py b/script/dump-pip-freeze/customize.py index eb5eeab8af..00b5bb9fdb 100644 --- a/script/dump-pip-freeze/customize.py +++ b/script/dump-pip-freeze/customize.py @@ -26,7 +26,7 @@ def postprocess(i): os_info = i['os_info'] automation = i['automation'] - + pip_freeze = {} pip_freeze_file = env['CM_DUMP_RAW_PIP_FREEZE_FILE_PATH'] if not os.path.isfile(pip_freeze_file): @@ -38,15 +38,15 @@ def postprocess(i): 'cmd':'py -m pip freeze', 'stdout':pip_freeze_file}) # skip output - + if os.path.isfile(pip_freeze_file): with open(pip_freeze_file, "r") as f: for line in f.readlines(): if "==" in line: split = line.split("==") - pip_freeze[split[0]] = split[1].strip() + pip_freeze[split[0]] = split[1].strip() + - state['pip_freeze'] = pip_freeze return {'return':0} diff --git a/script/extract-file/customize.py b/script/extract-file/customize.py index 3fba68636c..561e6636b0 100644 --- a/script/extract-file/customize.py +++ b/script/extract-file/customize.py @@ -25,7 +25,7 @@ def preprocess(i): filename = env.get('CM_EXTRACT_FILEPATH','') if filename == '': return {'return': 1, 'error': 'Extract with no download requested and CM_EXTRACT_FILEPATH is not set'} - + if windows: filename = filename.replace("%", "%%") @@ -93,7 +93,7 @@ def preprocess(i): return {'return': 1, 'error': 'Neither CM_EXTRACT_UNZIP nor CM_EXTRACT_UNTAR is yes'} env['CM_EXTRACT_PRE_CMD'] = '' - + extract_to_folder = env.get('CM_EXTRACT_TO_FOLDER', '') # Check if extract to additional folder in the current directory (or external path) @@ -123,7 +123,7 @@ def preprocess(i): print ('Current directory: {}'.format(os.getcwd())) print ('Command line: "{}"'.format(env['CM_EXTRACT_CMD'])) print ('') - + final_file = env.get('CM_EXTRACT_EXTRACTED_FILENAME', '') if final_file!='': @@ -143,7 +143,7 @@ def preprocess(i): # for x in ['CM_EXTRACT_CMD', 'CM_EXTRACT_EXTRACTED_CHECKSUM_CMD']: # env[x+'_USED']='YES' if env.get(x,'')!='' else 'NO' - + # If force cache, add filepath to tag 
unless _path is used ... path_tag = 'path.'+filename @@ -162,12 +162,12 @@ def postprocess(i): extract_to_folder = env.get('CM_EXTRACT_TO_FOLDER', '') extract_path = env.get('CM_EXTRACT_PATH', '') - + extracted_file = env.get('CM_EXTRACT_EXTRACTED_FILENAME', '') # Preparing filepath # Can be either full extracted filename (such as model) or folder - + if extracted_file != '': filename = os.path.basename(extracted_file) @@ -190,7 +190,7 @@ def postprocess(i): if env.get('CM_EXTRACT_FINAL_ENV_NAME', '')!='': env[env['CM_EXTRACT_FINAL_ENV_NAME']] = filepath - # Detect if this file will be deleted or moved + # Detect if this file will be deleted or moved env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath # Check if need to remove archive after extraction diff --git a/script/generate-mlperf-inference-submission/customize.py b/script/generate-mlperf-inference-submission/customize.py index 6516451fd2..a5e158c63e 100644 --- a/script/generate-mlperf-inference-submission/customize.py +++ b/script/generate-mlperf-inference-submission/customize.py @@ -20,7 +20,7 @@ def fill_from_json(file_path, keys, sut_info): elif key in data and sut_info[key] != data[key]: return -1 # error saying there is a mismatch in the value of a key return sut_info - + # Helper function to check whether all the keys(sut information) are assigned def check_dict_filled(keys, sut_info): for key in keys: @@ -32,7 +32,7 @@ def check_dict_filled(keys, sut_info): def model_in_valid_models(model, mlperf_version): import submission_checker as checker config = checker.MODEL_CONFIG - + if model not in config[mlperf_version]['models']: internal_model_name = config[mlperf_version]["model_mapping"].get(model, '') # resnet50 -> resnet if internal_model_name == '': @@ -92,13 +92,13 @@ def generate_submission(env, state, inp, submission_division): # set pytorch as the default framework if system_meta_default['framework'] == '': system_meta_default['framework'] = "pytorch" - + system_meta_tmp = {} if 'CM_MLPERF_SUBMISSION_SYSTEM_TYPE' in env: system_meta_tmp['system_type'] = env['CM_MLPERF_SUBMISSION_SYSTEM_TYPE'] if submission_division != "": - system_meta_tmp['division'] = submission_division + system_meta_tmp['division'] = submission_division division = submission_division else: division = system_meta_default['division'] @@ -162,7 +162,7 @@ def generate_submission(env, state, inp, submission_division): "framework": None, "framework_version": "default", "run_config": "default" - } # variable to store the system meta + } # variable to store the system meta model_mapping_combined = {} # to store all the model mapping related to an SUT @@ -187,7 +187,7 @@ def generate_submission(env, state, inp, submission_division): # Preprocessing part. # Even the model mapping json file is present in root directory, the folders are traversed - # and the data is updated provided not duplicated. + # and the data is updated provided not duplicated. 
models = [f for f in os.listdir(result_path) if not os.path.isfile(os.path.join(result_path, f))] if division == "open" and len(model_mapping_combined) == 0: for model in models: @@ -202,7 +202,7 @@ def generate_submission(env, state, inp, submission_division): result_mode_path = os.path.join(result_scenario_path,mode) if mode == "performance": compliance_performance_run_path = os.path.join(result_mode_path, "run_1") - # model mapping part + # model mapping part tmp_model_mapping_file_path = os.path.join(compliance_performance_run_path, "model_mapping.json") if os.path.exists(tmp_model_mapping_file_path): with open(tmp_model_mapping_file_path, 'r') as f: @@ -215,8 +215,8 @@ def generate_submission(env, state, inp, submission_division): else: if returned_model_name != model: model_mapping_combined.update({model:returned_model_name}) - - if check_dict_filled(sut_info.keys(), sut_info): + + if check_dict_filled(sut_info.keys(), sut_info): system = sut_info["hardware_name"] implementation = sut_info["implementation"] device = sut_info["device"] @@ -228,7 +228,7 @@ def generate_submission(env, state, inp, submission_division): new_res = res print(f"The SUT folder name for submission generation is: {new_res}") - + platform_prefix = inp.get('platform_prefix', '') if platform_prefix: sub_res = platform_prefix + "-" + new_res @@ -301,7 +301,7 @@ def generate_submission(env, state, inp, submission_division): continue if not os.path.isdir(measurement_scenario_path): - os.makedirs(measurement_scenario_path) + os.makedirs(measurement_scenario_path) for mode in modes: result_mode_path = os.path.join(result_scenario_path, mode) @@ -374,7 +374,7 @@ def generate_submission(env, state, inp, submission_division): #if division == "closed" and not os.path.isdir(submission_compliance_path): # os.makedirs(submission_compliance_path) - user_conf_path = os.path.join(result_scenario_path, "user.conf") + user_conf_path = os.path.join(result_scenario_path, "user.conf") if os.path.exists(user_conf_path): shutil.copy(user_conf_path, os.path.join(measurement_scenario_path, 'user.conf')) else: @@ -390,7 +390,7 @@ def generate_submission(env, state, inp, submission_division): if not os.path.exists(measurements_json_path): measurements_json_path = os.path.join(result_mode_path, "measurements.json") target_measurement_json_path = submission_measurement_path - + if os.path.exists(measurements_json_path): with open(measurements_json_path, "r") as f: measurements_json = json.load(f) @@ -400,7 +400,7 @@ def generate_submission(env, state, inp, submission_division): else: if mode.lower() == "performance": return {"return":1, "error":f"measurements.json missing in both paths: {measurements_json_path} and {os.path.join(result_scenario_path, 'user.conf')}"} - + files = [] readme = False @@ -432,8 +432,8 @@ def generate_submission(env, state, inp, submission_division): elif f in [ "README.md", "README-extra.md", "cm-version-info.json", "os_info.json", "cpu_info.json", "pip_freeze.json", "system_info.txt", "cm-deps.png", "cm-deps.mmd" ] and mode == "performance": shutil.copy(os.path.join(result_mode_path, f), os.path.join(submission_measurement_path, f)) if f == "system_info.txt" and not platform_info_file: - # the first found system_info.txt will be taken as platform info file for a specific model to be placed in - # measurements-model folder when generating the final submission + # the first found system_info.txt will be taken as platform info file for a specific model to be placed in + # measurements-model folder when generating 
the final submission platform_info_file = os.path.join(result_mode_path, f) elif f in [ "console.out" ]: shutil.copy(os.path.join(result_mode_path, f), os.path.join(submission_measurement_path, mode+"_"+f)) @@ -469,7 +469,7 @@ def generate_submission(env, state, inp, submission_division): # Copy system_info.txt to the submission measurements model folder if any scenario performance run has it sys_info_file = None - + if os.path.exists(os.path.join(result_model_path, "system_info.txt")): sys_info_file = os.path.join(result_model_path, "system_info.txt") elif platform_info_file: @@ -481,7 +481,7 @@ def generate_submission(env, state, inp, submission_division): #Copy system_info.txt to the submission measurements folder if any model performance run has it sys_info_file = None - + if os.path.exists(os.path.join(result_path, "system_info.txt")): sys_info_file = os.path.join(result_path, "system_info.txt") elif model_platform_info_file: @@ -500,12 +500,12 @@ def generate_submission(env, state, inp, submission_division): r = cmind.access(cm_input) if r['return'] > 0: return r - + with open(system_file, "w") as fp: json.dump(system_meta, fp, indent=2) - + result_table, headers = mlperf_utils.get_result_table(results) print(tabulate(result_table, headers = headers, tablefmt="pretty")) @@ -522,13 +522,13 @@ def postprocess(i): state = i['state'] inp=i['input'] - submission_divisions = [] - + submission_divisions = [] + if env.get('CM_MLPERF_SUBMISSION_DIVISION', '') in ["open-closed", "closed-open"]: submission_divisions = ["open", "closed"] elif env.get('CM_MLPERF_SUBMISSION_DIVISION', '') != '': submission_divisions.append(env['CM_MLPERF_SUBMISSION_DIVISION']) - + if env.get('CM_MLPERF_SUBMISSION_DIVISION', '') == '': #if submission division is not assigned, default value would be taken in submission_generation function r = generate_submission(env, state, inp, submission_division="") else: diff --git a/script/generate-mlperf-inference-user-conf/customize.py b/script/generate-mlperf-inference-user-conf/customize.py index 093223f806..7c7d078c01 100644 --- a/script/generate-mlperf-inference-user-conf/customize.py +++ b/script/generate-mlperf-inference-user-conf/customize.py @@ -203,7 +203,7 @@ def preprocess(i): sut_name = env.get('CM_SUT_NAME', env['CM_MLPERF_BACKEND'] + "-" + env['CM_MLPERF_DEVICE']) OUTPUT_DIR = os.path.join(env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'], sut_name, \ model_full_name, scenario.lower(), mode) - + env['CM_MLPERF_INFERENCE_RESULTS_SUT_PATH'] = os.path.join(env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'], sut_name) if 'CM_MLPERF_POWER' in env and mode == "performance": diff --git a/script/generate-mlperf-tiny-report/customize.py b/script/generate-mlperf-tiny-report/customize.py index 59b16019fb..825a682cb1 100644 --- a/script/generate-mlperf-tiny-report/customize.py +++ b/script/generate-mlperf-tiny-report/customize.py @@ -15,7 +15,7 @@ def preprocess(i): # Query cache for results dirs env_repo_tags=env.get('CM_IMPORT_TINYMLPERF_REPO_TAGS','').strip() xtags='' if env_repo_tags =='' else ',version-'+env_repo_tags - + r = cm.access({'action':'find', 'automation':'cache,541d6f712a6b464e', 'tags':'get,repo,mlperf-tiny-results'+xtags}) @@ -51,8 +51,8 @@ def preprocess(i): print ('') print ('Repo path: {}'.format(path)) - r = automation.run_native_script({'run_script_input':run_script_input, - 'env':env, + r = automation.run_native_script({'run_script_input':run_script_input, + 'env':env, 'script_name':'run_submission_checker'}) if r['return']>0: return r @@ -68,7 +68,7 @@ def 
postprocess(i): version = env['CM_TINYMLPERF_REPO_VERSION'] for ext in ['.csv', '.xlsx']: - + p1 = os.path.join (path, 'summary'+ext) p2 = os.path.join (cur_dir, 'summary-{}{}'.format(version,ext)) diff --git a/script/generate-mlperf-tiny-submission/customize.py b/script/generate-mlperf-tiny-submission/customize.py index 026c6d623f..534915c3b6 100644 --- a/script/generate-mlperf-tiny-submission/customize.py +++ b/script/generate-mlperf-tiny-submission/customize.py @@ -101,7 +101,7 @@ def generate_submission(i): submission_scenario_path = os.path.join(submission_model_path, scenario) measurement_scenario_path = os.path.join(measurement_model_path, scenario) compliance_scenario_path = os.path.join(compliance_model_path, scenario) - + modes = [f for f in os.listdir(result_scenario_path) if not os.path.isfile(os.path.join(result_scenario_path, f))] for mode in modes: result_mode_path = os.path.join(result_scenario_path, mode) diff --git a/script/generate-nvidia-engine/customize.py b/script/generate-nvidia-engine/customize.py index 9fcaff093c..1cbe9e3d9a 100644 --- a/script/generate-nvidia-engine/customize.py +++ b/script/generate-nvidia-engine/customize.py @@ -21,7 +21,7 @@ def preprocess(i): " --gpu_batch_size="+env['CM_MODEL_BATCH_SIZE'] +\ " --gpu_copy_streams="+env['CM_GPU_COPY_STREAMS'] +\ " --workspace_size="+env['CM_TENSORRT_WORKSPACE_SIZE'] -~ +~ return {'return':0} def postprocess(i): diff --git a/script/get-android-sdk/customize.py b/script/get-android-sdk/customize.py index 88248df9d9..625df997a2 100644 --- a/script/get-android-sdk/customize.py +++ b/script/get-android-sdk/customize.py @@ -2,7 +2,7 @@ import os def preprocess(i): - + os_info = i['os_info'] platform = os_info['platform'] @@ -56,13 +56,13 @@ def preprocess(i): new_path = os.path.join(android_home, 'cmdline-tools') if not os.path.isdir(new_path): os.makedirs(new_path) - + os.chdir(new_path) - + cmdline_tools_version=env.get('CM_ANDROID_CMDLINE_TOOLS_VERSION','') env['CM_ANDROID_CMDLINE_TOOLS_VERSION'] = cmdline_tools_version - + package_url = env['CM_ANDROID_CMDLINE_TOOLS_URL'] package_url = package_url.replace('${CM_ANDROID_CMDLINE_TOOLS_OS}', host_os_for_android) package_url = package_url.replace('${CM_ANDROID_CMDLINE_TOOLS_VERSION}', cmdline_tools_version) @@ -74,8 +74,8 @@ def preprocess(i): cm = automation.cmind - r = cm.access({'action':'download_file', - 'automation':'utils,dc2743f8450541e3', + r = cm.access({'action':'download_file', + 'automation':'utils,dc2743f8450541e3', 'url':package_url}) if r['return']>0: return r @@ -83,8 +83,8 @@ def preprocess(i): print ('Unzipping file {}'.format(filename)) - r = cm.access({'action':'unzip_file', - 'automation':'utils,dc2743f8450541e3', + r = cm.access({'action':'unzip_file', + 'automation':'utils,dc2743f8450541e3', 'filename':filename, 'strip_folders':0}) if r['return']>0: return r @@ -98,9 +98,9 @@ def preprocess(i): os.chdir(cur_dir) sdk_manager_path = os.path.join(android_home, 'cmdline-tools', 'tools', 'bin', sdk_manager_file) - + sdk_manager_dir = os.path.dirname(sdk_manager_path) - + env['CM_ANDROID_SDK_MANAGER_BIN'] = sdk_manager_file env['CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH'] = sdk_manager_path @@ -147,16 +147,16 @@ def preprocess(i): path_tools = os.path.join(android_home, 'tools') env['CM_ANDROID_TOOLS_PATH']=path_tools paths.append(path_tools) - + android_ndk_version=env['CM_ANDROID_NDK_VERSION'] # Check Android NDK path_ndk = os.path.join(android_home, 'ndk', android_ndk_version) env['CM_ANDROID_NDK_PATH']=path_ndk env['ANDROID_NDK_HOME']=path_ndk - - + 
+ path_ndk_compiler = os.path.join(path_ndk, 'toolchains', 'llvm', 'prebuilt', host_os_for_ndk, 'bin') env['CM_ANDROID_LLVM_PATH']=path_ndk_compiler env['CM_ANDROID_LLVM_CLANG_BIN_WITH_PATH']=os.path.join(path_ndk_compiler, 'clang.exe') diff --git a/script/get-aocl/customize.py b/script/get-aocl/customize.py index e67702b8ca..62c5a185a9 100644 --- a/script/get-aocl/customize.py +++ b/script/get-aocl/customize.py @@ -27,5 +27,3 @@ def postprocess(i): env['+LD_LIBRARY_PATH'] = [ aocl_lib_path ] if '+LD_LIBRARY_PATH' not in env else env['+LD_LIBRARY_PATH'] + [ aocl_lib_path ] return {'return':0} - - diff --git a/script/get-aria2/customize.py b/script/get-aria2/customize.py index 3c65bbe4f6..d6401d28ae 100644 --- a/script/get-aria2/customize.py +++ b/script/get-aria2/customize.py @@ -25,7 +25,7 @@ def preprocess(i): 'env_path_key':'CM_ARIA2_BIN_WITH_PATH', 'run_script_input':i['run_script_input'], 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + if r['return'] >0 : if r['return'] == 16: # Not found, try install force_install = True @@ -43,7 +43,7 @@ def preprocess(i): ext = '.zip' ext2 = '' else: - archive = 'aria2-{}' + archive = 'aria2-{}' ext = '.tar.bz2' ext2 = '.tar' diff --git a/script/get-aws-cli/customize.py b/script/get-aws-cli/customize.py index af7fd1603b..af92de93b0 100644 --- a/script/get-aws-cli/customize.py +++ b/script/get-aws-cli/customize.py @@ -20,7 +20,7 @@ def preprocess(i): 'env_path_key':'CM_AWS_BIN_WITH_PATH', 'run_script_input':i['run_script_input'], 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + if r['return'] >0 : if r['return'] == 16: env['CM_REQUIRE_INSTALL'] = "yes" return {'return': 0} diff --git a/script/get-bazel/customize.py b/script/get-bazel/customize.py index c4622a7f4a..9e14fc5a51 100644 --- a/script/get-bazel/customize.py +++ b/script/get-bazel/customize.py @@ -20,7 +20,7 @@ def preprocess(i): 'env_path_key':'CM_BAZEL_BIN_WITH_PATH', 'run_script_input':i['run_script_input'], 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + if r['return'] >0 : if r['return'] == 16: env['CM_REQUIRE_INSTALL'] = "yes" return {'return': 0} diff --git a/script/get-blis/customize.py b/script/get-blis/customize.py index 063366ed21..fc5d8303a7 100644 --- a/script/get-blis/customize.py +++ b/script/get-blis/customize.py @@ -32,5 +32,3 @@ def postprocess(i): env['+LD_LIBRARY_PATH'] = [ blis_lib_path ] if '+LD_LIBRARY_PATH' not in env else env['+LD_LIBRARY_PATH'] + [ blis_lib_path ] return {'return':0} - - diff --git a/script/get-cl/customize.py b/script/get-cl/customize.py index 1d205d8fdf..1fb9fbcd2b 100644 --- a/script/get-cl/customize.py +++ b/script/get-cl/customize.py @@ -27,7 +27,7 @@ def preprocess(i): 'recursion_spaces':recursion_spaces} rr = automation.find_artifact(ii) - if rr['return'] >0 : + if rr['return'] >0 : # If not found in PATH, try a longer search if rr['return'] != 16: return rr @@ -40,30 +40,30 @@ def preprocess(i): 'C:\\Program Files (x86)\\Microsoft Visual Studio', 'C:\\Program Files (x86)\\Microsoft Visual Studio 14'] - restrict_paths = ['Hostx64\\x64'] + restrict_paths = ['Hostx64\\x64'] - r = automation.find_file_deep({'paths':paths, - 'file_name':file_name, + r = automation.find_file_deep({'paths':paths, + 'file_name':file_name, 'restrict_paths':restrict_paths}) if r['return']>0: return r found_paths = r['found_paths'] if len(found_paths) == 0: - return rr + return rr tmp_paths = ';'.join(found_paths) - + env['CM_TMP_PATH'] = tmp_paths env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' - + ii['env']=env rr = 
automation.find_artifact(ii) if rr['return'] >0 : return rr else: - return rr + return rr found_path = rr['found_path'] diff --git a/script/get-cmake/customize.py b/script/get-cmake/customize.py index ebfd0c319a..c9a58db920 100644 --- a/script/get-cmake/customize.py +++ b/script/get-cmake/customize.py @@ -20,7 +20,7 @@ def preprocess(i): 'env_path_key':'CM_CMAKE_BIN_WITH_PATH', 'run_script_input':i['run_script_input'], 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + if r['return'] >0 : if r['return'] == 16: env['CM_REQUIRE_INSTALL'] = "yes" return {'return': 0} diff --git a/script/get-conda/customize.py b/script/get-conda/customize.py index d8fcf6eacd..4d6f37ca1a 100644 --- a/script/get-conda/customize.py +++ b/script/get-conda/customize.py @@ -37,7 +37,7 @@ def preprocess(i): bin_dir = 'Scripts' if os_info['platform'] == 'windows' else 'bin' env['CM_CONDA_BIN_WITH_PATH'] = os.path.join(env['CM_CONDA_INSTALL_PATH'], bin_dir, file_name) - if conda_prefix_name != '' or r['return'] >0 : + if conda_prefix_name != '' or r['return'] >0 : if conda_prefix_name != '' or r['return'] == 16: if conda_prefix_name == '': if env.get('CM_TMP_FAIL_IF_NOT_FOUND','').lower() == 'yes': diff --git a/script/get-cuda-devices/customize.py b/script/get-cuda-devices/customize.py index 242044e7e8..bf59c1ccb6 100644 --- a/script/get-cuda-devices/customize.py +++ b/script/get-cuda-devices/customize.py @@ -9,7 +9,7 @@ def preprocess(i): if str(env.get('CM_DETECT_USING_PYCUDA', '')).lower() in [ "1", "yes", "true"]: i['run_script_input']['script_name'] = 'detect' - return {'return':0} + return {'return':0} def postprocess(i): @@ -47,16 +47,16 @@ def postprocess(i): if gpu_id < 0: continue - gpu[gpu_id][key] = val + gpu[gpu_id][key] = val p[key] = val key_env = 'CM_CUDA_DEVICE_PROP_'+key.upper().replace(' ','_') env[key_env] = val - + state['cm_cuda_num_devices'] = gpu_id + 1 env['CM_CUDA_NUM_DEVICES'] = gpu_id + 1 state['cm_cuda_device_prop'] = p state['cm_cuda_devices_prop'] = gpu - - return {'return':0} + + return {'return':0} diff --git a/script/get-cuda-devices/detect.py b/script/get-cuda-devices/detect.py index 817e46a6fb..2b9a3383cb 100644 --- a/script/get-cuda-devices/detect.py +++ b/script/get-cuda-devices/detect.py @@ -44,4 +44,3 @@ def get_gpu_info(): print(f"GPU {idx}:") for key, value in gpu_info.items(): f.write(f"{key}: {value}\n") - diff --git a/script/get-cuda/customize.py b/script/get-cuda/customize.py index 11de3c6cd7..7bae3eb392 100644 --- a/script/get-cuda/customize.py +++ b/script/get-cuda/customize.py @@ -64,7 +64,7 @@ def preprocess(i): 'env_path_key':env_key, 'run_script_input':i['run_script_input'], 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + if r['return'] >0 : if os_info['platform'] == 'windows': return r @@ -175,7 +175,7 @@ def postprocess(i): # Check extra paths for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: - env[key] = [] + env[key] = [] ## Include cuda_path_include = os.path.join(cuda_path, 'include') diff --git a/script/get-cudnn/customize.py b/script/get-cudnn/customize.py index 93a965b850..9f02b2cb9f 100644 --- a/script/get-cudnn/customize.py +++ b/script/get-cudnn/customize.py @@ -17,85 +17,85 @@ def preprocess(i): # If TAR file is not explicitly specified, search if env.get('CM_CUDNN_TAR_FILE_PATH','')=='': - cuda_path_lib = env.get('CM_CUDA_PATH_LIB') - - if os_info['platform'] == 'windows': - extra_pre='' - extra_ext='lib' - else: - extra_pre='lib' - extra_ext='so' - - libfilename = extra_pre 
+ 'cudnn.' +extra_ext - env['CM_CUDNN_VERSION'] = 'vdetected' - - if os.path.exists(os.path.join(cuda_path_lib, libfilename)): - env['CM_CUDA_PATH_LIB_CUDNN'] = env['CM_CUDA_PATH_LIB'] - return {'return': 0} - - if env.get('CM_TMP_PATH', '').strip() != '': - path = env.get('CM_TMP_PATH') - if os.path.exists(os.path.join(path, libfilename)): - env['CM_CUDA_PATH_LIB_CUDNN'] = path - return {'return': 0} - - if env.get('CM_INPUT','').strip()=='': - if os_info['platform'] == 'windows': - if env.get('CM_TMP_PATH','').strip()=='': - # Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA" - paths = [] - for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]: - if os.path.isdir(path): - dirs = os.listdir(path) - for dr in dirs: - path2 = os.path.join(path, dr, 'lib') - if os.path.isdir(path2): - paths.append(path2) - - if len(paths)>0: - tmp_paths = ';'.join(paths) - tmp_paths += ';'+os.environ.get('PATH','') - - env['CM_TMP_PATH'] = tmp_paths - env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' - - else: - # paths to cuda are not always in PATH - add a few typical locations to search for - # (unless forced by a user) - - cm_tmp_path = env.get('CM_TMP_PATH','').strip() - if cm_tmp_path!='': - cm_tmp_path+=':' - cm_tmp_path+='/usr/local/cuda/lib64:/usr/cuda/lib64:/usr/local/cuda/lib:/usr/cuda/lib:/usr/local/cuda-11/lib64:/usr/cuda-11/lib:/usr/local/cuda-12/lib:/usr/cuda-12/lib:/usr/local/packages/cuda/lib' - env['CM_TMP_PATH'] = cm_tmp_path - env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' - - for lib_path in env.get('+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []): - if(os.path.exists(lib_path)): - env['CM_TMP_PATH']+=':'+lib_path - - r = i['automation'].find_artifact({'file_name': libfilename, - 'env': env, - 'os_info':os_info, - 'default_path_env_key': 'LD_LIBRARY_PATH', - 'detect_version':False, - 'env_path_key':'CM_CUDA_PATH_LIB_CUDNN', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : - if os_info['platform'] == 'windows': - return r - - if r['return'] == 16: - env['CM_TMP_REQUIRE_INSTALL'] = "yes" - else: - return r - else: - # On Linux we may detected file instead of path to cudnn - if os.path.isfile(env['CM_CUDA_PATH_LIB_CUDNN']): - env['CM_CUDA_PATH_LIB_CUDNN'] = os.path.dirname(env['CM_CUDA_PATH_LIB_CUDNN']) - - return {'return':0} + cuda_path_lib = env.get('CM_CUDA_PATH_LIB') + + if os_info['platform'] == 'windows': + extra_pre='' + extra_ext='lib' + else: + extra_pre='lib' + extra_ext='so' + + libfilename = extra_pre + 'cudnn.' 
+extra_ext + env['CM_CUDNN_VERSION'] = 'vdetected' + + if os.path.exists(os.path.join(cuda_path_lib, libfilename)): + env['CM_CUDA_PATH_LIB_CUDNN'] = env['CM_CUDA_PATH_LIB'] + return {'return': 0} + + if env.get('CM_TMP_PATH', '').strip() != '': + path = env.get('CM_TMP_PATH') + if os.path.exists(os.path.join(path, libfilename)): + env['CM_CUDA_PATH_LIB_CUDNN'] = path + return {'return': 0} + + if env.get('CM_INPUT','').strip()=='': + if os_info['platform'] == 'windows': + if env.get('CM_TMP_PATH','').strip()=='': + # Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA" + paths = [] + for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]: + if os.path.isdir(path): + dirs = os.listdir(path) + for dr in dirs: + path2 = os.path.join(path, dr, 'lib') + if os.path.isdir(path2): + paths.append(path2) + + if len(paths)>0: + tmp_paths = ';'.join(paths) + tmp_paths += ';'+os.environ.get('PATH','') + + env['CM_TMP_PATH'] = tmp_paths + env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + + else: + # paths to cuda are not always in PATH - add a few typical locations to search for + # (unless forced by a user) + + cm_tmp_path = env.get('CM_TMP_PATH','').strip() + if cm_tmp_path!='': + cm_tmp_path+=':' + cm_tmp_path+='/usr/local/cuda/lib64:/usr/cuda/lib64:/usr/local/cuda/lib:/usr/cuda/lib:/usr/local/cuda-11/lib64:/usr/cuda-11/lib:/usr/local/cuda-12/lib:/usr/cuda-12/lib:/usr/local/packages/cuda/lib' + env['CM_TMP_PATH'] = cm_tmp_path + env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + + for lib_path in env.get('+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []): + if(os.path.exists(lib_path)): + env['CM_TMP_PATH']+=':'+lib_path + + r = i['automation'].find_artifact({'file_name': libfilename, + 'env': env, + 'os_info':os_info, + 'default_path_env_key': 'LD_LIBRARY_PATH', + 'detect_version':False, + 'env_path_key':'CM_CUDA_PATH_LIB_CUDNN', + 'run_script_input':i['run_script_input'], + 'recursion_spaces':recursion_spaces}) + if r['return'] >0 : + if os_info['platform'] == 'windows': + return r + + if r['return'] == 16: + env['CM_TMP_REQUIRE_INSTALL'] = "yes" + else: + return r + else: + # On Linux we may detected file instead of path to cudnn + if os.path.isfile(env['CM_CUDA_PATH_LIB_CUDNN']): + env['CM_CUDA_PATH_LIB_CUDNN'] = os.path.dirname(env['CM_CUDA_PATH_LIB_CUDNN']) + + return {'return':0} if env.get('CM_CUDNN_TAR_FILE_PATH','')=='': return {'return': 1, 'error': 'Please envoke cm run script "get cudnn" --tar_file={full path to the cuDNN tar file}'} diff --git a/script/get-dataset-coco/customize.py b/script/get-dataset-coco/customize.py index 78ced4d7bd..02fa5289bd 100644 --- a/script/get-dataset-coco/customize.py +++ b/script/get-dataset-coco/customize.py @@ -21,7 +21,7 @@ def preprocess(i): # Check which dataset p = os.path.join(path, 'annotations') if os.path.isdir(p): - for d in [('val2017','val','2017'), + for d in [('val2017','val','2017'), ('train2017','train','2017')]: p = os.path.join(path, d[0]) @@ -36,7 +36,7 @@ def preprocess(i): print ('') print ('Detected COCO dataset {} {}'.format(tp,ver)) - + env['CM_DATASET_COCO_DETECTED'] = 'yes' env['CM_DATASET_COCO_PATH'] = path else: @@ -57,7 +57,7 @@ def preprocess(i): else: url_data = env['CM_DATASET_COCO_URL_DATA'] url_ann = env['CM_DATASET_COCO_URL_ANNOTATIONS'] - + filename_data = tp + ver + '.zip' filename_annotation = 'annotations_trainval' + ver + '.zip' @@ -91,7 +91,7 @@ def preprocess(i): if path_to!='': dae_input_data['extract_path'] = path_to 
dae_input_annotation['extract_path'] = path_to - + path_store = env.get('CM_STORE', '') if path_store!='': dae_input_data['download_path'] = path_store @@ -99,7 +99,7 @@ def preprocess(i): dae_input_annotation['download_path'] = path_store dae_input_annotation['tags'] = '_keep' - + r = automation.update_deps({'deps':meta['prehook_deps'], 'update_deps':{ '746e5dad5e784ad6': dae_input_data, @@ -122,11 +122,11 @@ def preprocess(i): if ver == '2017': if tp == 'val': if size == 'small': - md5sum_data = '16fab985a33afa66beeb987f68c2023c' - md5sum_ann = '78c0cfd9fc32c825d4ae693fd0d91407' + md5sum_data = '16fab985a33afa66beeb987f68c2023c' + md5sum_ann = '78c0cfd9fc32c825d4ae693fd0d91407' else: - md5sum_data = '442b8da7639aecaf257c1dceb8ba8c80' - md5sum_ann = 'f4bbac642086de4f52a3fdda2de5fa2c' + md5sum_data = '442b8da7639aecaf257c1dceb8ba8c80' + md5sum_ann = 'f4bbac642086de4f52a3fdda2de5fa2c' if md5sum_data != '': env['CM_DATASET_COCO_MD5SUM_DATA'] = md5sum_data @@ -141,7 +141,7 @@ def preprocess(i): # Add version and type to tags extra_cache_tags = [] for tag in [ver, tp]: - if tag not in variation_tags: + if tag not in variation_tags: extra_cache_tags.append(tag) return {'return':0, 'add_extra_cache_tags':extra_cache_tags} @@ -155,7 +155,7 @@ def postprocess(i): tp_ver = env['CM_DATASET_COCO_TYPE_AND_VERSION'] path_to = env.get('CM_TO','') - + # Check if detected or downloaded if env.get('CM_DATASET_COCO_DETECTED', '').lower() == 'yes' or path_to!='': path_all = env['CM_DATASET_COCO_PATH'] if path_to=='' else path_to @@ -164,9 +164,9 @@ def postprocess(i): env['CM_DATASET_COCO_ANNOTATIONS_PATH'] = os.path.join(path_all, 'annotations') else: path_all = os.getcwd() - + # Moving 2 directories to 1 place - + path_data = env['CM_DATASET_COCO_DATA_PATH'] path_ann = env['CM_DATASET_COCO_ANNOTATIONS_PATH'] @@ -180,16 +180,16 @@ def postprocess(i): if os_info['platform'] == 'windows': # Moving to this directory since can't make symbolic links command1 = ' move /y ' + path_data_full + ' ' + tp_ver - command2 = ' move /y ' + path_ann_full + ' annotations' + command2 = ' move /y ' + path_ann_full + ' annotations' env['CM_DATASET_COCO_DATA_PATH'] = os.path.join(path_all, tp_ver) env['CM_DATASET_COCO_ANNOTATIONS_PATH'] = os.path.join(path_all, 'annotations') else: - # Make soft links from data and annotations into 1 directory + # Make soft links from data and annotations into 1 directory # (standard way for COCO) command1 = ' ln -s ' + path_data_full + ' ' + tp_ver - command2 = ' ln -s ' + path_ann_full + ' annotations' + command2 = ' ln -s ' + path_ann_full + ' annotations' for command in [command1, command2]: print (command) diff --git a/script/get-dataset-coco2014/customize.py b/script/get-dataset-coco2014/customize.py index a38f336bc2..77fe3c8b0b 100644 --- a/script/get-dataset-coco2014/customize.py +++ b/script/get-dataset-coco2014/customize.py @@ -16,7 +16,7 @@ def preprocess(i): def postprocess(i): env = i['env'] - if env.get('CM_GENERATE_SAMPLE_ID', '') == "yes": + if env.get('CM_GENERATE_SAMPLE_ID', '') == "yes": env['CM_COCO2014_SAMPLE_ID_PATH'] = os.path.join(os.getcwd(), 'install', 'sample_ids.txt') print(env['CM_COCO2014_SAMPLE_ID_PATH']) if env.get('CM_DATASET_CALIBRATION','') == "no": diff --git a/script/get-dataset-imagenet-helper/customize.py b/script/get-dataset-imagenet-helper/customize.py index b1b7d90de6..08de452865 100644 --- a/script/get-dataset-imagenet-helper/customize.py +++ b/script/get-dataset-imagenet-helper/customize.py @@ -3,9 +3,9 @@ def postprocess(i): env = i['env'] - + 
script_path = env['CM_TMP_CURRENT_SCRIPT_PATH'] - + env['CM_DATASET_IMAGENET_HELPER_PATH'] = script_path env['+PYTHONPATH'] = [ script_path ] diff --git a/script/get-dataset-imagenet-val/customize.py b/script/get-dataset-imagenet-val/customize.py index e35af9664f..378b7927d2 100644 --- a/script/get-dataset-imagenet-val/customize.py +++ b/script/get-dataset-imagenet-val/customize.py @@ -76,4 +76,3 @@ def postprocess(i): env['CM_GET_DEPENDENT_CACHED_PATH'] = path return {'return':0} - diff --git a/script/get-dataset-mlperf-inference-gnn/customize.py b/script/get-dataset-mlperf-inference-gnn/customize.py index 192144f602..bed3d59337 100644 --- a/script/get-dataset-mlperf-inference-gnn/customize.py +++ b/script/get-dataset-mlperf-inference-gnn/customize.py @@ -17,7 +17,7 @@ def preprocess(i): graph_folder = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], 'graph', 'R-GAT') download_loc = env.get('CM_IGBH_DATASET_OUT_PATH', os.getcwd()) - + run_cmd += f"cd {graph_folder} " x_sep = " && " @@ -33,7 +33,7 @@ def preprocess(i): # compress graph(for glt implementation) if env.get('CM_IGBH_GRAPH_COMPRESS', '') == "yes": run_cmd += x_sep + f"{env['CM_PYTHON_BIN_WITH_PATH']} tools/compress_graph.py --path {download_loc} --dataset_size {env['CM_IGBH_DATASET_SIZE']} --layout {env['CM_IGBH_GRAPH_COMPRESS_LAYOUT']}" - + env['CM_RUN_CMD'] = run_cmd return {'return':0} diff --git a/script/get-dataset-mlperf-inference-mixtral/customize.py b/script/get-dataset-mlperf-inference-mixtral/customize.py index 118f32449a..6f37e396be 100644 --- a/script/get-dataset-mlperf-inference-mixtral/customize.py +++ b/script/get-dataset-mlperf-inference-mixtral/customize.py @@ -9,13 +9,13 @@ def preprocess(i): if env.get('CM_DATASET_MIXTRAL_GENERATE_TEST_DATA', '') == "yes": env['CM_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH'] = os.path.join(os.getcwd(), "mixtral-test-dataset.pkl") - + return {'return':0} def postprocess(i): env = i['env'] - + env['CM_DATASET_MIXTRAL_PREPROCESSED_PATH'] = env['CM_DATASET_PREPROCESSED_PATH'] if env.get('CM_DATASET_MIXTRAL_GENERATE_TEST_DATA', '') == "yes": diff --git a/script/get-dataset-mlperf-inference-mixtral/generate-test-dataset.py b/script/get-dataset-mlperf-inference-mixtral/generate-test-dataset.py index 004fd476f6..cd449cf5b2 100644 --- a/script/get-dataset-mlperf-inference-mixtral/generate-test-dataset.py +++ b/script/get-dataset-mlperf-inference-mixtral/generate-test-dataset.py @@ -8,7 +8,7 @@ def main(): parser.add_argument('--dataset-path', required=True, help="Path to the input dataset (pickle file).") parser.add_argument('--output-path', default=os.path.join(os.getcwd(),"mixtral-test-dataset.pkl"), help="Path to save the output dataset (pickle file).") parser.add_argument('--samples', default=2, help="Number of entries to be extracted from each group.") - + args = parser.parse_args() dataset_path = args.dataset_path output_path = args.output_path diff --git a/script/get-dataset-openimages-annotations/customize.py b/script/get-dataset-openimages-annotations/customize.py index a8954c67d6..d85402e2bf 100644 --- a/script/get-dataset-openimages-annotations/customize.py +++ b/script/get-dataset-openimages-annotations/customize.py @@ -4,7 +4,7 @@ def preprocess(i): os_info = i['os_info'] - + env = i['env'] return {'return':0} diff --git a/script/get-dlrm-data-mlperf-inference/customize.py b/script/get-dlrm-data-mlperf-inference/customize.py index 1e72e38b3d..0d1c878f9c 100644 --- a/script/get-dlrm-data-mlperf-inference/customize.py +++ b/script/get-dlrm-data-mlperf-inference/customize.py @@ 
-13,7 +13,7 @@ def preprocess(i): dlrm_data_path = os.getcwd() elif not os.path.exists(dlrm_data_path): return {'return':1, 'error':"given dlrm data path does not exists"} - + # creating required folders inside the dlrm data path if not exists # criteo dataset criteo_fp32_path = os.path.join(dlrm_data_path, "criteo", "day23", "fp32") @@ -38,7 +38,7 @@ def preprocess(i): if variation == "nvidia": if not os.path.exists(os.path.join(dlrm_data_path, "model")): print(f'model directory is missing inside {dlrm_data_path}') - env['CM_DLRM_MODEL_DOWNLOAD'] = True + env['CM_DLRM_MODEL_DOWNLOAD'] = True if not os.path.exists(os.path.join(dlrm_data_path, "criteo")): print(f'criteo directory is missing inside {dlrm_data_path}') env['CM_DLRM_DATASET_DOWNLOAD'] = True @@ -78,11 +78,11 @@ def preprocess(i): os.system(f"unzip {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot.npz')} -d {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot_unpacked')}") else: run_cmd += f"unzip {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot.npz')} -d {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot_unpacked')}" + xsep - + if os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot.npz")) or env['CM_DLRM_DATASET_DOWNLOAD'] == True: file_path = os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot.npz") run_cmd += ("echo {} {} | md5sum -c").format('c46b7e31ec6f2f8768fa60bdfc0f6e40', file_path) + xsep - + file_path = os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_dense.npy") run_cmd += ("echo {} {} | md5sum -c").format('cdf7af87cbc7e9b468c0be46b1767601', file_path) + xsep diff --git a/script/get-dlrm/customize.py b/script/get-dlrm/customize.py index 479efc350c..5b4959942a 100644 --- a/script/get-dlrm/customize.py +++ b/script/get-dlrm/customize.py @@ -34,4 +34,3 @@ def postprocess(i): env['DLRM_DIR'] = os.path.join(os.getcwd(), "dlrm") return {'return':0} - diff --git a/script/get-docker/customize.py b/script/get-docker/customize.py index d030a11432..322a087baa 100644 --- a/script/get-docker/customize.py +++ b/script/get-docker/customize.py @@ -23,7 +23,7 @@ def preprocess(i): 'env_path_key':'CM_DOCKER_BIN_WITH_PATH', 'run_script_input':i['run_script_input'], 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + if r['return'] >0 : if r['return'] == 16: run_file_name = "install" r = automation.run_native_script({'run_script_input':i['run_script_input'], 'env':env, 'script_name':run_file_name}) diff --git a/script/get-generic-python-lib/detect-version.py b/script/get-generic-python-lib/detect-version.py index 86ab8adf0b..b80edf858d 100644 --- a/script/get-generic-python-lib/detect-version.py +++ b/script/get-generic-python-lib/detect-version.py @@ -28,7 +28,7 @@ if error!='': error += '\n' error += format(e) - # We generally skip error since it usually means that + # We generally skip error since it usually means that # package is not installed with open(filename, 'w') as file: diff --git a/script/get-generic-sys-util/customize.py b/script/get-generic-sys-util/customize.py index 1bf2a31f60..54c84cacb3 100644 --- a/script/get-generic-sys-util/customize.py +++ b/script/get-generic-sys-util/customize.py @@ -50,7 +50,7 @@ def preprocess(i): print ('') return {'return':0} - + if not pm: return {'return': 1, 'error': 'Package manager not detected for the given OS'} @@ -65,7 +65,7 @@ def preprocess(i): return {'return': 
0} else: return {'return': 1, 'error': f'No package name specified for {pm} and util name {util}'} - + if util == "libffi": if env.get("CM_HOST_OS_FLAVOR", "") == "ubuntu": if env.get("CM_HOST_OS_VERSION", "") in [ "20.04", "20.10", "21.04", "21.10" ]: @@ -76,11 +76,11 @@ def preprocess(i): # Temporary handling of dynamic state variables tmp_values = re.findall(r'<<<(.*?)>>>', str(package_name)) for tmp_value in tmp_values: - if tmp_value not in env: - return {'return':1, 'error':'variable {} is not in env'.format(tmp_value)} - if tmp_value in env: - if type(package_name) == str: - package_name = package_name.replace("<<<"+tmp_value+">>>", str(env[tmp_value])) + if tmp_value not in env: + return {'return':1, 'error':'variable {} is not in env'.format(tmp_value)} + if tmp_value in env: + if type(package_name) == str: + package_name = package_name.replace("<<<"+tmp_value+">>>", str(env[tmp_value])) install_cmd = env.get('CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD') if not install_cmd: @@ -102,7 +102,7 @@ def preprocess(i): env['CM_SYS_UTIL_INSTALL_CMD'] = '' if env.get('CM_SYS_UTIL_CHECK_CMD', '') != '' and env['CM_SYS_UTIL_INSTALL_CMD'] != '': - env['CM_SYS_UTIL_INSTALL_CMD'] = f"""{env['CM_SYS_UTIL_CHECK_CMD']} || {env['CM_SYS_UTIL_INSTALL_CMD']}""" + env['CM_SYS_UTIL_INSTALL_CMD'] = f"""{env['CM_SYS_UTIL_CHECK_CMD']} || {env['CM_SYS_UTIL_INSTALL_CMD']}""" return {'return':0} diff --git a/script/get-git-repo/customize.py b/script/get-git-repo/customize.py index 125308f33f..0a1d7b0729 100644 --- a/script/get-git-repo/customize.py +++ b/script/get-git-repo/customize.py @@ -12,7 +12,7 @@ def preprocess(i): env = i['env'] meta = i['meta'] - + env_key = get_env_key(env) cm_git_url = env['CM_GIT_URL'] diff --git a/script/get-github-cli/customize.py b/script/get-github-cli/customize.py index 8c64641189..9c15d17d72 100644 --- a/script/get-github-cli/customize.py +++ b/script/get-github-cli/customize.py @@ -20,17 +20,17 @@ def preprocess(i): 'env_path_key':'CM_GITHUBCLI_BIN_WITH_PATH', 'run_script_input':i['run_script_input'], 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : - if r['return'] == 16: - if env.get('CM_TMP_FAIL_IF_NOT_FOUND','').lower() == 'yes': - return r + if r['return'] >0 : + if r['return'] == 16: + if env.get('CM_TMP_FAIL_IF_NOT_FOUND','').lower() == 'yes': + return r - print (recursion_spaces+' # {}'.format(r['error'])) + print (recursion_spaces+' # {}'.format(r['error'])) - # Attempt to run installer - r = {'return':0, 'skip':True, 'script':{'tags':'install,github-cli'}} + # Attempt to run installer + r = {'return':0, 'skip':True, 'script':{'tags':'install,github-cli'}} - return r + return r found_path = r['found_path'] diff --git a/script/get-go/customize.py b/script/get-go/customize.py index d65126585b..72f0874c8c 100644 --- a/script/get-go/customize.py +++ b/script/get-go/customize.py @@ -20,7 +20,7 @@ def preprocess(i): 'env_path_key':'CM_GO_BIN_WITH_PATH', 'run_script_input':i['run_script_input'], 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + if r['return'] >0 : if r['return'] == 16: env['CM_REQUIRE_INSTALL'] = "yes" return {'return': 0} diff --git a/script/get-google-saxml/customize.py b/script/get-google-saxml/customize.py index d38c8c2ca5..cc9342a50a 100644 --- a/script/get-google-saxml/customize.py +++ b/script/get-google-saxml/customize.py @@ -6,7 +6,7 @@ def preprocess(i): os_info = i['os_info'] # TBD - + return {'return':0} def postprocess(i): @@ -17,6 +17,6 @@ def postprocess(i): # TBD cur_dir = os.getcwd() - + return {'return':0} diff --git 
a/script/get-google-test/customize.py b/script/get-google-test/customize.py index 299778e3f7..5d7427929d 100644 --- a/script/get-google-test/customize.py +++ b/script/get-google-test/customize.py @@ -20,9 +20,9 @@ def postprocess(i): env = i['env'] if '+C_INCLUDE_PATH' not in env: - env['+C_INCLUDE_PATH'] = [] + env['+C_INCLUDE_PATH'] = [] if '+LD_LIBRARY_PATH' not in env: - env['+LD_LIBRARY_PATH'] = [] + env['+LD_LIBRARY_PATH'] = [] gtest_install_path = os.path.join(os.getcwd(), "install") env['CM_GOOGLE_TEST_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH'] diff --git a/script/get-ipol-src/customize.py b/script/get-ipol-src/customize.py index f2b5dd1fca..42824f8916 100644 --- a/script/get-ipol-src/customize.py +++ b/script/get-ipol-src/customize.py @@ -24,8 +24,8 @@ def preprocess(i): print ('Downloading from {}'.format(url)) - r = cm.access({'action':'download_file', - 'automation':'utils,dc2743f8450541e3', + r = cm.access({'action':'download_file', + 'automation':'utils,dc2743f8450541e3', 'url':url}) if r['return']>0: return r @@ -33,8 +33,8 @@ def preprocess(i): print ('Unzipping file {}'.format(filename)) - r = cm.access({'action':'unzip_file', - 'automation':'utils,dc2743f8450541e3', + r = cm.access({'action':'unzip_file', + 'automation':'utils,dc2743f8450541e3', 'filename':filename}) if r['return']>0: return r diff --git a/script/get-java/customize.py b/script/get-java/customize.py index 8cfc211bcf..3e28a58d85 100644 --- a/script/get-java/customize.py +++ b/script/get-java/customize.py @@ -35,7 +35,7 @@ def preprocess(i): 'run_script_input':i['run_script_input'], 'hook': skip_path, 'recursion_spaces':recursion_spaces}) - if rr['return'] == 0 : + if rr['return'] == 0 : found = True elif rr['return'] != 16: return rr @@ -43,48 +43,48 @@ def preprocess(i): # If not found or force install if not found or install: - if os_info['platform'] == 'windows': - env['CM_JAVA_PREBUILT_HOST_OS']='windows' - env['CM_JAVA_PREBUILT_EXT']='.zip' - else: - env['CM_JAVA_PREBUILT_HOST_OS']='linux' - env['CM_JAVA_PREBUILT_EXT']='.tar.gz' + if os_info['platform'] == 'windows': + env['CM_JAVA_PREBUILT_HOST_OS']='windows' + env['CM_JAVA_PREBUILT_EXT']='.zip' + else: + env['CM_JAVA_PREBUILT_HOST_OS']='linux' + env['CM_JAVA_PREBUILT_EXT']='.tar.gz' - url = env['CM_JAVA_PREBUILT_URL'] - filename = env['CM_JAVA_PREBUILT_FILENAME'] + url = env['CM_JAVA_PREBUILT_URL'] + filename = env['CM_JAVA_PREBUILT_FILENAME'] - java_prebuilt_version = env['CM_JAVA_PREBUILT_VERSION'] - java_prebuilt_build = env['CM_JAVA_PREBUILT_BUILD'] + java_prebuilt_version = env['CM_JAVA_PREBUILT_VERSION'] + java_prebuilt_build = env['CM_JAVA_PREBUILT_BUILD'] - for key in ['CM_JAVA_PREBUILT_VERSION', - 'CM_JAVA_PREBUILT_BUILD', - 'CM_JAVA_PREBUILT_HOST_OS', - 'CM_JAVA_PREBUILT_EXT']: - url = url.replace('${'+key+'}', env[key]) - filename = filename.replace('${'+key+'}', env[key]) + for key in ['CM_JAVA_PREBUILT_VERSION', + 'CM_JAVA_PREBUILT_BUILD', + 'CM_JAVA_PREBUILT_HOST_OS', + 'CM_JAVA_PREBUILT_EXT']: + url = url.replace('${'+key+'}', env[key]) + filename = filename.replace('${'+key+'}', env[key]) - env['CM_JAVA_PREBUILT_URL'] = url - env['CM_JAVA_PREBUILT_FILENAME'] = filename + env['CM_JAVA_PREBUILT_URL'] = url + env['CM_JAVA_PREBUILT_FILENAME'] = filename - print ('') - print (recursion_spaces + ' Downloading and installing prebuilt Java from {} ...'.format(url+filename)) + print ('') + print (recursion_spaces + ' Downloading and installing prebuilt Java from {} ...'.format(url+filename)) - rr = 
automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install-prebuilt'}) - if rr['return']>0: return rr + rr = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install-prebuilt'}) + if rr['return']>0: return rr - target_path = os.path.join(cur_dir, 'jdk-'+java_prebuilt_version, 'bin') - target_file = os.path.join(target_path, file_name) + target_path = os.path.join(cur_dir, 'jdk-'+java_prebuilt_version, 'bin') + target_file = os.path.join(target_path, file_name) - if not os.path.isfile(target_file): - return {'return':1, 'error':'can\'t find target file {}'.format(target_file)} + if not os.path.isfile(target_file): + return {'return':1, 'error':'can\'t find target file {}'.format(target_file)} - print ('') - print (recursion_spaces + ' Registering file {} ...'.format(target_file)) + print ('') + print (recursion_spaces + ' Registering file {} ...'.format(target_file)) - env[env_path_key] = target_file + env[env_path_key] = target_file - if '+PATH' not in env: env['+PATH'] = [] - env['+PATH'].append(target_path) + if '+PATH' not in env: env['+PATH'] = [] + env['+PATH'].append(target_path) return {'return':0} diff --git a/script/get-javac/customize.py b/script/get-javac/customize.py index f7e076bd93..2f9481c4bf 100644 --- a/script/get-javac/customize.py +++ b/script/get-javac/customize.py @@ -23,7 +23,7 @@ def preprocess(i): install = env.get('CM_JAVAC_PREBUILT_INSTALL','') in ['on', 'True', True] env_path_key = 'CM_JAVAC_BIN_WITH_PATH' - + # If not force install, search for artifact if not install: rr = i['automation'].find_artifact({'file_name': file_name, @@ -35,7 +35,7 @@ def preprocess(i): 'run_script_input':i['run_script_input'], 'hook': skip_path, 'recursion_spaces':recursion_spaces}) - if rr['return'] == 0 : + if rr['return'] == 0 : found = True elif rr['return'] != 16: return rr @@ -43,49 +43,49 @@ def preprocess(i): # If not found or force install if not found or install: - if os_info['platform'] == 'windows': - env['CM_JAVAC_PREBUILT_HOST_OS']='windows' - env['CM_JAVAC_PREBUILT_EXT']='.zip' - else: - env['CM_JAVAC_PREBUILT_HOST_OS']='linux' - env['CM_JAVAC_PREBUILT_EXT']='.tar.gz' + if os_info['platform'] == 'windows': + env['CM_JAVAC_PREBUILT_HOST_OS']='windows' + env['CM_JAVAC_PREBUILT_EXT']='.zip' + else: + env['CM_JAVAC_PREBUILT_HOST_OS']='linux' + env['CM_JAVAC_PREBUILT_EXT']='.tar.gz' + + url = env['CM_JAVAC_PREBUILT_URL'] + filename = env['CM_JAVAC_PREBUILT_FILENAME'] - url = env['CM_JAVAC_PREBUILT_URL'] - filename = env['CM_JAVAC_PREBUILT_FILENAME'] + javac_prebuilt_version = env['CM_JAVAC_PREBUILT_VERSION'] + javac_prebuilt_build = env['CM_JAVAC_PREBUILT_BUILD'] - javac_prebuilt_version = env['CM_JAVAC_PREBUILT_VERSION'] - javac_prebuilt_build = env['CM_JAVAC_PREBUILT_BUILD'] + for key in ['CM_JAVAC_PREBUILT_VERSION', + 'CM_JAVAC_PREBUILT_BUILD', + 'CM_JAVAC_PREBUILT_HOST_OS', + 'CM_JAVAC_PREBUILT_EXT']: + url = url.replace('${'+key+'}', env[key]) + filename = filename.replace('${'+key+'}', env[key]) - for key in ['CM_JAVAC_PREBUILT_VERSION', - 'CM_JAVAC_PREBUILT_BUILD', - 'CM_JAVAC_PREBUILT_HOST_OS', - 'CM_JAVAC_PREBUILT_EXT']: - url = url.replace('${'+key+'}', env[key]) - filename = filename.replace('${'+key+'}', env[key]) + env['CM_JAVAC_PREBUILT_URL'] = url + env['CM_JAVAC_PREBUILT_FILENAME'] = filename - env['CM_JAVAC_PREBUILT_URL'] = url - env['CM_JAVAC_PREBUILT_FILENAME'] = filename + print ('') + print (recursion_spaces + ' Downloading and installing prebuilt Java from {} 
...'.format(url+filename)) - print ('') - print (recursion_spaces + ' Downloading and installing prebuilt Java from {} ...'.format(url+filename)) - - rr = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install-prebuilt'}) - if rr['return']>0: return rr + rr = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install-prebuilt'}) + if rr['return']>0: return rr - target_path = os.path.join(cur_dir, 'jdk-'+java_prebuilt_version, 'bin') - target_file = os.path.join(target_path, file_name) + target_path = os.path.join(cur_dir, 'jdk-'+java_prebuilt_version, 'bin') + target_file = os.path.join(target_path, file_name) - if not os.path.isfile(target_file): - return {'return':1, 'error':'can\'t find target file {}'.format(target_file)} + if not os.path.isfile(target_file): + return {'return':1, 'error':'can\'t find target file {}'.format(target_file)} - print ('') - print (recursion_spaces + ' Registering file {} ...'.format(target_file)) + print ('') + print (recursion_spaces + ' Registering file {} ...'.format(target_file)) - env[env_path_key] = target_file + env[env_path_key] = target_file - if '+PATH' not in env: env['+PATH'] = [] - env['+PATH'].append(target_path) + if '+PATH' not in env: env['+PATH'] = [] + env['+PATH'].append(target_path) return {'return':0} diff --git a/script/get-llvm/customize.py b/script/get-llvm/customize.py index c9d872a23f..dd0d69eec4 100644 --- a/script/get-llvm/customize.py +++ b/script/get-llvm/customize.py @@ -22,7 +22,7 @@ def preprocess(i): 'env_path_key':'CM_LLVM_CLANG_BIN_WITH_PATH', 'run_script_input':i['run_script_input'], 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + if r['return'] >0 : if r['return'] == 16: env['CM_REQUIRE_INSTALL'] = "yes" return {'return': 0} diff --git a/script/get-ml-model-3d-unet-kits19/customize.py b/script/get-ml-model-3d-unet-kits19/customize.py index 66c0a28ff7..e874c46d65 100644 --- a/script/get-ml-model-3d-unet-kits19/customize.py +++ b/script/get-ml-model-3d-unet-kits19/customize.py @@ -12,7 +12,7 @@ def preprocess(i): cm = automation.cmind path = os.path.dirname(env['CM_ML_MODEL_FILE_WITH_PATH']) - + if env.get("CM_DAE_EXTRACT_DOWNLOADED", " ") != " ": env['CM_ML_MODEL_PATH'] = os.path.join(path, env['CM_ML_MODEL_FILE']) env['CM_ML_MODEL_FILE_WITH_PATH'] = env['CM_ML_MODEL_PATH'] @@ -20,5 +20,5 @@ def preprocess(i): env['CM_ML_MODEL_PATH'] = path env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH'] - + return {'return':0} diff --git a/script/get-ml-model-bert-large-squad/customize.py b/script/get-ml-model-bert-large-squad/customize.py index 1c8e02aa43..6960f00027 100644 --- a/script/get-ml-model-bert-large-squad/customize.py +++ b/script/get-ml-model-bert-large-squad/customize.py @@ -29,4 +29,3 @@ def postprocess(i): env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] return {'return':0} - diff --git a/script/get-ml-model-efficientnet-lite/customize.py b/script/get-ml-model-efficientnet-lite/customize.py index 5571383453..6aaac4cfaf 100644 --- a/script/get-ml-model-efficientnet-lite/customize.py +++ b/script/get-ml-model-efficientnet-lite/customize.py @@ -18,8 +18,8 @@ def preprocess(i): print ('Downloading from {}'.format(url)) - r = cm.access({'action':'download_file', - 'automation':'utils,dc2743f8450541e3', + r = cm.access({'action':'download_file', + 'automation':'utils,dc2743f8450541e3', 'url':url}) if r['return']>0: return r diff --git a/script/get-ml-model-gptj/convert_gptj_ckpt.py 
b/script/get-ml-model-gptj/convert_gptj_ckpt.py index 34f404932a..26fc7b02da 100644 --- a/script/get-ml-model-gptj/convert_gptj_ckpt.py +++ b/script/get-ml-model-gptj/convert_gptj_ckpt.py @@ -33,147 +33,147 @@ def convert(base_model_path, pax_model_path): - """Convert from gpt-j-6b to pax.""" - print(f'Loading the base model from {base_model_path}') - - base = AutoModelForCausalLM.from_pretrained( - base_model_path, low_cpu_mem_usage=True - ) - for key, value in base.state_dict().items(): - print('%s %s' % (key, value.data.numpy().shape)) - - jax_weights = { - 'lm': { - 'embedding_lookup': { - 'emb_var': base.state_dict()[ - 'transformer.wte.weight' - ].data.numpy()[:vocab, :] - }, - 'softmax': { - 'logits_ffn': { - 'linear': { - 'w': ( - base.state_dict()['lm_head.weight'] - .data.numpy() - .transpose()[:, :vocab] - ), - }, - 'bias': {'b': base.state_dict()['lm_head.bias'].data.numpy()}, - } - }, - 'final_ln': { - 'scale': base.state_dict()[ - 'transformer.ln_f.weight' - ].data.numpy(), - 'bias': base.state_dict()['transformer.ln_f.bias'].data.numpy(), - }, - 'transformer': {}, - } - } - - for layer_idx in range(num_layers): - query = base.state_dict()[ - 'transformer.h.%d.attn.q_proj.weight' % layer_idx - ].data.numpy() - key = base.state_dict()[ - 'transformer.h.%d.attn.k_proj.weight' % layer_idx - ].data.numpy() - value = base.state_dict()[ - 'transformer.h.%d.attn.v_proj.weight' % layer_idx - ].data.numpy() - wc = np.stack((query, key, value)) - wc = np.reshape( - wc, [3, num_heads, dims_per_head, num_heads * dims_per_head] - ) - wc = np.transpose(wc, (0, 3, 1, 2)) + """Convert from gpt-j-6b to pax.""" + print(f'Loading the base model from {base_model_path}') - w_post = base.state_dict()[ - 'transformer.h.%d.attn.out_proj.weight' % layer_idx - ].data.numpy() - w_post = np.reshape( - w_post, [num_heads * dims_per_head, num_heads, dims_per_head] + base = AutoModelForCausalLM.from_pretrained( + base_model_path, low_cpu_mem_usage=True ) - layer_weight = { - 'self_attention': { - 'combined_qkv': { - 'w': wc, + for key, value in base.state_dict().items(): + print('%s %s' % (key, value.data.numpy().shape)) + + jax_weights = { + 'lm': { + 'embedding_lookup': { + 'emb_var': base.state_dict()[ + 'transformer.wte.weight' + ].data.numpy()[:vocab, :] + }, + 'softmax': { + 'logits_ffn': { + 'linear': { + 'w': ( + base.state_dict()['lm_head.weight'] + .data.numpy() + .transpose()[:, :vocab] + ), + }, + 'bias': {'b': base.state_dict()['lm_head.bias'].data.numpy()}, + } }, - 'post': { - 'w': w_post, + 'final_ln': { + 'scale': base.state_dict()[ + 'transformer.ln_f.weight' + ].data.numpy(), + 'bias': base.state_dict()['transformer.ln_f.bias'].data.numpy(), }, - }, - 'ff_layer': { - 'ffn_layer1': { - 'linear': { - 'w': ( - base.state_dict()[ - 'transformer.h.%d.mlp.fc_in.weight' % layer_idx - ] - .data.numpy() - .transpose() - ), + 'transformer': {}, + } + } + + for layer_idx in range(num_layers): + query = base.state_dict()[ + 'transformer.h.%d.attn.q_proj.weight' % layer_idx + ].data.numpy() + key = base.state_dict()[ + 'transformer.h.%d.attn.k_proj.weight' % layer_idx + ].data.numpy() + value = base.state_dict()[ + 'transformer.h.%d.attn.v_proj.weight' % layer_idx + ].data.numpy() + wc = np.stack((query, key, value)) + wc = np.reshape( + wc, [3, num_heads, dims_per_head, num_heads * dims_per_head] + ) + wc = np.transpose(wc, (0, 3, 1, 2)) + + w_post = base.state_dict()[ + 'transformer.h.%d.attn.out_proj.weight' % layer_idx + ].data.numpy() + w_post = np.reshape( + w_post, [num_heads * dims_per_head, 
num_heads, dims_per_head] + ) + layer_weight = { + 'self_attention': { + 'combined_qkv': { + 'w': wc, }, - 'bias': { - 'b': base.state_dict()[ - 'transformer.h.%d.mlp.fc_in.bias' % layer_idx - ].data.numpy(), + 'post': { + 'w': w_post, }, }, - 'ffn_layer2': { - 'linear': { - 'w': ( - base.state_dict()[ - 'transformer.h.%d.mlp.fc_out.weight' % layer_idx - ] - .data.numpy() - .transpose() - ), + 'ff_layer': { + 'ffn_layer1': { + 'linear': { + 'w': ( + base.state_dict()[ + 'transformer.h.%d.mlp.fc_in.weight' % layer_idx + ] + .data.numpy() + .transpose() + ), + }, + 'bias': { + 'b': base.state_dict()[ + 'transformer.h.%d.mlp.fc_in.bias' % layer_idx + ].data.numpy(), + }, }, - 'bias': { - 'b': base.state_dict()[ - 'transformer.h.%d.mlp.fc_out.bias' % layer_idx - ].data.numpy(), + 'ffn_layer2': { + 'linear': { + 'w': ( + base.state_dict()[ + 'transformer.h.%d.mlp.fc_out.weight' % layer_idx + ] + .data.numpy() + .transpose() + ), + }, + 'bias': { + 'b': base.state_dict()[ + 'transformer.h.%d.mlp.fc_out.bias' % layer_idx + ].data.numpy(), + }, }, }, - }, - 'layer_norm': { - 'scale': base.state_dict()[ - 'transformer.h.%d.ln_1.weight' % layer_idx - ].data.numpy(), - 'bias': base.state_dict()[ - 'transformer.h.%d.ln_1.bias' % layer_idx - ].data.numpy(), - }, - } - jax_weights['lm']['transformer']['x_layers_%d' % layer_idx] = layer_weight + 'layer_norm': { + 'scale': base.state_dict()[ + 'transformer.h.%d.ln_1.weight' % layer_idx + ].data.numpy(), + 'bias': base.state_dict()[ + 'transformer.h.%d.ln_1.bias' % layer_idx + ].data.numpy(), + }, + } + jax_weights['lm']['transformer']['x_layers_%d' % layer_idx] = layer_weight - print(f'Saving the pax model to {pax_model_path}') - jax_states = train_states.TrainState( - step=0, mdl_vars={'params': jax_weights}, opt_states={} - ) - device_mesh = py_utils.create_device_mesh([1, 1, num_gpus]) - global_mesh = jax.sharding.Mesh(device_mesh, ['replica', 'data_mdl2', 'mdl']) + print(f'Saving the pax model to {pax_model_path}') + jax_states = train_states.TrainState( + step=0, mdl_vars={'params': jax_weights}, opt_states={} + ) + device_mesh = py_utils.create_device_mesh([1, 1, num_gpus]) + global_mesh = jax.sharding.Mesh(device_mesh, ['replica', 'data_mdl2', 'mdl']) - # Identity pjit is needed to output a GDA model_states. - def identity(x): - return x + # Identity pjit is needed to output a GDA model_states. 
+ def identity(x): + return x - pjitted_identity = pjit.pjit(identity, in_shardings=None, out_shardings=None) - with global_mesh: - jax_states_gda = pjitted_identity(jax_states) + pjitted_identity = pjit.pjit(identity, in_shardings=None, out_shardings=None) + with global_mesh: + jax_states_gda = pjitted_identity(jax_states) - checkpoints.save_checkpoint( - jax_states_gda, - pax_model_path, - checkpoint_type=checkpoints.CheckpointType.GDA, - ) - print('done') + checkpoints.save_checkpoint( + jax_states_gda, + pax_model_path, + checkpoint_type=checkpoints.CheckpointType.GDA, + ) + print('done') if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--base-model-path', type=str, required=True) - parser.add_argument('--pax-model-path', type=str, required=True) - args = parser.parse_args() + parser = argparse.ArgumentParser() + parser.add_argument('--base-model-path', type=str, required=True) + parser.add_argument('--pax-model-path', type=str, required=True) + args = parser.parse_args() - convert(args.base_model_path, args.pax_model_path) + convert(args.base_model_path, args.pax_model_path) diff --git a/script/get-ml-model-huggingface-zoo/download_model.py b/script/get-ml-model-huggingface-zoo/download_model.py index 87f9b25aeb..c355c11ee7 100644 --- a/script/get-ml-model-huggingface-zoo/download_model.py +++ b/script/get-ml-model-huggingface-zoo/download_model.py @@ -19,90 +19,90 @@ f.write(f"CM_ML_MODEL_FILE_WITH_PATH={os.path.join(os.getcwd(),'')}") else: - subfolder = os.environ.get('CM_HF_SUBFOLDER', '') - full_subfolder = os.environ.get('CM_HF_FULL_SUBFOLDER', '') + subfolder = os.environ.get('CM_HF_SUBFOLDER', '') + full_subfolder = os.environ.get('CM_HF_FULL_SUBFOLDER', '') - model_filename = os.environ.get('CM_MODEL_ZOO_FILENAME', '') - if model_filename == '': - model_filename = 'model.onnx' + model_filename = os.environ.get('CM_MODEL_ZOO_FILENAME', '') + if model_filename == '': + model_filename = 'model.onnx' - model_filenames = model_filename.split(',') if ',' in model_filename else [model_filename] + model_filenames = model_filename.split(',') if ',' in model_filename else [model_filename] - base_model_filepath = None + base_model_filepath = None - files = [] - if full_subfolder!='': + files = [] + if full_subfolder!='': - from huggingface_hub import HfFileSystem - fs = HfFileSystem() + from huggingface_hub import HfFileSystem + fs = HfFileSystem() - # List all files in a directory - path = model_stub+'/'+full_subfolder + # List all files in a directory + path = model_stub+'/'+full_subfolder - print ('') - print ('Listing files in {} ...'.format(path)) + print ('') + print ('Listing files in {} ...'.format(path)) - def list_hf_files(path): - all_files = [] - - xrevision = None if revision == '' else revision - files=fs.ls(path, revision=xrevision) #, detail=False) + def list_hf_files(path): + all_files = [] - for f in files: - fname = f['name'] - fdir = f['type'] == 'directory' + xrevision = None if revision == '' else revision + files=fs.ls(path, revision=xrevision) #, detail=False) - if fdir: - all_files += list_hf_files(fname) - else: - all_files.append(fname) + for f in files: + fname = f['name'] + fdir = f['type'] == 'directory' - return all_files - - - files=list_hf_files(path) + if fdir: + all_files += list_hf_files(fname) + else: + all_files.append(fname) - print ('') - print ('Found {} files'.format(len(files))) - - for f in files: + return all_files - remove = len(model_stub)+1 - if revision!='': - remove+=len(revision)+1 + 
files=list_hf_files(path) - ff = f[remove:] + print ('') + print ('Found {} files'.format(len(files))) - if ff not in model_filenames: - model_filenames.append(ff) + for f in files: + remove = len(model_stub)+1 - print ('') - for model_filename in model_filenames: + if revision!='': + remove+=len(revision)+1 - print("Downloading file {} / {} ...".format(model_stub, model_filename)) + ff = f[remove:] - extra_dir = os.path.dirname(model_filename) + if ff not in model_filenames: + model_filenames.append(ff) - if extra_dir!='' and not os.path.exists(extra_dir): - os.makedirs(extra_dir) - - xrevision = None if revision == '' else revision - xsubfolder = None if subfolder == '' else subfolder - - downloaded_path = hf_hub_download(repo_id=model_stub, - subfolder=xsubfolder, - filename=model_filename, - revision=xrevision, - cache_dir=os.getcwd()) - print(downloaded_path) - if not base_model_filepath: - base_model_filepath = downloaded_path - + print ('') + for model_filename in model_filenames: - print ('') - - with open('tmp-run-env.out', 'w') as f: - f.write(f"CM_ML_MODEL_FILE_WITH_PATH={base_model_filepath}") + print("Downloading file {} / {} ...".format(model_stub, model_filename)) + + extra_dir = os.path.dirname(model_filename) + + if extra_dir!='' and not os.path.exists(extra_dir): + os.makedirs(extra_dir) + + + xrevision = None if revision == '' else revision + xsubfolder = None if subfolder == '' else subfolder + + downloaded_path = hf_hub_download(repo_id=model_stub, + subfolder=xsubfolder, + filename=model_filename, + revision=xrevision, + cache_dir=os.getcwd()) + print(downloaded_path) + if not base_model_filepath: + base_model_filepath = downloaded_path + + + print ('') + + with open('tmp-run-env.out', 'w') as f: + f.write(f"CM_ML_MODEL_FILE_WITH_PATH={base_model_filepath}") diff --git a/script/get-ml-model-mobilenet/customize.py b/script/get-ml-model-mobilenet/customize.py index 5571383453..6aaac4cfaf 100644 --- a/script/get-ml-model-mobilenet/customize.py +++ b/script/get-ml-model-mobilenet/customize.py @@ -18,8 +18,8 @@ def preprocess(i): print ('Downloading from {}'.format(url)) - r = cm.access({'action':'download_file', - 'automation':'utils,dc2743f8450541e3', + r = cm.access({'action':'download_file', + 'automation':'utils,dc2743f8450541e3', 'url':url}) if r['return']>0: return r diff --git a/script/get-ml-model-neuralmagic-zoo/customize.py b/script/get-ml-model-neuralmagic-zoo/customize.py index 7ba85a9d58..4e912f00e8 100644 --- a/script/get-ml-model-neuralmagic-zoo/customize.py +++ b/script/get-ml-model-neuralmagic-zoo/customize.py @@ -6,7 +6,7 @@ def preprocess(i): os_info = i['os_info'] env = i['env'] - + automation = i['automation'] cm = automation.cmind @@ -22,7 +22,7 @@ def preprocess(i): for v in variations: if '#' not in v: variation_models.append(v) - + return {'return':1, 'error':'ENV CM_MODEL_ZOO_STUB is not set. 
Please select variation from {}'.format(str(variation_models))} return {'return':0} @@ -32,7 +32,7 @@ def postprocess(i): os_info = i['os_info'] env = i['env'] - + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] onnx_path = os.path.join(env['CM_ML_MODEL_FILE_WITH_PATH'], "model.onnx") diff --git a/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py b/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py index e076e4072e..32a61ec375 100644 --- a/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py +++ b/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py @@ -41,7 +41,7 @@ # (PluginField("score_activation", nullptr, PluginFieldType::kINT32, 1)); # (PluginField("box_coding", nullptr, PluginFieldType::kINT32, 1)); -node_attrs = { +node_attrs = { "background_class": -1, "score_threshold" : 0.05, "iou_threshold" : 0.5, diff --git a/script/get-ml-model-retinanet/customize.py b/script/get-ml-model-retinanet/customize.py index cc875212a4..0c588e1ae5 100644 --- a/script/get-ml-model-retinanet/customize.py +++ b/script/get-ml-model-retinanet/customize.py @@ -27,4 +27,3 @@ def postprocess(i): env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] return {'return':0} - diff --git a/script/get-ml-model-retinanet/node-precision-info.py b/script/get-ml-model-retinanet/node-precision-info.py index 100a64ecbb..3e4c0066ac 100644 --- a/script/get-ml-model-retinanet/node-precision-info.py +++ b/script/get-ml-model-retinanet/node-precision-info.py @@ -63,7 +63,7 @@ def main(args): print(f"Node precision info successfully printed out to {args.output}") - + if __name__ == "__main__": args = parse_args() main(args) diff --git a/script/get-ml-model-rnnt/customize.py b/script/get-ml-model-rnnt/customize.py index 65961f1565..57a8d34e5c 100644 --- a/script/get-ml-model-rnnt/customize.py +++ b/script/get-ml-model-rnnt/customize.py @@ -17,8 +17,8 @@ def preprocess(i): print ('Downloading from {}'.format(url)) - r = cm.access({'action':'download_file', - 'automation':'utils,dc2743f8450541e3', + r = cm.access({'action':'download_file', + 'automation':'utils,dc2743f8450541e3', 'url':url}) if r['return']>0: return r diff --git a/script/get-ml-model-tiny-resnet/customize.py b/script/get-ml-model-tiny-resnet/customize.py index 4e690eaf15..32bf59c7d0 100644 --- a/script/get-ml-model-tiny-resnet/customize.py +++ b/script/get-ml-model-tiny-resnet/customize.py @@ -22,4 +22,3 @@ def postprocess(i): env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] return {'return':0} - diff --git a/script/get-mlperf-inference-src/customize.py b/script/get-mlperf-inference-src/customize.py index 8373279528..75186cdf44 100644 --- a/script/get-mlperf-inference-src/customize.py +++ b/script/get-mlperf-inference-src/customize.py @@ -21,15 +21,15 @@ def preprocess(i): env["CM_GIT_URL"] = "https://github.com/mlcommons/inference" elif env.get('CM_GIT_CHECKOUT', '' ) != '' and env.get('CM_TMP_GIT_CHECKOUT', '' ) != '' and env.get('CM_GIT_CHECKOUT', '' )!=env.get('CM_TMP_GIT_CHECKOUT', '' ): # if checkout branch is assigned inside version and custom branch is also specified - return {"return":1, "error":"Conflicting branches between version assigned and user specified."} + return {"return":1, "error":"Conflicting branches between version assigned and user specified."} elif env.get('CM_GIT_URL', '' ) != '' and env.get('CM_TMP_GIT_URL', '' ) != '' and env.get('CM_GIT_URL', '' )!=env.get('CM_TMP_GIT_URL', 
'' ): # if GIT URL is assigned inside version and custom branch is also specified - return {"return":1, "error":"Conflicting URL's between version assigned and user specified."} - + return {"return":1, "error":"Conflicting URL's between version assigned and user specified."} + if env.get('CM_VERSION','') == '': env['CM_VERSION'] = "custom" - - # check whether branch and url is specified, + + # check whether branch and url is specified, # if not try to assign the values specified in version parameters, # if version parameters does not have the value to a parameter, set the default one if env.get('CM_GIT_CHECKOUT', '' ) == '': @@ -37,13 +37,13 @@ def preprocess(i): env["CM_GIT_CHECKOUT"] = env["CM_TMP_GIT_CHECKOUT"] else: env["CM_GIT_CHECKOUT"] = "master" - + if env.get('CM_GIT_URL', '' ) == '': if env.get('CM_TMP_GIT_URL', '' ) != '': env["CM_GIT_URL"] = env["CM_TMP_GIT_URL"] else: env["CM_GIT_URL"] = "https://github.com/mlcommons/inference" - + if env.get("CM_MLPERF_LAST_RELEASE", '') == '': env["CM_MLPERF_LAST_RELEASE"] = "v4.1" diff --git a/script/get-mlperf-inference-sut-description/customize.py b/script/get-mlperf-inference-sut-description/customize.py index 1da58c7d14..d0a0fed54a 100644 --- a/script/get-mlperf-inference-sut-description/customize.py +++ b/script/get-mlperf-inference-sut-description/customize.py @@ -18,7 +18,7 @@ def preprocess(i): hw_name = env['CM_HW_NAME'] - backend = env.get('CM_MLPERF_BACKEND', '') + backend = env.get('CM_MLPERF_BACKEND', '') backend_version = env.get('CM_MLPERF_BACKEND_VERSION', '') sut_suffix = '' backend_name = '' @@ -79,9 +79,9 @@ def preprocess(i): if os_name_string=='' and os_info['platform'] == 'windows': import platform os_name_string = str(platform.platform()) - + state['CM_SUT_META']['operating_system'] = os_name_string - + state['CM_SUT_META']['other_software_stack'] = "Python: " + python_version + ", " + compiler + "-" + compiler_version if state['CM_SUT_META'].get('system_name','') == '': @@ -100,8 +100,8 @@ def preprocess(i): if env.get('CM_MLPERF_DEVICE','') == "gpu" or env.get('CM_MLPERF_DEVICE','') == "cuda": if env.get('CM_CUDA_VERSION','') != '': cuda_version = " , CUDA " + env['CM_CUDA_VERSION'] - state['CM_SUT_META']['other_software_stack'] += cuda_version - + state['CM_SUT_META']['other_software_stack'] += cuda_version + if 'cm_cuda_device_prop' in state: state['CM_SUT_META']['accelerator_frequency'] = state['cm_cuda_device_prop']['Max clock rate'] state['CM_SUT_META']['accelerator_memory_capacity'] = str(int(state['cm_cuda_device_prop']['Global memory'])/(1024*1024.0*1024)) + " GB" @@ -115,7 +115,7 @@ def preprocess(i): if physical_cores_per_node == None or physical_cores_per_node == '': if os_info['platform'] == 'windows': physical_cores_per_node = '1' - + state['CM_SUT_META']['host_processor_core_count'] = physical_cores_per_node if state['CM_SUT_META'].get('host_processor_model_name', '') == '': diff --git a/script/get-mlperf-inference-sut-description/get_memory_info.py b/script/get-mlperf-inference-sut-description/get_memory_info.py index aeadde557b..29e2058000 100644 --- a/script/get-mlperf-inference-sut-description/get_memory_info.py +++ b/script/get-mlperf-inference-sut-description/get_memory_info.py @@ -53,8 +53,7 @@ meminfo = [] for item in memory: meminfo.append( "; ".join(item['info'])) - + meminfo_string =", ".join(meminfo) with open("tmp-run-env.out", "w") as f: f.write(f"CM_HOST_MEM_INFO={meminfo_string}") - diff --git a/script/get-mlperf-inference-utils/mlperf_utils.py 
b/script/get-mlperf-inference-utils/mlperf_utils.py index 2af647eab1..071dd26f8f 100644 --- a/script/get-mlperf-inference-utils/mlperf_utils.py +++ b/script/get-mlperf-inference-utils/mlperf_utils.py @@ -198,7 +198,7 @@ def get_result_string(version, model, scenario, result_path, has_power, sub_res, test_pass = checker.check_compliance_perf_dir(test_path) if test != "TEST06" else True if test_pass and test in [ "TEST01", "TEST06" ]: #test_pass = checker.check_compliance_acc_dir(test_path, mlperf_model, config) - pass # accuracy truncation script is done after submission generation. We assume here that it'll pass + pass # accuracy truncation script is done after submission generation. We assume here that it'll pass if test_pass: result[test] = "passed" else: @@ -242,8 +242,8 @@ def get_result_string(version, model, scenario, result_path, has_power, sub_res, return result_string, result def get_result_table(results): - - + + headers = ["Model", "Scenario", "Accuracy", "Throughput", "Latency (in ms)", "Power Efficiency (in samples/J)", "TEST01", "TEST04"] table = [] for model in results: @@ -260,7 +260,7 @@ def get_result_table(results): row.append("-") if results[model][scenario].get('performance'): - + if "stream" in scenario.lower(): if float(results[model][scenario]['performance']) == 0: row.append("-") diff --git a/script/get-nvidia-mitten/customize.py b/script/get-nvidia-mitten/customize.py index d38c8c2ca5..cc9342a50a 100644 --- a/script/get-nvidia-mitten/customize.py +++ b/script/get-nvidia-mitten/customize.py @@ -6,7 +6,7 @@ def preprocess(i): os_info = i['os_info'] # TBD - + return {'return':0} def postprocess(i): @@ -17,6 +17,6 @@ def postprocess(i): # TBD cur_dir = os.getcwd() - + return {'return':0} diff --git a/script/get-onnxruntime-prebuilt/customize.py b/script/get-onnxruntime-prebuilt/customize.py index 786bc8122c..5fb09c9218 100644 --- a/script/get-onnxruntime-prebuilt/customize.py +++ b/script/get-onnxruntime-prebuilt/customize.py @@ -12,17 +12,17 @@ def preprocess(i): hostos=env['CM_HOST_OS_TYPE'] ext = '.tgz' - + if hostos =='darwin': hostos='osx' - elif hostos =='windows': - hostos='win' - ext = '.zip' + elif hostos =='windows': + hostos='win' + ext = '.zip' device=env.get('CM_ONNXRUNTIME_DEVICE','') if device!='': machine+='-'+device version = env['CM_VERSION'] - + FOLDER = 'onnxruntime-{}-{}-{}'.format(hostos, machine, version) FILENAME = FOLDER + ext @@ -37,7 +37,7 @@ def preprocess(i): env['FILENAME'] = FILENAME env['URL'] = URL - + return {'return':0} @@ -64,7 +64,7 @@ def postprocess(i): env['+LD_LIBRARY_PATH'].append(lib_path) env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) - if hostos =='windows': + if hostos =='windows': # For dynamic libraries env['+PATH'] = [lib_path] diff --git a/script/get-platform-details/customize.py b/script/get-platform-details/customize.py index c618fa0194..faeaa802f5 100644 --- a/script/get-platform-details/customize.py +++ b/script/get-platform-details/customize.py @@ -15,7 +15,7 @@ def preprocess(i): if not check_installation("numactl",os_info): env['CM_INSTALL_NUMACTL'] = 'True' - + #if not check_installation("cpupower",os_info): env['CM_INSTALL_CPUPOWER'] = 'True' @@ -24,7 +24,7 @@ def preprocess(i): env['CM_PLATFORM_DETAILS_DIR_PATH'] = os.getcwd() if env.get('CM_PLATFORM_DETAILS_FILE_NAME', '') == '': env['CM_PLATFORM_DETAILS_FILE_NAME'] = "system-info.txt" - env['CM_PLATFORM_DETAILS_FILE_PATH'] = os.path.join(env['CM_PLATFORM_DETAILS_DIR_PATH'], env['CM_PLATFORM_DETAILS_FILE_NAME']) + env['CM_PLATFORM_DETAILS_FILE_PATH'] = 
os.path.join(env['CM_PLATFORM_DETAILS_DIR_PATH'], env['CM_PLATFORM_DETAILS_FILE_NAME']) return {'return':0} @@ -38,5 +38,5 @@ def postprocess(i): os_info = i['os_info'] automation = i['automation'] - + return {'return':0} diff --git a/script/get-preprocessed-dataset-generic/src/generic_preprocess.py b/script/get-preprocessed-dataset-generic/src/generic_preprocess.py index 752895db88..68aa28997e 100644 --- a/script/get-preprocessed-dataset-generic/src/generic_preprocess.py +++ b/script/get-preprocessed-dataset-generic/src/generic_preprocess.py @@ -100,7 +100,7 @@ def preprocess_files(selected_filenames, source_dir, destination_dir, crop_perce # NHWC -> NCHW. if data_layout == 'nchw': image_data = image_data[:,:,0:3].transpose(2, 0, 1) - + # Value 1 for quantization to int8 if quantize == 1: image_data = quantize_to_int8(image_data, quant_scale, quant_offset) diff --git a/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py b/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py index ef28014b94..8821bc0d5f 100644 --- a/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py +++ b/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py @@ -164,4 +164,3 @@ def preprocess(): if __name__ == "__main__": preprocess() - diff --git a/script/get-preprocessed-dataset-imagenet/customize.py b/script/get-preprocessed-dataset-imagenet/customize.py index f744e1330f..6a7a992556 100644 --- a/script/get-preprocessed-dataset-imagenet/customize.py +++ b/script/get-preprocessed-dataset-imagenet/customize.py @@ -23,9 +23,9 @@ def preprocess(i): "val_map.txt")) preprocessed_path = env['CM_DATASET_PREPROCESSED_PATH'] - + if env.get('CM_DATASET_TYPE', '') == "validation" and not exists(os.path.join(preprocessed_path, "val_map.txt")): - shutil.copy(os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt"), + shutil.copy(os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt"), os.path.join(preprocessed_path, "val_map.txt")) if env.get('CM_DATASET_TYPE', '') == "calibration": diff --git a/script/get-preprocessed-dataset-imagenet/preprocess.py b/script/get-preprocessed-dataset-imagenet/preprocess.py index aa804d19d9..6acb29a739 100644 --- a/script/get-preprocessed-dataset-imagenet/preprocess.py +++ b/script/get-preprocessed-dataset-imagenet/preprocess.py @@ -35,6 +35,6 @@ image_format=img_format, pre_process = pre_process, use_cache=True, - count=count, + count=count, threads=threads, preprocessed_dir=preprocessed_dir) diff --git a/script/get-preprocessed-dataset-openimages/nvidia_preprocess.py b/script/get-preprocessed-dataset-openimages/nvidia_preprocess.py index 4df55f3cd4..423bfe7594 100644 --- a/script/get-preprocessed-dataset-openimages/nvidia_preprocess.py +++ b/script/get-preprocessed-dataset-openimages/nvidia_preprocess.py @@ -48,7 +48,7 @@ def loader(fpath): def quantizer(image): # Dynamic range of image is [-2.64064, 2.64064] based on calibration cache. 
- # Calculated by: + # Calculated by: # np.uint32(int("3caa54fc", base=16)).view(np.dtype('float32')).item() * 127.0 max_abs = 2.64064 image_int8 = image.clip(-max_abs, max_abs) / max_abs * 127.0 diff --git a/script/get-python3/customize.py b/script/get-python3/customize.py index 5d07f6ac86..ab00dc9c99 100644 --- a/script/get-python3/customize.py +++ b/script/get-python3/customize.py @@ -102,7 +102,7 @@ def postprocess(i): found_path_root = os.path.dirname(found_path) if from_virtual: - # Clean PATH (it will be in activate script) + # Clean PATH (it will be in activate script) # but keep LD_LIBRARY_PATH and C_INCLUDE_PATH from the native python for k in ['+PATH']: if k in env: diff --git a/script/get-qaic-software-kit/customize.py b/script/get-qaic-software-kit/customize.py index fedeaaf763..00aced2dda 100644 --- a/script/get-qaic-software-kit/customize.py +++ b/script/get-qaic-software-kit/customize.py @@ -54,7 +54,7 @@ def postprocess(i): env['CM_QAIC_RUNNER_PATH'] = os.path.join(env['CM_QAIC_SOFTWARE_KIT_PATH'], "build", "utils", "qaic-runner") if '+PATH' not in env: - env['+PATH'] = [] + env['+PATH'] = [] env['+PATH'].append(env['CM_QAIC_RUNNER_PATH']) env['CM_QAIC_RUNNER_PATH'] = os.path.join(env['CM_QAIC_RUNNER_PATH'], "qaic-runner") diff --git a/script/get-rclone-config/customize.py b/script/get-rclone-config/customize.py index 92ac95147d..576b6b73f8 100644 --- a/script/get-rclone-config/customize.py +++ b/script/get-rclone-config/customize.py @@ -14,7 +14,7 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') if env.get('CM_RCLONE_CONFIG_CMD', '') != '': - env['CM_RUN_CMD'] = env['CM_RCLONE_CONFIG_CMD'] + env['CM_RUN_CMD'] = env['CM_RCLONE_CONFIG_CMD'] return {'return':0} diff --git a/script/get-rclone/customize.py b/script/get-rclone/customize.py index eef1ea9be6..c33fdd7a82 100644 --- a/script/get-rclone/customize.py +++ b/script/get-rclone/customize.py @@ -53,7 +53,7 @@ def preprocess(i): cur_dir = os.getcwd() path_bin = os.path.join(cur_dir, file_name) env['CM_RCLONE_BIN_WITH_PATH'] = path_bin - + if not env.get('+PATH', []): env['+PATH'] = [] env['+PATH'].append(cur_dir) @@ -63,8 +63,8 @@ def preprocess(i): env['+PATH'].append(cur_dir) - r = automation.run_native_script({'run_script_input':run_script_input, - 'env':env, + r = automation.run_native_script({'run_script_input':run_script_input, + 'env':env, 'script_name':install_script}) if r['return']>0: return r else: @@ -106,7 +106,7 @@ def postprocess(i): for section in config.sections(): if section not in default_config.sections(): default_config[section] = config[section] - + with open(default_config_path, 'w') as configfile: default_config.write(configfile) print({section: dict(default_config[section]) for section in default_config.sections()}) @@ -128,5 +128,5 @@ def postprocess(i): # Was downloaded and extracted by CM env['CM_RCLONE_BIN_WITH_PATH'] = path_bin env['+PATH']=[cur_dir] - + return {'return':0, 'version': version} diff --git a/script/get-rocm-devices/customize.py b/script/get-rocm-devices/customize.py index 06020b4643..03a0efd4ea 100644 --- a/script/get-rocm-devices/customize.py +++ b/script/get-rocm-devices/customize.py @@ -9,7 +9,7 @@ def preprocess(i): if str(env.get('CM_DETECT_USING_HIP-PYTHON', '')).lower() in [ "1", "yes", "true"]: i['run_script_input']['script_name'] = 'detect' - return {'return':0} + return {'return':0} def postprocess(i): @@ -47,16 +47,16 @@ def postprocess(i): if gpu_id < 0: continue - gpu[gpu_id][key] = val + gpu[gpu_id][key] = val p[key] = val key_env = 
'CM_ROCM_DEVICE_PROP_'+key.upper().replace(' ','_') env[key_env] = val - + state['cm_rocm_num_devices'] = gpu_id + 1 env['CM_ROCM_NUM_DEVICES'] = gpu_id + 1 state['cm_rocm_device_prop'] = p state['cm_rocm_devices_prop'] = gpu - - return {'return':0} + + return {'return':0} diff --git a/script/get-rocm/customize.py b/script/get-rocm/customize.py index 667c29f4da..dac6707cf2 100644 --- a/script/get-rocm/customize.py +++ b/script/get-rocm/customize.py @@ -22,7 +22,7 @@ def preprocess(i): 'env_path_key':'CM_ROCM_BIN_WITH_PATH', 'run_script_input':i['run_script_input'], 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + if r['return'] >0 : if r['return'] == 16: env['CM_REQUIRE_INSTALL'] = "yes" return {'return': 0} diff --git a/script/get-sys-utils-cm/customize.py b/script/get-sys-utils-cm/customize.py index 893384b648..3c8aa3c910 100644 --- a/script/get-sys-utils-cm/customize.py +++ b/script/get-sys-utils-cm/customize.py @@ -20,7 +20,7 @@ def preprocess(i): # Windows has moved to get-sys-utils-min and will be always run with "detect,os"! - + if os_info['platform'] == 'windows': print ('') print ('This script is not used on Windows') @@ -43,19 +43,19 @@ def preprocess(i): # url = env['CM_PACKAGE_WIN_URL'] # # urls = [url] if ';' not in url else url.split(';') -# +# # print ('') # print ('Current directory: {}'.format(os.getcwd())) -# +# # for url in urls: -# +# # url = url.strip() # # print ('') # print ('Downloading from {}'.format(url)) # -# r = cm.access({'action':'download_file', -# 'automation':'utils,dc2743f8450541e3', +# r = cm.access({'action':'download_file', +# 'automation':'utils,dc2743f8450541e3', # 'url':url}) # if r['return']>0: return r # @@ -63,8 +63,8 @@ def preprocess(i): # # print ('Unzipping file {}'.format(filename)) # -# r = cm.access({'action':'unzip_file', -# 'automation':'utils,dc2743f8450541e3', +# r = cm.access({'action':'unzip_file', +# 'automation':'utils,dc2743f8450541e3', # 'filename':filename}) # if r['return']>0: return r # diff --git a/script/get-sys-utils-min/customize.py b/script/get-sys-utils-min/customize.py index a8b9020c50..f9d4a29d76 100644 --- a/script/get-sys-utils-min/customize.py +++ b/script/get-sys-utils-min/customize.py @@ -27,19 +27,19 @@ def preprocess(i): url = env['CM_PACKAGE_WIN_URL'] urls = [url] if ';' not in url else url.split(';') - + print ('') print ('Current directory: {}'.format(os.getcwd())) - + for url in urls: - + url = url.strip() print ('') print ('Downloading from {}'.format(url)) - r = cm.access({'action':'download_file', - 'automation':'utils,dc2743f8450541e3', + r = cm.access({'action':'download_file', + 'automation':'utils,dc2743f8450541e3', 'url':url}) if r['return']>0: return r @@ -47,8 +47,8 @@ def preprocess(i): print ('Unzipping file {}'.format(filename)) - r = cm.access({'action':'unzip_file', - 'automation':'utils,dc2743f8450541e3', + r = cm.access({'action':'unzip_file', + 'automation':'utils,dc2743f8450541e3', 'filename':filename}) if r['return']>0: return r diff --git a/script/get-tensorrt/customize.py b/script/get-tensorrt/customize.py index 57b2497efa..df74a08fd6 100644 --- a/script/get-tensorrt/customize.py +++ b/script/get-tensorrt/customize.py @@ -14,71 +14,71 @@ def preprocess(i): #Not enforcing dev requirement for now if env.get('CM_TENSORRT_TAR_FILE_PATH','')=='' and env.get('CM_TENSORRT_REQUIRE_DEV1', '') != 'yes' and env.get('CM_HOST_PLATFORM_FLAVOR', '') != 'aarch64': - if os_info['platform'] == 'windows': - extra_pre='' - extra_ext='lib' - else: - extra_pre='lib' - extra_ext='so' - - 
libfilename = extra_pre + 'nvinfer.' +extra_ext - env['CM_TENSORRT_VERSION'] = 'vdetected' - - if env.get('CM_TMP_PATH', '').strip() != '': - path = env.get('CM_TMP_PATH') - if os.path.exists(os.path.join(path, libfilename)): - env['CM_TENSORRT_LIB_PATH'] = path - return {'return': 0} - - if not env.get('CM_TMP_PATH'): - env['CM_TMP_PATH'] = '' - - if os_info['platform'] == 'windows': - if env.get('CM_INPUT','').strip()=='' and env.get('CM_TMP_PATH','').strip()=='': - # Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA" - paths = [] - for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]: - if os.path.isdir(path): - dirs = os.listdir(path) - for dr in dirs: - path2 = os.path.join(path, dr, 'lib') - if os.path.isdir(path2): - paths.append(path2) - - if len(paths)>0: - tmp_paths = ';'.join(paths) - tmp_paths += ';'+os.environ.get('PATH','') - - env['CM_TMP_PATH'] = tmp_paths - env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' - - else: - # paths to cuda are not always in PATH - add a few typical locations to search for - # (unless forced by a user) - - if env.get('CM_INPUT','').strip()=='': - if env.get('CM_TMP_PATH','').strip()!='': - env['CM_TMP_PATH']+=':' - - env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' - - for lib_path in env.get('+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []): - if(os.path.exists(lib_path)): - env['CM_TMP_PATH']+=':'+lib_path - - r = i['automation'].find_artifact({'file_name': libfilename, - 'env': env, - 'os_info':os_info, - 'default_path_env_key': 'LD_LIBRARY_PATH', - 'detect_version':False, - 'env_path_key':'CM_TENSORRT_LIB_WITH_PATH', - 'run_script_input':i['run_script_input'], - 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : - if os_info['platform'] == 'windows': - return r - else: - return {'return':0} + if os_info['platform'] == 'windows': + extra_pre='' + extra_ext='lib' + else: + extra_pre='lib' + extra_ext='so' + + libfilename = extra_pre + 'nvinfer.' 
+extra_ext + env['CM_TENSORRT_VERSION'] = 'vdetected' + + if env.get('CM_TMP_PATH', '').strip() != '': + path = env.get('CM_TMP_PATH') + if os.path.exists(os.path.join(path, libfilename)): + env['CM_TENSORRT_LIB_PATH'] = path + return {'return': 0} + + if not env.get('CM_TMP_PATH'): + env['CM_TMP_PATH'] = '' + + if os_info['platform'] == 'windows': + if env.get('CM_INPUT','').strip()=='' and env.get('CM_TMP_PATH','').strip()=='': + # Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA" + paths = [] + for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]: + if os.path.isdir(path): + dirs = os.listdir(path) + for dr in dirs: + path2 = os.path.join(path, dr, 'lib') + if os.path.isdir(path2): + paths.append(path2) + + if len(paths)>0: + tmp_paths = ';'.join(paths) + tmp_paths += ';'+os.environ.get('PATH','') + + env['CM_TMP_PATH'] = tmp_paths + env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + + else: + # paths to cuda are not always in PATH - add a few typical locations to search for + # (unless forced by a user) + + if env.get('CM_INPUT','').strip()=='': + if env.get('CM_TMP_PATH','').strip()!='': + env['CM_TMP_PATH']+=':' + + env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + + for lib_path in env.get('+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []): + if(os.path.exists(lib_path)): + env['CM_TMP_PATH']+=':'+lib_path + + r = i['automation'].find_artifact({'file_name': libfilename, + 'env': env, + 'os_info':os_info, + 'default_path_env_key': 'LD_LIBRARY_PATH', + 'detect_version':False, + 'env_path_key':'CM_TENSORRT_LIB_WITH_PATH', + 'run_script_input':i['run_script_input'], + 'recursion_spaces':recursion_spaces}) + if r['return'] >0 : + if os_info['platform'] == 'windows': + return r + else: + return {'return':0} if os_info['platform'] == 'windows': return {'return': 1, 'error': 'Windows is currently not supported!'} diff --git a/script/get-terraform/customize.py b/script/get-terraform/customize.py index c091322bc5..4c3c668b51 100644 --- a/script/get-terraform/customize.py +++ b/script/get-terraform/customize.py @@ -20,7 +20,7 @@ def preprocess(i): 'env_path_key':'CM_TERRAFORM_BIN_WITH_PATH', 'run_script_input':i['run_script_input'], 'recursion_spaces':recursion_spaces}) - if r['return'] >0 : + if r['return'] >0 : if r['return'] == 16: env['CM_REQUIRE_INSTALL'] = "yes" return {'return': 0} diff --git a/script/get-tvm-model/customize.py b/script/get-tvm-model/customize.py index 26732a279c..e776149dba 100644 --- a/script/get-tvm-model/customize.py +++ b/script/get-tvm-model/customize.py @@ -12,7 +12,7 @@ def preprocess(i): automation = i['automation'] quiet = (env.get('CM_QUIET', False) == 'yes') - + work_dir = env.get('CM_TUNE_TVM_MODEL_WORKDIR', '') if work_dir != '': @@ -31,8 +31,8 @@ def preprocess(i): if env.get('CM_TUNE_TVM_MODEL', '') != '': print("The \"tune-model\" variation is selected, but at the same time the path to the existing \"work_dir\" is also specified. 
The compiled model will be based on the found existing \"work_dir\".") env["CM_TUNE_TVM_MODEL"] = "no" - - + + return {'return':0} diff --git a/script/get-tvm-model/process.py b/script/get-tvm-model/process.py index 53543e0f83..0e89263665 100644 --- a/script/get-tvm-model/process.py +++ b/script/get-tvm-model/process.py @@ -5,7 +5,7 @@ if os.environ.get("CM_TVM_FRONTEND_FRAMEWORK", None) == "pytorch": import torch import torchvision - + import tvm from tvm import relay, meta_schedule from tvm.driver.tvmc.frontends import load_model @@ -74,13 +74,13 @@ def get_mod_params( else: tvmc_model = load_model(path=model_path, shape_dict=shape_dict) mod, params = tvm.relay.transform.DynamicToStatic()(tvmc_model.mod), tvmc_model.params - + input_layer_name_file = os.path.join(os.getcwd(), "input_layer_name") if not input_layer_name: input_layer_name = shape_dict.keys()[0] with open(input_layer_name_file, 'w') as file: file.write(input_layer_name) - + return mod, params def tune_model( @@ -117,7 +117,7 @@ def tune_model( evaluator_config=evaluator_config ), ) - + return work_dir, database @@ -140,7 +140,7 @@ def compile_model( ) build_conf["relay.backend.use_meta_schedule"] = True with tvm.transform.PassContext( - opt_level=opt_level, + opt_level=opt_level, config=build_conf ): lib = meta_schedule.relay_integration.compile_relay( @@ -152,19 +152,19 @@ def compile_model( ) else: with tvm.transform.PassContext( - opt_level=opt_level, - config=build_conf, + opt_level=opt_level, + config=build_conf, ): if use_vm: lib = tvm.relay.backend.vm.compile( - mod=mod, - target=target, + mod=mod, + target=target, params=params ) else: lib = tvm.relay.build( - mod, - target=target, + mod, + target=target, params=params ) return lib @@ -174,9 +174,9 @@ def serialize_vm( ) -> tvm.runtime.Module: path_consts = os.path.join( tempfile.mkdtemp( - dir=os.getcwd(), + dir=os.getcwd(), suffix="-tvm-tmp" - ), + ), "consts" ) code_path = os.path.join(os.getcwd(), "vm_exec_code.ro") @@ -224,9 +224,9 @@ def main() -> None: use_vm = os.environ.get('CM_TVM_USE_VM', 'no') == 'yes' if tune_model_flag: work_dir, database = tune_model( - mod=mod, - params=params, - target=tvm_target, + mod=mod, + params=params, + target=tvm_target, ) lib = compile_model( mod=mod, @@ -242,7 +242,7 @@ def main() -> None: lib = serialize_vm( vm_exec=lib ) - + with open(os.path.join(os.getcwd(), "tvm_executor"), "w") as file: file.write("virtual_machine" if use_vm else "graph_executor") lib.export_library(compiled_model) diff --git a/script/gui/app.py b/script/gui/app.py index 0f4f93d21f..1e15bfef64 100644 --- a/script/gui/app.py +++ b/script/gui/app.py @@ -7,7 +7,7 @@ import misc def main(): - + query_params = misc.get_params(st) script_path = os.environ.get('CM_GUI_SCRIPT_PATH','') @@ -32,8 +32,8 @@ def main(): print ('Searching CM scripts using tags "{}"'.format(script_tags)) - r = cmind.access({'action':'find', - 'automation':'script,5b4e0237da074764', + r = cmind.access({'action':'find', + 'automation':'script,5b4e0237da074764', 'tags':script_tags}) if r['return']>0: return r @@ -60,12 +60,12 @@ def main(): ii = {'st': st, 'params': query_params, - 'script_path': script_path, - 'script_alias': script_alias, - 'script_tags': script_tags, + 'script_path': script_path, + 'script_alias': script_alias, + 'script_tags': script_tags, 'script_meta': meta, 'skip_bottom': False} - + return script.page(ii) if __name__ == "__main__": diff --git a/script/gui/customize.py b/script/gui/customize.py index 9c920ab2c4..2d2789b363 100644 --- a/script/gui/customize.py 
+++ b/script/gui/customize.py @@ -25,8 +25,8 @@ def preprocess(i): print ('Searching CM scripts using tags "{}"'.format(script_tags)) - r = cm.access({'action':'find', - 'automation':'script', + r = cm.access({'action':'find', + 'automation':'script', 'tags':script_tags}) if r['return']>0: return r diff --git a/script/gui/graph.py b/script/gui/graph.py index f06109c78f..ab4fc2db61 100644 --- a/script/gui/graph.py +++ b/script/gui/graph.py @@ -88,7 +88,7 @@ def visualize(st, query_params, action = ''): if len(q_result_uid)>0: if q_result_uid[0]!='': result_uid = q_result_uid[0] - + v_experiment_name = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_NAME','') q_experiment_name = query_params.get('name',['']) if len(q_experiment_name)>0: @@ -107,15 +107,15 @@ def visualize(st, query_params, action = ''): # Check default # if v_experiment_tags == '' and v_experiment_name == '': # v_experiment_tags = 'mlperf-inference v4.0' - + v_experiment_tags = st.text_input('Select CM experiment tags separated by space:', value=v_experiment_tags, key='v_experiment_tags').strip() v_experiment_tags = v_experiment_tags.replace(',',' ') # Get all experiment names - ii = {'action':'find', + ii = {'action':'find', 'automation':'experiment,a0a2d123ef064bcb'} - # If name is given, do not use tags + # If name is given, do not use tags if v_experiment_name!='': ii['artifact']=v_experiment_name elif v_experiment_tags!='': @@ -135,7 +135,7 @@ def visualize(st, query_params, action = ''): x.meta.get('alias',''), x.meta['uid'] )): - + meta = l.meta if v_experiment_name!='' and (v_experiment_name == meta['alias'] or v_experiment_name == meta['uid']): @@ -149,15 +149,15 @@ def visualize(st, query_params, action = ''): experiments.append(name) index+=1 - + if len(lst_all) == 1: selection = 1 # Show experiment artifacts - experiment = st.selectbox('Select experiment from {} found:'.format(len(experiments)-1), - range(len(experiments)), + experiment = st.selectbox('Select experiment from {} found:'.format(len(experiments)-1), + range(len(experiments)), format_func=lambda x: experiments[x], - index=selection, + index=selection, key='experiment') @@ -173,7 +173,7 @@ def visualize(st, query_params, action = ''): results_with_password = [] passwords = [] results_meta = {} - + for experiment in lst: path = experiment.path @@ -184,7 +184,7 @@ def visualize(st, query_params, action = ''): if os.path.isfile(path_to_result): emeta = experiment.meta - + desc = {'path':path_to_result, 'experiment_dir': d, 'experiment_uid':emeta['uid'], @@ -205,7 +205,7 @@ def visualize(st, query_params, action = ''): add = True break - if add: + if add: pwd = experiment.meta.get('password_hash','') if pwd=='': results.append(desc) @@ -214,7 +214,7 @@ def visualize(st, query_params, action = ''): if pwd not in passwords: passwords.append(pwd) - + results_with_password.append(desc) # Check if password @@ -232,7 +232,7 @@ def visualize(st, query_params, action = ''): for result in results_with_password: if result['password_hash'] == password_hash2: results.append(result) - + # How to visualize selection if len(results)==0: st.markdown('No results found!') @@ -240,12 +240,12 @@ def visualize(st, query_params, action = ''): if st.session_state.get('tmp_cm_results','')=='': - st.session_state['tmp_cm_results']=len(results) + st.session_state['tmp_cm_results']=len(results) elif int(st.session_state['tmp_cm_results'])!=len(results): st.session_state['tmp_cm_results']=len(results) st.session_state['how']=0 - + how = '' if result_uid=='': @@ -263,13 +263,13 @@ def 
visualize(st, query_params, action = ''): how_selection = ['', '2d-static', '2d', 'bar'] how_selection_desc = ['', 'Scatter plot (static)', 'Scatter plot (interactive, slow - to be improved)', 'Bar plot (static)'] - + how_index = 0 if v_how!='' and v_how in how_selection: how_index = how_selection.index(v_how) how2 = st.selectbox('Select how to visualize {} CM experiment set(s):'.format(len(results)), - range(len(how_selection_desc)), + range(len(how_selection_desc)), format_func=lambda x: how_selection_desc[x], index = how_index, key = 'how') @@ -278,7 +278,7 @@ def visualize(st, query_params, action = ''): if how2 == '' or how2 == 0: return {'return':0} - how = how_selection[how2] + how = how_selection[how2] how = how.strip() @@ -286,17 +286,17 @@ def visualize(st, query_params, action = ''): all_values = [] keys = [] all_data = [] - + derived_metrics_value = query_params.get('derived_metrics',[''])[0].strip() - derived_metrics_value = st.text_input("Optional: add derived metrics in Python. Example: result['Accuracy2'] = result['Accuracy']*2", + derived_metrics_value = st.text_input("Optional: add derived metrics in Python. Example: result['Accuracy2'] = result['Accuracy']*2", value = derived_metrics_value).strip() for x in security: if x in derived_metrics_value: derived_metrics_value='' break - + error_shown2 = False for desc in results: path_to_result = desc['path'] @@ -317,11 +317,11 @@ def visualize(st, query_params, action = ''): if derived_metrics_value!='': try: - exec(derived_metrics_value) + exec(derived_metrics_value) except Exception as e: - if not error_shown2: - st.markdown('*Syntax error in derived metrics: {}*'.format(e)) - error_shown2 = True + if not error_shown2: + st.markdown('*Syntax error in derived metrics: {}*'.format(e)) + error_shown2 = True all_values.append(result) @@ -349,12 +349,12 @@ def visualize(st, query_params, action = ''): if filter_value!='': try: - if not eval(filter_value): - continue + if not eval(filter_value): + continue except Exception as e: - if not error_shown: - st.markdown('*Syntax error in filter: {}*'.format(e)) - error_shown = True + if not error_shown: + st.markdown('*Syntax error in filter: {}*'.format(e)) + error_shown = True # Check if 1 result UID is selected if result_uid!='' and result.get('uid','')!=result_uid: @@ -406,8 +406,8 @@ def visualize(st, query_params, action = ''): if x!='': st.write('
\n'+x+'\n
\n', unsafe_allow_html = True) - - + + x = '' for k in sorted_keys: x+='* **{}**: {}\n'.format(k,str(result[k])) @@ -457,8 +457,8 @@ def visualize(st, query_params, action = ''): '''.format(misc.make_url(experiment_alias_or_uid, action=action, md=False), result_uid) st.write(end_html, unsafe_allow_html=True) - - + + return {'return':0} @@ -479,7 +479,7 @@ def visualize(st, query_params, action = ''): q_axis_key_x = query_params.get('x',['']) if len(q_axis_key_x)>0: if q_axis_key_x[0]!='': - axis_key_x = q_axis_key_x[0] + axis_key_x = q_axis_key_x[0] i_axis_key_x = 0 if axis_key_x != '' and axis_key_x in keys: i_axis_key_x = keys.index(axis_key_x) if axis_key_x == '' and 'Result' in keys: i_axis_key_x = keys.index('Result') @@ -554,7 +554,7 @@ def visualize(st, query_params, action = ''): unique_style_values = {} # unique_styles = ['o','v','^','<','>','1','2','3','4','8','s','p','P','*','+','D'] - unique_styles = ['circle', 'square', 'diamond', 'cross', 'x', 'triangle', 'pentagon', 'hexagram', + unique_styles = ['circle', 'square', 'diamond', 'cross', 'x', 'triangle', 'pentagon', 'hexagram', 'star', 'hourglass', 'bowtie', 'asterisk', 'hash'] i_unique_style_values = 0 @@ -570,12 +570,12 @@ def visualize(st, query_params, action = ''): for result in values: if filter_value!='': try: - if not eval(filter_value): - continue + if not eval(filter_value): + continue except Exception as e: - if not error_shown: - st.markdown('*Syntax error in filter: {}*'.format(e)) - error_shown = True + if not error_shown: + st.markdown('*Syntax error in filter: {}*'.format(e)) + error_shown = True values2.append(result) @@ -597,7 +597,7 @@ def visualize(st, query_params, action = ''): cc = [] ss = [] io = [] - + t = 0 for result in values2: v = result @@ -661,14 +661,14 @@ def visualize(st, query_params, action = ''): fig = px.scatter(df, x=axis_key_x, y=axis_key_y, color=axis_key_c, symbol=axis_key_s, hover_name='info', height=1000) st.plotly_chart(fig, theme="streamlit", use_container_width=True) - - + + elif how == '2d': ##################################################################### # 2D interactive graph - very slow - need to be updated width = 1 - + t = 0 for result in values2: v = result @@ -720,7 +720,7 @@ def visualize(st, query_params, action = ''): experiment_uid = v.get('experiment_uid','') if experiment_uid!='' and experiment_uid not in experiment_uids: experiment_uids.append(experiment_uid) - + uid = v.get('uid','') if uid!='': xaction = 'action={}&'.format(action) if action!='' else '' @@ -728,7 +728,7 @@ def visualize(st, query_params, action = ''): if url!='': targets = [url] - plugins.connect(fig, OpenBrowserOnClick(graph, targets = targets)) + plugins.connect(fig, OpenBrowserOnClick(graph, targets = targets)) # Render graph fig_html = mpld3.fig_to_html(fig) @@ -772,7 +772,7 @@ def visualize(st, query_params, action = ''): if __name__ == "__main__": r = main() - if r['return']>0: - + if r['return']>0: + st.markdown("""---""") st.markdown('**Error detected by CM:** {}'.format(r['error'])) diff --git a/script/gui/misc.py b/script/gui/misc.py index a2b00ad233..33ffc92f64 100644 --- a/script/gui/misc.py +++ b/script/gui/misc.py @@ -79,14 +79,14 @@ def get_all_deps_tags(i): all_deps_tags.append(v) elif type(v) == dict: - r = get_all_deps_tags({'meta':v, 'all_deps_tags':all_deps_tags}) - all_deps_tags = r['all_deps_tags'] + r = get_all_deps_tags({'meta':v, 'all_deps_tags':all_deps_tags}) + all_deps_tags = r['all_deps_tags'] elif type(v) == list: - for vv in v: - if type(vv) == dict: - r = 
get_all_deps_tags({'meta':vv, 'all_deps_tags':all_deps_tags}) - all_deps_tags = r['all_deps_tags'] + for vv in v: + if type(vv) == dict: + r = get_all_deps_tags({'meta':vv, 'all_deps_tags':all_deps_tags}) + all_deps_tags = r['all_deps_tags'] return {'return':0, 'all_deps_tags':all_deps_tags} @@ -119,7 +119,7 @@ def make_selector(i): value2 = force if not hide: st.markdown('**{}:** {}'.format(desc, str(force))) - + else: if boolean: v = default @@ -173,25 +173,25 @@ def make_selector(i): def make_selection(st, selection, param_key, text, x_uid, force_index=0): x_meta = {} - + if len(selection)>0: - selection = sorted(selection, key = lambda v: v['name']) - - if x_uid != '': - x_meta = selection[0] - st.markdown('**Selected {}:** {}'.format(text, x_meta['name'])) - else: - x_selection = [{'name':''}] - x_selection += selection - - x_id = st.selectbox('Select {}:'.format(text), - range(len(x_selection)), - format_func=lambda x: x_selection[x]['name'], - index = force_index, - key = param_key) - - if x_id>0: - x_meta = x_selection[x_id] + selection = sorted(selection, key = lambda v: v['name']) + + if x_uid != '': + x_meta = selection[0] + st.markdown('**Selected {}:** {}'.format(text, x_meta['name'])) + else: + x_selection = [{'name':''}] + x_selection += selection + + x_id = st.selectbox('Select {}:'.format(text), + range(len(x_selection)), + format_func=lambda x: x_selection[x]['name'], + index = force_index, + key = param_key) + + if x_id>0: + x_meta = x_selection[x_id] return {'return':0, 'meta':x_meta} @@ -207,7 +207,7 @@ def get_with_complex_key_safe(meta, key): def get_with_complex_key(meta, key): j = key.find('.') - + if j<0: return meta.get(key) @@ -217,4 +217,3 @@ def get_with_complex_key(meta, key): return None return get_with_complex_key(meta[key0], key[j+1:]) - diff --git a/script/gui/playground.py b/script/gui/playground.py index 9c1a2f40cc..109d048469 100644 --- a/script/gui/playground.py +++ b/script/gui/playground.py @@ -33,7 +33,7 @@ def main(): """ - st.markdown(hide_streamlit_style, unsafe_allow_html=True) + st.markdown(hide_streamlit_style, unsafe_allow_html=True) # Set title (check extra user HTML to embed before title if needed) extra = os.environ.get('CM_GUI_EXTRA_HTML','') @@ -65,7 +65,7 @@ def main(): s = '\n\n'+r['string']+'\n\n' st.write(s, unsafe_allow_html=True) - + # Check action and basic menu action = params.get('action',['scripts'])[0].lower() @@ -100,7 +100,7 @@ def main(): style_action_scripts, style_action_howtorun, style_action_challenges, - style_action_experiments, + style_action_experiments, style_action_reproduce, style_action_contributors, style_action_reports, @@ -164,7 +164,7 @@ def main():
Powered by MLCommons Collective Mind
- """, + """, unsafe_allow_html=True) diff --git a/script/gui/playground_apps.py b/script/gui/playground_apps.py index 9af5fca444..bcde96578e 100644 --- a/script/gui/playground_apps.py +++ b/script/gui/playground_apps.py @@ -32,7 +32,7 @@ def page(st, params, action = ''): global initialized, external_module_path, external_module_meta end_html = '' - + st.markdown('----') st.markdown(announcement) diff --git a/script/gui/playground_beta.py b/script/gui/playground_beta.py index f5636404d2..9b2c526a8c 100644 --- a/script/gui/playground_beta.py +++ b/script/gui/playground_beta.py @@ -8,7 +8,7 @@ def page(st, params): current_script_path = os.environ.get('CM_TMP_CURRENT_SCRIPT_PATH', '') - + url_prefix = st.config.get_option('server.baseUrlPath')+'/' name = params.get('name',[''])[0].strip() @@ -17,16 +17,16 @@ def page(st, params): readme = os.path.join(current_script_path, 'playground_beta_README.md') md = '' - + if os.path.isfile(readme): - + r = cmind.utils.load_txt(readme) if r['return']>0: return r md += r['string'] md = md.replace('{{URL_PREFIX}}', url_prefix) - + # st.markdown(md) st.write(md, unsafe_allow_html = True) diff --git a/script/gui/playground_challenges.py b/script/gui/playground_challenges.py index 0a840403d5..7d628760ce 100644 --- a/script/gui/playground_challenges.py +++ b/script/gui/playground_challenges.py @@ -122,8 +122,8 @@ def page(st, params): challenges.append({'prefix':prefix, 'name':name, 'uid':l.meta['uid']}) - - + + # Show ongoing if open if len(ongoing)>0: @@ -132,19 +132,19 @@ def page(st, params): ongoing_without_hot = [] for row in ongoing: - if row.get('hot', False): - hot.append(row) - else: - ongoing_without_hot.append(row) - + if row.get('hot', False): + hot.append(row) + else: + ongoing_without_hot.append(row) + # Some info x = ''' Collaborative benchmarking and optimization of AI applications and systems - (latency, throughput, power consumption, accuracy, costs ...) + (latency, throughput, power consumption, accuracy, costs ...) is organized by MLCommons, - cKnowledge + cKnowledge and the cTuning foundation and powered by Collective Mind automation recipes. We deeply thank all our participants and contributors! 
@@ -161,42 +161,42 @@ def page(st, params): st.markdown('#### Hot challenges') md_tmp = '' - + for row in sorted(hot, key=lambda row: (int(row.get('orig_date_close', 9999999999)), row.get('sort', 0), row.get('name', ''), row.get('under_preparation', False) )): - x = row['name'] - x = x[0].upper() + x[1:] + x = row['name'] + x = x[0].upper() + x[1:] + + url = url_prefix + '?action=challenges&name={}'.format(row['uid']) - url = url_prefix + '?action=challenges&name={}'.format(row['uid']) + date_close = row.get('date_close','').strip() + y = ' (Closing date: **{}**)'.format(date_close) if date_close !='' else '' - date_close = row.get('date_close','').strip() - y = ' (Closing date: **{}**)'.format(date_close) if date_close !='' else '' + md_tmp += '* [{}]({}){}\n'.format(x, url, y) - md_tmp += '* [{}]({}){}\n'.format(x, url, y) - st.markdown(md_tmp) st.markdown('#### On-going challenges') - + # Continue all ind = 1 data = [] - + for row in sorted(ongoing_without_hot, key=lambda row: (int(row.get('orig_date_close', 9999999999)), row.get('sort', 0), row.get('name', ''), row.get('under_preparation', False) )): if row.get('skip',False): continue - + xrow = [] - + md = '' up = row.get('under_preparation', False) @@ -246,7 +246,7 @@ def page(st, params): # xrow.append(y) - + awards = '' trophies = row.get('trophies',False) @@ -264,7 +264,7 @@ def page(st, params): # xrow.append(awards) - if x!='': + if x!='': md += '     '+x # st.markdown(md) @@ -276,27 +276,27 @@ def page(st, params): import pandas as pd import numpy as np - + df = pd.DataFrame(data, columns=['Challenge', 'Closing date', 'Extension']) - + df.index+=1 # st.table(df) st.write(df.to_html(escape=False, justify='left'), unsafe_allow_html=True) # Show selector for all -# challenge = st.selectbox('View past benchmarking, optimization, reproducibility and replicability challenges:', -# range(len(challenges)), +# challenge = st.selectbox('View past benchmarking, optimization, reproducibility and replicability challenges:', +# range(len(challenges)), # format_func=lambda x: challenges[x], # index=0, key='challenge') # # if challenge>0: # artifact = artifacts[challenge] - - - - + + + + # Process 1 challenge if artifact is None: # st.markdown('#### Past or future challenges:') @@ -319,7 +319,7 @@ def page(st, params): '''.format(str(ind), prefix, url, name) st.write(x, unsafe_allow_html = True) - + ind+=1 @@ -390,7 +390,7 @@ def page(st, params): if meta.get('trophies', False): z+='* **MLCommons Collective Knowledge Contributor award:** Yes\n' - + prize_short = meta.get('prize_short','') if prize_short!='': z+='* **Prizes:** {}\n'.format(prize_short) @@ -430,13 +430,13 @@ def page(st, params): if tags!='': md+=' * '+misc.make_url(tags, action='experiments', key='tags')+'\n' elif name!='': - md+=' * '+misc.make_url(name, action='experiments')+'\n' + md+=' * '+misc.make_url(name, action='experiments')+'\n' z+=md+'\n' st.markdown(z) - - + + # Check if has text path = artifact.path @@ -496,7 +496,7 @@ def page(st, params): - - - + + + return {'return':0, 'end_html':end_html} diff --git a/script/gui/playground_challenges_with_prizes.py b/script/gui/playground_challenges_with_prizes.py index 5e8d2a1b57..80afce51e4 100644 --- a/script/gui/playground_challenges_with_prizes.py +++ b/script/gui/playground_challenges_with_prizes.py @@ -116,8 +116,8 @@ def page(st, params): challenges.append({'prefix':prefix, 'name':name, 'uid':l.meta['uid']}) - - + + # Show ongoing if open if len(ongoing)>0: ind = 1 @@ -136,16 +136,16 @@ def page(st, params): 
st.write(x, unsafe_allow_html = True) data = [] - + for row in sorted(ongoing, key=lambda row: (int(row.get('orig_date_close', 9999999999)), row.get('sort', 0), row.get('name', ''), row.get('under_preparation', False) )): if row.get('skip',False): continue - + xrow = [] - + md = '' up = row.get('under_preparation', False) @@ -195,7 +195,7 @@ def page(st, params): # xrow.append(y) - + awards = '' trophies = row.get('trophies',False) @@ -213,7 +213,7 @@ def page(st, params): xrow.append(awards) - if x!='': + if x!='': md += '     '+x # st.markdown(md) @@ -225,27 +225,27 @@ def page(st, params): import pandas as pd import numpy as np - + df = pd.DataFrame(data, columns=['Challenge', 'Closing date', 'Extension', 'Contributor award and prizes from MLCommons organizations, cTuning foundation and cKnowledge.org']) - + df.index+=1 # st.table(df) st.write(df.to_html(escape=False, justify='left'), unsafe_allow_html=True) # Show selector for all -# challenge = st.selectbox('View past benchmarking, optimization, reproducibility and replicability challenges:', -# range(len(challenges)), +# challenge = st.selectbox('View past benchmarking, optimization, reproducibility and replicability challenges:', +# range(len(challenges)), # format_func=lambda x: challenges[x], # index=0, key='challenge') # # if challenge>0: # artifact = artifacts[challenge] - - - - + + + + # Process 1 challenge if artifact is None: # st.markdown('#### Past or future challenges:') @@ -273,7 +273,7 @@ def page(st, params): '''.format(str(ind), prefix, url, name) st.write(x, unsafe_allow_html = True) - + ind+=1 @@ -344,7 +344,7 @@ def page(st, params): if meta.get('trophies', False): z+='* **MLCommons Collective Knowledge Contributor award:** Yes\n' - + prize_short = meta.get('prize_short','') if prize_short!='': z+='* **Prizes:** {}\n'.format(prize_short) @@ -384,13 +384,13 @@ def page(st, params): if tags!='': md+=' * '+misc.make_url(tags, action='experiments', key='tags') elif name!='': - md+=' * '+misc.make_url(name, action='experiments') + md+=' * '+misc.make_url(name, action='experiments') z+=md+'\n' st.markdown(z) - - + + # Check if has text path = artifact.path @@ -450,7 +450,7 @@ def page(st, params): - - - + + + return {'return':0, 'end_html':end_html} diff --git a/script/gui/playground_contributors.py b/script/gui/playground_contributors.py index 1c44f417bb..cc49c87a4a 100644 --- a/script/gui/playground_contributors.py +++ b/script/gui/playground_contributors.py @@ -97,10 +97,10 @@ def page(st, params): # Check if README md = '' - + readme = os.path.join(path, 'README.md') if os.path.isfile(readme): - + r = cmind.utils.load_txt(readme) if r['return']>0: return r @@ -111,7 +111,7 @@ def page(st, params): else: - st.markdown('**Warning:** Contributor "{}" not found!'.format(name)) + st.markdown('**Warning:** Contributor "{}" not found!'.format(name)) return {'return':0, 'end_html':end_html} @@ -153,7 +153,7 @@ def page_list(st, params): m = l.meta - # Skip from stats + # Skip from stats if m.get('skip', False): continue @@ -245,7 +245,7 @@ def page_list(st, params):
- Check on-going challenges + Check on-going challenges and register here to be added to this leaderboard. @@ -257,7 +257,7 @@ def page_list(st, params): st.write(x, unsafe_allow_html = True) st.write('
'+df.to_html(escape=False, justify='left')+'
', unsafe_allow_html=True) - + # from st_aggrid import AgGrid, GridOptionsBuilder, GridUpdateMode @@ -340,7 +340,7 @@ def calculate_points(meta): # Automatic challenges points += len(meta.get('challenges',[])) points += len(meta.get('ongoing',[])) - + return points @@ -354,5 +354,5 @@ def prepare_name(meta): md = '* '+misc.make_url(name, alias=alias)+'\n' elif org!='': md = '* *'+misc.make_url(org, alias=alias)+'*\n' - + return md diff --git a/script/gui/playground_howtorun.py b/script/gui/playground_howtorun.py index e533ac37fb..2442c93fd8 100644 --- a/script/gui/playground_howtorun.py +++ b/script/gui/playground_howtorun.py @@ -26,7 +26,7 @@ def main(): def page(st, params, action = ''): end_html = '' - + # Announcement # st.markdown('----') @@ -36,10 +36,10 @@ def page(st, params, action = ''): x = ''' - This interface will help you generate a command line or Python API - to run modular benchmarks composed from + This interface will help you generate a command line or Python API + to run modular benchmarks composed from automation recipes (CM scripts). - Note that this is a collaborative engineering effort + Note that this is a collaborative engineering effort to make sure that they work across all possible versions and configurations of models, data sets, software and hardware - please report encountered issues and provide feedback here @@ -51,16 +51,16 @@ def page(st, params, action = ''): '''.format(url_script) st.write(x, unsafe_allow_html = True) - + # st.markdown(announcement) - + ############################################################################################ # Select target hardware compute_uid = '' x = params.get('compute_uid',['']) if len(x)>0 and x[0]!='': compute_uid = x[0].strip() - + ii = {'action':'load_cfg', 'automation':'utils', 'tags':'benchmark,compute', @@ -83,7 +83,7 @@ def page(st, params, action = ''): bench_uid = '' x = params.get('bench_uid',['']) if len(x)>0 and x[0]!='': bench_uid = x[0].strip() - + ii = {'action':'load_cfg', 'automation':'utils', 'tags':'benchmark,list', @@ -105,7 +105,7 @@ def page(st, params, action = ''): xtags = set(compute_meta['tags'].split(',')) # st.markdown(str(xtags)) - + for s in selection: add = True @@ -118,7 +118,7 @@ def page(st, params, action = ''): if cc.issubset(xtags): add = True break - + if add: pruned_selection.append(s) @@ -130,7 +130,7 @@ def page(st, params, action = ''): j += 1 if q['uid'] == '39877bb63fb54725': force_bench_index = j - + r = misc.make_selection(st, pruned_selection, 'benchmark', 'benchmark', bench_uid, force_index = force_bench_index) if r['return']>0: return r @@ -170,7 +170,7 @@ def page(st, params, action = ''): script_alias = script_meta['alias'] repo_meta = script_obj.repo_meta - + url = repo_meta.get('url','') if url=='' and repo_meta.get('git', False): url = 'https://github.com/'+repo_meta['alias'].replace('@','/') @@ -185,7 +185,7 @@ def page(st, params, action = ''): url += repo_meta['prefix'] if not url.endswith('/'): url=url+'/' - + url += 'script/'+script_alias script_url = url @@ -193,7 +193,7 @@ def page(st, params, action = ''): if not bench_meta.get('skip_extra_urls', False): url_script = misc.make_url(script_name, key='name', action='scripts', md=False) url_script += '&gui=true' - + urls.append({'name': 'Universal CM GUI to run this benchmark', 'url': url_script}) @@ -207,7 +207,7 @@ def page(st, params, action = ''): urls.append({'name': 'Notes about how to run this benchmark from the command line', 'url': url_readme_extra}) - + # Check URLS if len(urls)>0: x = '\n' @@ 
-224,11 +224,11 @@ def page(st, params, action = ''): # Check if has customization extra = {} skip = False - + script_tags = script_meta.get('tags_help','') if script_tags =='': script_tags = ','.join(script_meta.get('tags',[])) - + if script_obj!=None: ii = {'st': st, 'params': params, @@ -253,8 +253,8 @@ def page(st, params, action = ''): found_automation_spec.loader.exec_module(tmp_module) # tmp_module=importlib.import_module('customize') except Exception as e: - st.markdown('WARNING: {}'.format(e)) - pass + st.markdown('WARNING: {}'.format(e)) + pass if tmp_module!=None: if hasattr(tmp_module, 'gui'): @@ -276,14 +276,14 @@ def page(st, params, action = ''): ii = {'st': st, 'params': params, - 'script_path': script_path, - 'script_alias': script_alias, - 'script_tags': script_tags, + 'script_path': script_path, + 'script_alias': script_alias, + 'script_tags': script_tags, 'script_meta': script_meta, 'script_repo_meta': script_repo_meta, 'skip_bottom': True, 'extra': extra} - + rr = script.page(ii) if rr['return']>0: return rr diff --git a/script/gui/playground_install.py b/script/gui/playground_install.py index ce9e31bd89..f5f53ca398 100644 --- a/script/gui/playground_install.py +++ b/script/gui/playground_install.py @@ -8,17 +8,17 @@ def page(st, params, extra): end_html = '' - + url_prefix = st.config.get_option('server.baseUrlPath')+'/' if not extra.get('skip_header', False): st.markdown('---') st.markdown('**Install [MLCommons Collective Mind automation framework](https://github.com/mlcommons/ck):**') - + md = '' - + ################################################################### # Select OS choices = [('Ubuntu, Debian and similar Linux', 'linux'), @@ -31,10 +31,10 @@ def page(st, params, extra): if extra.get('run_on_windows', False): host_os_selection = 3 - host_os = st.selectbox('Select your host OS:', + host_os = st.selectbox('Select your host OS:', range(len(choices)), - format_func = lambda x: choices[x][0], - index = host_os_selection, + format_func = lambda x: choices[x][0], + index = host_os_selection, key = 'install_select_host_os') host_os_index = choices[host_os][1] @@ -43,7 +43,7 @@ def page(st, params, extra): cur_script_file = __file__ cur_script_path = os.path.dirname(cur_script_file) - + notes = os.path.join(cur_script_path, 'install', host_os_index+'.md') if os.path.isfile(notes): @@ -62,8 +62,8 @@ def page(st, params, extra): need_user = ' --user' elif host_os_index == 'windows': python = 'python' - - + + ################################################################### # Select repository @@ -76,8 +76,8 @@ def page(st, params, extra): repo = st.selectbox('Select repository with [automation recipes (CM scripts)](https://access.cknowledge.org/playground/?action=scripts):', range(len(choices)), - format_func = lambda x: choices[x][0], - index=0, + format_func = lambda x: choices[x][0], + index=0, key='select_repo') repo_index = choices[repo][1] @@ -92,11 +92,11 @@ def page(st, params, extra): cm_repo = '--url=https://github.com/mlcommons/cm4mlops/archive/refs/tags/r20240416.zip --skip-zip-parent-dir' else: cm_repo = 'mlcommons@cm4mlops' - + x = '{} -m pip install cmind -U {}\n\n'.format(python, need_user) x += 'cm test core \n\n' x += 'cm pull repo {}\n\n'.format(cm_repo) - + clean_cm_cache = st.toggle('Clean CM cache', value=True, key = 'install_clean_cm_cache') cm_clean_cache = 'cm rm cache -f\n\n' if clean_cm_cache else '' @@ -110,13 +110,13 @@ def page(st, params, extra): python_ver=params.get('@adr.python.version', '') if python_venv_name == '': - 
use_python_venv = st.toggle('Use Python Virtual Environment for CM scripts?', value = False) - if use_python_venv: - python_venv_name = st.text_input('Enter some CM python venv name for your project:', value = "mlperf-v4.0") + use_python_venv = st.toggle('Use Python Virtual Environment for CM scripts?', value = False) + if use_python_venv: + python_venv_name = st.text_input('Enter some CM python venv name for your project:', value = "mlperf-v4.0") + + if python_ver_min == '': + python_ver_min = st.text_input('[Optional] Specify min version such as 3.8:') - if python_ver_min == '': - python_ver_min = st.text_input('[Optional] Specify min version such as 3.8:') - y = '' if python_venv_name!='':# or python_ver!='' or python_ver_min!='': y = 'cm run script "get sys-utils-cm"\n' diff --git a/script/gui/playground_reports.py b/script/gui/playground_reports.py index 43aed1f04d..d11276fb50 100644 --- a/script/gui/playground_reports.py +++ b/script/gui/playground_reports.py @@ -130,7 +130,7 @@ def page(st, params):
''' st.write(x, unsafe_allow_html = True) - + st.markdown(md) - + return {'return':0, 'end_html':end_html} diff --git a/script/gui/playground_reproduce.py b/script/gui/playground_reproduce.py index 28396b8961..525a49fd72 100644 --- a/script/gui/playground_reproduce.py +++ b/script/gui/playground_reproduce.py @@ -48,13 +48,13 @@ def page(st, params, action = ''): x = ''' - [Under development] This is a new project to reproduce modular benchmarks - across different models, data sets, software and hardware + [Under development] This is a new project to reproduce modular benchmarks + across different models, data sets, software and hardware via open challenges based on the ACM/cTuning reproducibility methodology and badges - and automatically compose + and automatically compose High-Performance and Cost-Efficient AI Systems with MLCommons' Collective Mind and MLPerf. - Note that this is a collaborative engineering effort + Note that this is a collaborative engineering effort - please report encountered issues and provide feedback here and get in touch via Discord! @@ -65,7 +65,7 @@ def page(st, params, action = ''): '''.format(url_benchmarks, url_challenges) st.write(x, unsafe_allow_html = True) - + return {'return':0} @@ -87,7 +87,7 @@ def page(st, params, action = ''): if test_uid == '': x = params.get('compute_uid',['']) if len(x)>0 and x[0]!='': compute_uid = x[0].strip() - + ii = {'action':'load_cfg', 'automation':'utils', 'tags':'benchmark,compute', @@ -211,7 +211,7 @@ def page(st, params, action = ''): if bench_uid != '': url_bench = url_benchmarks + '&bench_uid='+bench_uid st.markdown('[Link to benchmark GUI]({})'.format(url_bench)) - + # Check notes test_md = full_path[:-10]+'.md' if os.path.isfile(test_md): @@ -239,16 +239,16 @@ def page(st, params, action = ''): cmd = inp.get('cmd',[]) if len(cmd)>0: - xcmd = ' \\\n '.join(cmd) + xcmd = ' \\\n '.join(cmd) + + st.markdown(""" + **CM command line:** + ```bash + cm run script {} + ``` + """.format(xcmd)) - st.markdown(""" -**CM command line:** -```bash -cm run script {} -``` - """.format(xcmd)) - st.markdown(""" **CM input dictionary:** ```json @@ -264,7 +264,7 @@ def page(st, params, action = ''): ``` """.format(json.dumps(out, indent=2))) - + st.markdown(""" **Test meta:** @@ -283,7 +283,7 @@ def page(st, params, action = ''): html = '' all_data = [] - + # TBD: should be taken from a given benchmark dimensions = [] @@ -293,7 +293,7 @@ def page(st, params, action = ''): dimension_values = {} dimension_keys = [] - + if len(dimensions) == 0: keys = [('test', 'CM test', 400, 'leftAligned')] else: @@ -325,14 +325,14 @@ def page(st, params, action = ''): for s in selection: row = {} - + full_path = s['full_path'] test_uid = s['uid'] uid = s['uid'] url_test = misc.make_url(uid, key='test_uid', action='reproduce', md=False) - + bench_meta = s['main_meta'] inp = {} @@ -363,13 +363,13 @@ def page(st, params, action = ''): if len(k)>2 and k[2]=='tick': if v!=None and v!='': v = '✅' - + row[kk] = str(v) # Check ACM/IEEE functional badge url = '' - + x = '' if s.get('functional', False): x = '
'.format(url, badges['functional']['url']) @@ -393,10 +393,10 @@ def page(st, params, action = ''): url_bench = url_benchmarks + '&bench_uid='+bench_uid x = '
'.format(url_bench, badges['support_cm']['url']) row['support_cm'] = x - + # Check misc notes row['notes']=''+s.get('notes','')+'' - + # Finish row all_data.append(row) @@ -426,11 +426,11 @@ def page(st, params, action = ''): if bench_name!='': self_url+='&bench_name='+bench_name if test_uid!='': - self_url+='&test_uid='+test_uid + self_url+='&test_uid='+test_uid elif compute_uid!='': self_url+='&compute_uid='+compute_uid end_html='
Self link
'.format(self_url) - + return {'return': 0, 'end_html': end_html} diff --git a/script/gui/playground_scripts.py b/script/gui/playground_scripts.py index 6f4a4893bd..bf1ce13d9b 100644 --- a/script/gui/playground_scripts.py +++ b/script/gui/playground_scripts.py @@ -37,11 +37,11 @@ def page(st, params): x = ''' - Collective Mind is a collection of open-source, portable, extensible and ready-to-use - automation scripts with a human-friendly interface and minimal dependencies to make it easier to compose, benchmark and optimize + Collective Mind is a collection of open-source, portable, extensible and ready-to-use + automation scripts with a human-friendly interface and minimal dependencies to make it easier to compose, benchmark and optimize complex AI, ML and other applications and systems across diverse and continuously changing models, data sets, software and hardware. - Note that this is a collaborative engineering effort - to make sure that they work across all possible versions and configurations + Note that this is a collaborative engineering effort + to make sure that they work across all possible versions and configurations - please report encountered issues and provide feedback here and get in touch via Discord! @@ -57,7 +57,7 @@ def page(st, params): script_tags = st.text_input('Search open-source automation recipes by tags:', value=script_tags, key='script_tags').strip() # Searching automation recipes - + ii = {'action':'find', 'automation':'script,5b4e0237da074764'} @@ -89,7 +89,7 @@ def page(st, params): recipe = lst[0] meta = recipe.meta - + alias = meta['alias'] uid = meta['uid'] @@ -111,9 +111,9 @@ def page(st, params): ii = {'st': st, 'params': params, - 'script_path': script_path, - 'script_alias': script_alias, - 'script_tags': script_tags, + 'script_path': script_path, + 'script_alias': script_alias, + 'script_tags': script_tags, 'script_meta': meta, 'script_repo_meta': recipe.repo_meta, 'skip_bottom': True} @@ -121,7 +121,7 @@ def page(st, params): return script.page(ii) else: - + st.markdown('### CM script "{}" ({})'.format(alias, uid)) repo_meta = recipe.repo_meta @@ -138,9 +138,9 @@ def page(st, params): xtags = tags if len(variations)>0: - if xtags!='': - xtags+=' ' - xtags+=' '.join(variations) + if xtags!='': + xtags+=' ' + xtags+=' '.join(variations) x = ''' ```bash @@ -159,12 +159,12 @@ def page(st, params): cm docker script "{}" cm gui script "{}" ``` - + '''.format(extra_repo,xtags,xtags,xtags,xtags,xtags,xtags) - - + + # Check original link url = repo_meta.get('url','') @@ -186,7 +186,7 @@ def page(st, params): url += repo_meta['prefix'] if not url.endswith('/'): url=url+'/' - + url += 'script/'+alias # Check README.md @@ -209,23 +209,23 @@ def page(st, params): y = os.path.join(recipe.path, z) if os.path.isfile(y): url_meta_description = url+'/'+z - + url_gui = url_prefix_script+'&name='+alias+','+uid+'&gui=true' - + z = '* ***Check [open source code (Apache 2.0 license)]({}) at GitHub.***\n'.format(url) z += '* ***Check [detailed auto-generated README on GitHub]({}).***\n'.format(url_readme) z += '* ***Check [experimental GUI]({}) to run this script.***\n'.format(url_gui) z += '---\n' - + st.markdown(z) - + st.markdown('Default run on Linux, Windows, MacOS and any other OS (check [CM installation guide]({}) for more details):\n{}\n'.format(url_prefix + '?action=install', x)) st.markdown('*The [Collective Mind concept](https://doi.org/10.5281/zenodo.8105339) is to gradually improve portability and reproducibility of common automation recipes based on user 
feedback' ' while keeping the same human-friendly interface. If you encounter issues, please report them [here](https://github.com/mlcommons/ck/issues) ' ' to help this community project!*') - + if url_readme_extra!='': st.markdown('* See [extra README]({}) for this automation recipe at GitHub.'.format(url_readme_extra)) @@ -252,15 +252,15 @@ def page(st, params): continue url_deps = url_prefix_script+'&tags='+t - + x+='* [{}]({})\n'.format(t, url_deps) - + st.markdown(x) else: categories={} - + for l in sorted(lst, key=lambda x: ( x.meta.get('alias','') )): @@ -272,13 +272,13 @@ def page(st, params): categories[category]=[] categories[category].append(l) - + if len(categories)>1: category_selection = [''] + sorted(list(categories.keys()), key = lambda v: v.upper()) # Creating compute selector category_id = st.selectbox('Prune by category:', - range(len(category_selection)), + range(len(category_selection)), format_func=lambda x: category_selection[x], index = 0, key = 'category') @@ -291,7 +291,7 @@ def page(st, params): recipes = 0 for category in sorted(categories, key = lambda v: v.upper()): recipes += len(categories[category]) - + x = ''' Found {} automation recipes: @@ -299,10 +299,10 @@ def page(st, params): '''.format(str(recipes)) st.write(x, unsafe_allow_html = True) - + for category in sorted(categories, key = lambda v: v.upper()): md = '### {}'.format(category)+'\n' - + for recipe in categories[category]: meta = recipe.meta @@ -310,9 +310,9 @@ def page(st, params): uid = meta['uid'] url = url_prefix_script+'&name='+alias+','+uid - + md += '* [{}]({})'.format(alias, url)+'\n' st.markdown(md) - + return {'return':0, 'end_html':end_html} diff --git a/script/gui/script.py b/script/gui/script.py index 9a8bc0cfeb..0a279b3352 100644 --- a/script/gui/script.py +++ b/script/gui/script.py @@ -28,7 +28,7 @@ def page(i): if gui_func!='': ii = {'streamlit_module':st, 'meta':meta} - return cmind.utils.call_internal_module(None, os.path.join(script_path, 'dummy') , + return cmind.utils.call_internal_module(None, os.path.join(script_path, 'dummy') , 'customize', gui_func, ii) st.markdown("""---""") @@ -56,19 +56,19 @@ def page(i): url += repo_meta['prefix'] if not url.endswith('/'): url=url+'/' - + url += 'script/'+script_alias url_script = url hide = params.get('hide_script_customization', False) - + if script_alias!='': show_customize = st.toggle('**Customize input for the CM script "[{}]({})"**'.format(script_alias, url_script), value = not hide) hide = not show_customize - + # Check if found path and there is meta # TBD (Grigori): need to cache it using @st.cache variation_groups = {} @@ -94,7 +94,7 @@ def page(i): if alias!='': aliases = variation_alias.get(alias, []) - if variation_key not in aliases: + if variation_key not in aliases: aliases.append(variation_key) variation_alias[alias]=aliases @@ -133,66 +133,66 @@ def page(i): # Prepare variation_groups if len(variations)>0: - if not hide: - st.markdown('**Select variations to update multiple flags and environment variables:**') - - variation_groups_order = meta.get('variation_groups_order',[]) - for variation in sorted(variation_groups): - if variation not in variation_groups_order: - variation_groups_order.append(variation) - - for group_key in variation_groups_order: - group_key_cap = group_key.replace('-',' ').capitalize() - if not group_key.startswith('*'): - y = [''] - - index = 0 - selected_index = 0 - for variation_key in sorted(variation_groups[group_key]): - index += 1 - y.append(variation_key) - if variation_key in 
default_variations: - selected_index=index - - key2 = '~~'+group_key - - x = params.get(key2, None) - if x!=None and len(x)>0 and x[0]!=None: - x = x[0] - if x in y: - selected_index = y.index(x) if x in y else 0 - - if hide: - st_variations[key2] = sorted(y)[selected_index] - else: - st_variations[key2] = st.selectbox(group_key_cap, sorted(y), index=selected_index, key=key2) - - elif group_key == '*no-group*': - for variation_key in sorted(variation_groups[group_key]): - v = False - if variation_key in default_variations: - v=True - - key2 = '~'+variation_key - - x = params.get(key2, None) - if x!=None and len(x)>0 and x[0]!=None: - if x[0].lower()=='true': - v = True - elif x[0].lower()=='false': - v = False - - if hide: - st_variations[key2] = v - else: - st_variations[key2] = st.checkbox(variation_key.capitalize(), key=key2, value=v) + if not hide: + st.markdown('**Select variations to update multiple flags and environment variables:**') + + variation_groups_order = meta.get('variation_groups_order',[]) + for variation in sorted(variation_groups): + if variation not in variation_groups_order: + variation_groups_order.append(variation) + + for group_key in variation_groups_order: + group_key_cap = group_key.replace('-',' ').capitalize() + if not group_key.startswith('*'): + y = [''] + + index = 0 + selected_index = 0 + for variation_key in sorted(variation_groups[group_key]): + index += 1 + y.append(variation_key) + if variation_key in default_variations: + selected_index=index + + key2 = '~~'+group_key + + x = params.get(key2, None) + if x!=None and len(x)>0 and x[0]!=None: + x = x[0] + if x in y: + selected_index = y.index(x) if x in y else 0 + + if hide: + st_variations[key2] = sorted(y)[selected_index] + else: + st_variations[key2] = st.selectbox(group_key_cap, sorted(y), index=selected_index, key=key2) + + elif group_key == '*no-group*': + for variation_key in sorted(variation_groups[group_key]): + v = False + if variation_key in default_variations: + v=True + + key2 = '~'+variation_key + + x = params.get(key2, None) + if x!=None and len(x)>0 and x[0]!=None: + if x[0].lower()=='true': + v = True + elif x[0].lower()=='false': + v = False + + if hide: + st_variations[key2] = v + else: + st_variations[key2] = st.checkbox(variation_key.capitalize(), key=key2, value=v) # Prepare inputs input_desc=meta.get('input_description',{}) if len(input_desc)>0: - + sort_desc = {} sort_keys = [] for k in input_desc: @@ -203,7 +203,7 @@ def page(i): sort_keys = sorted(sort_desc, key = lambda k: sort_desc[k]) other_keys = sorted([k for k in input_desc if input_desc[k].get('sort',0)==0]) - + all_keys = [] if len(sort_keys)==0 else sort_keys all_keys += other_keys @@ -228,7 +228,7 @@ def page(i): 'st':st, 'st_inputs':st_inputs, 'hide':hide} - + r2 = misc.make_selector(ii) if r2['return']>0: return r2 @@ -262,13 +262,13 @@ def page(i): - + # Add extras to inputs add_to_st_inputs = extra.get('add_to_st_inputs',{}) if len(add_to_st_inputs)>0: st_inputs.update(add_to_st_inputs) - + ############################################################################ st.markdown("""---""") st.markdown('**Run this CM script (Linux/MacOS/Windows):**') @@ -281,7 +281,7 @@ def page(i): extra_faq_online = extra.get('extra_faq_online', '') if extra_faq_online != '': x+=' [ '+extra_faq_online+' ] ' - + if x !='': st.markdown('*'+x.strip()+'*') @@ -297,7 +297,7 @@ def page(i): var1 = '\\' host_os_flag = 'linux' - + show_cm_install = st.toggle('Install MLCommons Collective Mind', value=False) if show_cm_install: @@ -308,7 
+308,7 @@ def page(i): r = playground_install.page(st, params, extra) if r['return']>0: return r - + st.markdown('---') @@ -333,17 +333,17 @@ def page(i): x = str(value) z = x - if ' ' in x or ':' in x or '/' in x or '\\' in x: + if ' ' in x or ':' in x or '/' in x or '\\' in x: x='"'+x+'"' flags+='='+x flags_dict[key2]=z - - - - + + + + ############################################################################ run_via_docker = False if not extra.get('skip_script_docker_func', False) and len(meta.get('docker',{}))>0: @@ -358,7 +358,7 @@ def page(i): ############################################################################ use_experiment_from_extra = extra.get('use_experiment', False) - + use_experiment = st.toggle('Use CM experiment for reproducibility', key='use_cm_experiment', value=use_experiment_from_extra) extra_cm_prefix = '' @@ -366,7 +366,7 @@ def page(i): cli = 'cm run experiment --tags={} -- {}\n '.format("repro,"+script_tags, var1) + cli ############################################################################ - + extra_setup = extra.get('extra_setup','').strip() if len(extra_setup)>2: show_extra_setup_notes = st.toggle('Show extra setup notes?', value = True) @@ -376,75 +376,75 @@ def page(i): st.markdown(extra_setup) # st.markdown('---') - + show_python_api = st.toggle('Run via Python API', value=False) # Python API if show_python_api: - - final_script_tags = script_tags - if len(selected_variations)>0: - for sv in selected_variations: - final_script_tags += ' '+sv - final_script_tags = final_script_tags.replace(' ',',') - - if use_experiment: - dd = { - 'action': 'run', - 'automation': 'experiment,a0a2d123ef064bcb', - 'tags': script_tags, - 'out': 'con' - } - - unparsed_cmd = ['cm', - 'run', - 'script,5b4e0237da074764', - '--tags='+final_script_tags] - - for flag in flags_dict: - value = flags_dict[flag] - unparsed_cmd.append('--' + flag + '=' + str(value)) - - dd['unparsed_cmd'] = unparsed_cmd - - else: - dd = { - 'action':action, - 'automation':'script,5b4e0237da074764', - } - - dd['tags']=final_script_tags - - dd['out']='con' - - dd.update(flags_dict) - - import json - dd_json=json.dumps(dd, indent=2) - dd_json=dd_json.replace(': true', ': True').replace(': false', ': False') - - y = 'import cmind\n' - y+= 'r = cmind.access('+dd_json+')\n' - y+= 'if r[\'return\']>0: print (r[\'error\'])\n' - - x=''' - ```python - {} - '''.format(y) - - # st.write(x.replace('\n','
\n'), unsafe_allow_html=True) - - st.markdown(x) - - - + + final_script_tags = script_tags + if len(selected_variations)>0: + for sv in selected_variations: + final_script_tags += ' '+sv + final_script_tags = final_script_tags.replace(' ',',') + + if use_experiment: + dd = { + 'action': 'run', + 'automation': 'experiment,a0a2d123ef064bcb', + 'tags': script_tags, + 'out': 'con' + } + + unparsed_cmd = ['cm', + 'run', + 'script,5b4e0237da074764', + '--tags='+final_script_tags] + + for flag in flags_dict: + value = flags_dict[flag] + unparsed_cmd.append('--' + flag + '=' + str(value)) + + dd['unparsed_cmd'] = unparsed_cmd + + else: + dd = { + 'action':action, + 'automation':'script,5b4e0237da074764', + } + + dd['tags']=final_script_tags + + dd['out']='con' + + dd.update(flags_dict) + + import json + dd_json=json.dumps(dd, indent=2) + dd_json=dd_json.replace(': true', ': True').replace(': false', ': False') + + y = 'import cmind\n' + y+= 'r = cmind.access('+dd_json+')\n' + y+= 'if r[\'return\']>0: print (r[\'error\'])\n' + + x=''' + ```python + {} + '''.format(y) + + # st.write(x.replace('\n','
\n'), unsafe_allow_html=True) + + st.markdown(x) + + + ############################################################################ show_cli = st.toggle('Run from the command line', value = True) if show_cli: # Add explicit button "Run" cli = st.text_area('', cli, height=600) - + if no_run=='' and st.button("Run in the new terminal"): cli = cli+var1+'--pause\n' @@ -471,8 +471,8 @@ def page(i): x = '''
- We would like to thank all Collective Mind users and contributors - for supporting this collaborative engineering effort -
+ We would like to thank all Collective Mind users and contributors + for supporting this collaborative engineering effort -
please don't hesitate report issues or suggest features at CM GitHub!
''' diff --git a/script/import-mlperf-inference-to-experiment/customize.py b/script/import-mlperf-inference-to-experiment/customize.py index 486bc76d15..fcacf3412f 100644 --- a/script/import-mlperf-inference-to-experiment/customize.py +++ b/script/import-mlperf-inference-to-experiment/customize.py @@ -150,12 +150,12 @@ def convert_summary_csv_to_experiment(path, version, env): v=True else: try: - v=float(v) + v=float(v) - if v==int(v): - v=int(v) + if v==int(v): + v=int(v) except ValueError: - pass + pass result[k] = v @@ -223,7 +223,7 @@ def convert_summary_csv_to_experiment(path, version, env): env_target_repo=env.get('CM_IMPORT_MLPERF_INFERENCE_TARGET_REPO','').strip() target_repo='' if env_target_repo=='' else env_target_repo+':' - + print ('') for name in experiment: print (' Preparing experiment artifact "{}"'.format(name)) diff --git a/script/import-mlperf-tiny-to-experiment/customize.py b/script/import-mlperf-tiny-to-experiment/customize.py index 8929cba8d7..bb31698f8c 100644 --- a/script/import-mlperf-tiny-to-experiment/customize.py +++ b/script/import-mlperf-tiny-to-experiment/customize.py @@ -248,26 +248,26 @@ def convert_repo_to_experiment(path, version, env): for line in lines: j = line.find('ulp-mlperf: ') if j>=0: - j1 = line.find(':', j+12) - if j1>=0: - accuracy_key = 'accuracy_'+line[j+12:j1] - value = line[j1+2:] + j1 = line.find(':', j+12) + if j1>=0: + accuracy_key = 'accuracy_'+line[j+12:j1] + value = line[j1+2:] - if value.endswith('%'): - value = value[:-1] - results[accuracy_key+'_metric']='%' + if value.endswith('%'): + value = value[:-1] + results[accuracy_key+'_metric']='%' - value = float(value) + value = float(value) - results[accuracy_key] = value + results[accuracy_key] = value - if not found: - # first value - results['Accuracy'] = value - results['_Accuracy'] = value + if not found: + # first value + results['Accuracy'] = value + results['_Accuracy'] = value - found = True + found = True if not found: print (' * Warning: accuracy not found in the file {}'.format(paccuracy)) diff --git a/script/import-mlperf-training-to-experiment/customize.py b/script/import-mlperf-training-to-experiment/customize.py index 19a69a6af8..18130ba86d 100644 --- a/script/import-mlperf-training-to-experiment/customize.py +++ b/script/import-mlperf-training-to-experiment/customize.py @@ -109,8 +109,8 @@ def preprocess(i): print ('Repo path: {}'.format(path)) print ('Repo version: {}'.format(version)) - r = automation.run_native_script({'run_script_input':run_script_input, - 'env':env, + r = automation.run_native_script({'run_script_input':run_script_input, + 'env':env, 'script_name':'run_mlperf_logger'}) if r['return']>0: return r @@ -158,12 +158,12 @@ def convert_summary_csv_to_experiment(path, version, env): v=True else: try: - v=float(v) + v=float(v) - if v==int(v): - v=int(v) + if v==int(v): + v=int(v) except ValueError: - pass + pass result[k] = v diff --git a/script/install-bazel/customize.py b/script/install-bazel/customize.py index d656e40bac..d94ba83c81 100644 --- a/script/install-bazel/customize.py +++ b/script/install-bazel/customize.py @@ -2,7 +2,7 @@ import os def preprocess(i): - + os_info = i['os_info'] env = i['env'] @@ -31,24 +31,24 @@ def preprocess(i): platform = env['CM_HOST_PLATFORM_FLAVOR'] ext = '.sh' - - filename = 'bazel-{}-{}{}-{}{}'.format(need_version, + + filename = 'bazel-{}-{}{}-{}{}'.format(need_version, prefix, xos, platform, ext) - + url = 'https://github.com/bazelbuild/bazel/releases/download/{}/{}'.format(need_version, filename) cur_dir = 
os.getcwd() - + if os_info['platform'] == 'windows': - bazel_bin = 'bazel.exe' + bazel_bin = 'bazel.exe' path = cur_dir else: bazel_bin = 'bazel' path = os.path.join(cur_dir, 'install', 'bin') - + env['CM_BAZEL_DOWNLOAD_URL'] = url env['CM_BAZEL_DOWNLOAD_FILE'] = filename diff --git a/script/install-cmake-prebuilt/customize.py b/script/install-cmake-prebuilt/customize.py index 263e667c47..85596e6e9f 100644 --- a/script/install-cmake-prebuilt/customize.py +++ b/script/install-cmake-prebuilt/customize.py @@ -46,17 +46,17 @@ def preprocess(i): package_name += '.zip' else: - package_name='cmake-' + need_version + '-linux-' + package_name='cmake-' + need_version + '-linux-' - if host_os_machine.startswith('arm') or host_os_machine.startswith('aarch'): - if host_os_bits=='64': - package_name += 'aarch64' - else: - return {'return':1, 'error':'this script doesn\'t support armv7'} - else: - package_name += 'x86_64' + if host_os_machine.startswith('arm') or host_os_machine.startswith('aarch'): + if host_os_bits=='64': + package_name += 'aarch64' + else: + return {'return':1, 'error':'this script doesn\'t support armv7'} + else: + package_name += 'x86_64' - package_name += '.tar.gz' + package_name += '.tar.gz' package_url = 'https://github.com/Kitware/CMake/releases/download/v' + need_version + '/' + package_name @@ -68,8 +68,8 @@ def preprocess(i): cm = automation.cmind - r = cm.access({'action':'download_file', - 'automation':'utils,dc2743f8450541e3', + r = cm.access({'action':'download_file', + 'automation':'utils,dc2743f8450541e3', 'url':package_url}) if r['return']>0: return r @@ -79,8 +79,8 @@ def preprocess(i): if os_info['platform'] == 'windows': print ('Unzipping file {}'.format(filename)) - r = cm.access({'action':'unzip_file', - 'automation':'utils,dc2743f8450541e3', + r = cm.access({'action':'unzip_file', + 'automation':'utils,dc2743f8450541e3', 'strip_folders':1, 'filename':filename}) if r['return']>0: return r diff --git a/script/install-intel-neural-speed-from-src/customize.py b/script/install-intel-neural-speed-from-src/customize.py index abb5680baf..c40b31af25 100644 --- a/script/install-intel-neural-speed-from-src/customize.py +++ b/script/install-intel-neural-speed-from-src/customize.py @@ -11,7 +11,7 @@ def preprocess(i): env = i['env'] env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join(env['CM_CONDA_BIN_PATH'], "python") - + automation = i['automation'] recursion_spaces = i['recursion_spaces'] diff --git a/script/install-llvm-prebuilt/customize.py b/script/install-llvm-prebuilt/customize.py index 1550c0ed9e..17e9746925 100644 --- a/script/install-llvm-prebuilt/customize.py +++ b/script/install-llvm-prebuilt/customize.py @@ -61,95 +61,95 @@ def preprocess(i): input('Press Enter to continue!') else: - if host_os_machine.startswith('arm') or host_os_machine.startswith('aarch'): - if host_os_bits=='64': - package_name = 'clang+llvm-' + need_version + '-aarch64-linux-gnu.tar.xz' - else: - package_name = 'clang+llvm-' + need_version + '-armv7a-linux-gnueabihf.tar.xz' - else: - host_os_flavor = env['CM_HOST_OS_FLAVOR'] + if host_os_machine.startswith('arm') or host_os_machine.startswith('aarch'): + if host_os_bits=='64': + package_name = 'clang+llvm-' + need_version + '-aarch64-linux-gnu.tar.xz' + else: + package_name = 'clang+llvm-' + need_version + '-armv7a-linux-gnueabihf.tar.xz' + else: + host_os_flavor = env['CM_HOST_OS_FLAVOR'] - host_os_version = env['CM_HOST_OS_VERSION'] + host_os_version = env['CM_HOST_OS_VERSION'] # if 'debian' in host_os_flavor: # return {'return':1, 
'error':'debian is not supported yet'} # # else: - # Treat all Linux flavours as Ubuntu for now ... + # Treat all Linux flavours as Ubuntu for now ... - if True: - default_os = '22.04' + if True: + default_os = '22.04' - if len(need_version_split)>0: - hver = 0 - try: - hver = int(need_version_split[0]) - except: - pass + if len(need_version_split)>0: + hver = 0 + try: + hver = int(need_version_split[0]) + except: + pass - if hver>0: - if hver<16: - default_os='18.04' - else: - default_os='22.04' + if hver>0: + if hver<16: + default_os='18.04' + else: + default_os='22.04' - if need_version == '10.0.1': - default_os = '16.04' + if need_version == '10.0.1': + default_os = '16.04' - elif need_version == '11.0.0': - default_os = '20.04' + elif need_version == '11.0.0': + default_os = '20.04' - elif need_version == '11.0.1': - default_os = '16.04' - if host_os_version == '20.10': - default_os = '20.10' + elif need_version == '11.0.1': + default_os = '16.04' + if host_os_version == '20.10': + default_os = '20.10' - elif need_version == '12.0.0': - default_os = '16.04' - if host_os_version == '20.04' or host_os_version == '20.10': - default_os = '20.04' + elif need_version == '12.0.0': + default_os = '16.04' + if host_os_version == '20.04' or host_os_version == '20.10': + default_os = '20.04' - elif need_version == '12.0.1': - default_os = '16.04' - #if host_os_version.startswith('18') or host_os_version.startswith('20'): - # default_os = '18.04' + elif need_version == '12.0.1': + default_os = '16.04' + #if host_os_version.startswith('18') or host_os_version.startswith('20'): + # default_os = '18.04' - elif need_version == '13.0.0': - default_os = '16.04' - if host_os_version.startswith('20'): - default_os = '20.04' + elif need_version == '13.0.0': + default_os = '16.04' + if host_os_version.startswith('20'): + default_os = '20.04' - elif need_version == '13.0.1': - default_os = '18.04' + elif need_version == '13.0.1': + default_os = '18.04' - elif need_version == '14.0.0': - default_os = '18.04' + elif need_version == '14.0.0': + default_os = '18.04' - elif need_version == '15.0.6': - default_os = '18.04' + elif need_version == '15.0.6': + default_os = '18.04' - elif need_version == '16.0.0': - default_os = '18.04' + elif need_version == '16.0.0': + default_os = '18.04' - elif need_version == '16.0.4': - default_os = '22.04' + elif need_version == '16.0.4': + default_os = '22.04' - elif need_version == '17.0.2': - default_os = '22.04' + elif need_version == '17.0.2': + default_os = '22.04' - elif need_version == '17.0.2': - default_os = '22.04' + elif need_version == '17.0.2': + default_os = '22.04' - elif need_version == '17.0.4': - default_os = '22.04' + elif need_version == '17.0.4': + default_os = '22.04' - elif need_version == '17.0.5': - default_os = '22.04' + elif need_version == '17.0.5': + default_os = '22.04' - elif need_version == '17.0.6': - default_os = '22.04' + elif need_version == '17.0.6': + default_os = '22.04' - package_name = 'clang+llvm-' + need_version + '-x86_64-linux-gnu-ubuntu-' + default_os + '.tar.xz' + package_name = 'clang+llvm-' + need_version + '-x86_64-linux-gnu-ubuntu-' + default_os + '.tar.xz' package_url = 'https://github.com/llvm/llvm-project/releases/download/llvmorg-' + need_version + '/' + package_name @@ -161,8 +161,8 @@ def preprocess(i): cm = automation.cmind - r = cm.access({'action':'download_file', - 'automation':'utils,dc2743f8450541e3', + r = cm.access({'action':'download_file', + 'automation':'utils,dc2743f8450541e3', 'url':package_url}) if 
r['return']>0: return r @@ -204,5 +204,5 @@ def postprocess(i): # if cur_dir_include not in env['+CPLUS_INCLUDE_PATH']: # env['+CPLUS_INCLUDE_PATH'].append(cur_dir_include) - + return {'return':0, 'version': version} diff --git a/script/install-pip-package-for-cmind-python/customize.py b/script/install-pip-package-for-cmind-python/customize.py index 1630bc5428..97c88f8402 100644 --- a/script/install-pip-package-for-cmind-python/customize.py +++ b/script/install-pip-package-for-cmind-python/customize.py @@ -17,7 +17,7 @@ def install(package): run_cmd = [sys.executable, "-m", "pip", "install", package] run_cmd += additional_install_options r = subprocess.run(run_cmd, check=True) - + return {'return':0} def preprocess(i): diff --git a/script/install-python-venv/customize.py b/script/install-python-venv/customize.py index 84fe4984ad..e6b5993ed3 100644 --- a/script/install-python-venv/customize.py +++ b/script/install-python-venv/customize.py @@ -45,7 +45,7 @@ def preprocess(i): env['CM_TMP_FAIL_IF_NOT_FOUND'] = 'yes' - r = automation.update_deps({'deps':meta['post_deps'], + r = automation.update_deps({'deps':meta['post_deps'], 'update_deps':{'register-python': {'extra_cache_tags':','.join(add_python_extra_cache_tags)}}}) if r['return']>0: return r diff --git a/script/install-qaic-compute-sdk-from-src/customize.py b/script/install-qaic-compute-sdk-from-src/customize.py index f6d4b1ab92..12deb42c17 100644 --- a/script/install-qaic-compute-sdk-from-src/customize.py +++ b/script/install-qaic-compute-sdk-from-src/customize.py @@ -32,7 +32,7 @@ def postprocess(i): #env['CM_QAIC_RUNNER_PATH'] = os.path.join(env['CM_QAIC_SOFTWARE_KIT_PATH'], "build", "utils", "qaic-runner") if '+PATH' not in env: - env['+PATH'] = [] + env['+PATH'] = [] env['CM_QAIC_COMPUTE_SDK_INSTALL_PATH'] = os.path.join(os.getcwd(), "src", "install", "qaic-compute-"+env['CM_QAIC_COMPUTE_SDK_INSTALL_MODE']) diff --git a/script/launch-benchmark/customize.py b/script/launch-benchmark/customize.py index 5db5e9f817..285f2889ea 100644 --- a/script/launch-benchmark/customize.py +++ b/script/launch-benchmark/customize.py @@ -53,25 +53,25 @@ def load_cfg(i): prune_key_uid = prune.get('key_uid', '') prune_uid = prune.get('uid', '') prune_list = prune.get('list',[]) - + # Checking individual files inside CM entry selection = [] - + if i.get('skip_files', False): for l in lst: - meta = l.meta - full_path = l.path - - meta['full_path']=full_path - - add = True - - if prune_key!='' and prune_key_uid!='': - if prune_key_uid not in meta.get(prune_key, []): - add = False - - if add: - selection.append(meta) + meta = l.meta + full_path = l.path + + meta['full_path']=full_path + + add = True + + if prune_key!='' and prune_key_uid!='': + if prune_key_uid not in meta.get(prune_key, []): + add = False + + if add: + selection.append(meta) else: for l in lst: path = l.path @@ -102,7 +102,7 @@ def load_cfg(i): r = process_base(meta, full_path) if r['return']>0: return r meta = r['meta'] - + uid = meta['uid'] # Check pruning @@ -111,7 +111,7 @@ def load_cfg(i): if len(prune)>0: if prune_uid!='' and uid != prune_uid: add = False - + if add and len(prune_list)>0 and uid not in prune_list: add = False @@ -122,7 +122,7 @@ def load_cfg(i): meta['full_path']=full_path add_all_tags = copy.deepcopy(all_tags) - + name = meta.get('name','') if name=='': name = ' '.join(meta.get('tags',[])) @@ -135,7 +135,7 @@ def load_cfg(i): add_all_tags += [v.lower() for v in name.split(' ')] else: add_all_tags += file_tags.split(',') - + meta['all_tags']=add_all_tags 
meta['main_meta']=main_meta @@ -155,17 +155,17 @@ def process_base(meta, full_path): filename = _base full_path_base = os.path.dirname(full_path) - + if not filename.endswith('.yaml') and not filename.endswith('.json'): return {'return':1, 'error':'_base file {} in {} must be .yaml or .json'.format(filename, full_path)} - + if ':' in _base: x = _base.split(':') name = x[0] full_path_base = base_path.get(name, '') if full_path_base == '': - + # Find artifact r = cmind.access({'action':'find', 'automation':'cfg', @@ -175,21 +175,21 @@ def process_base(meta, full_path): lst = r['list'] if len(lst)==0: - if not os.path.isfile(path): + if not os.path.isfile(path): return {'return':1, 'error':'_base artifact {} not found in {}'.format(name, full_path)} full_path_base = lst[0].path - + base_path[name] = full_path_base - + filename = x[1] - + # Load base path = os.path.join(full_path_base, filename) - if not os.path.isfile(path): + if not os.path.isfile(path): return {'return':1, 'error':'_base file {} not found in {}'.format(filename, full_path)} - + if path in base_path_meta: base = copy.deepcopy(base_path_meta[path]) else: @@ -232,7 +232,7 @@ def process_base(meta, full_path): def get_with_complex_key(meta, key): j = key.find('.') - + if j<0: return meta.get(key) @@ -272,7 +272,7 @@ def prepare_table(i): dimension_values = {} dimension_keys = [] - + if len(dimensions) == 0: keys = [('test', 'CM test', 400, 'leftAligned')] else: @@ -295,7 +295,7 @@ def prepare_table(i): # # if value!=None and value!='' and value not in dimension_values[key]: # dimension_values.append(value) - + # If dimensions, sort by dimensions for d in list(reversed(dimension_keys)): selection = sorted(selection, key = lambda x: get_with_complex_key_safe(selection, d)) @@ -314,9 +314,9 @@ def prepare_table(i): 'reproduced':'https://cTuning.org/images/results_reproduced_v1_1_small.png'} - - - + + + for s in selection: row = {} @@ -356,10 +356,10 @@ def prepare_table(i): if s.get('reproduced', False): x = '
'.format(url, badges_url['reproduced']) row['reproduced'] = x - + # Check misc notes row['notes']=s.get('notes','') - + # Finish row all_data.append(row) @@ -407,9 +407,9 @@ def gui(i): st.markdown('### {}'.format(title)) - - - + + + # Check if test uid is specified uid = '' x = params.get('uid',['']) @@ -422,10 +422,10 @@ def gui(i): compute_uid = '' x = params.get('compute_uid',['']) if len(x)>0 and x[0]!='': compute_uid = x[0].strip() - - - + + + ############################################################## # Check the first level of benchmarks ii = {'tags':'benchmark,run', 'skip_files':True, 'prune':{}} @@ -440,14 +440,14 @@ def gui(i): ii['prune']['key_uid']=compute_uid r=load_cfg(ii) - if r['return']>0: return r + if r['return']>0: return r lst = r['selection'] if len(lst)==0: st.markdown('Warning: no benchmarks found!') return {'return':0} - + test_meta = {} bench_id = 0 @@ -467,8 +467,8 @@ def gui(i): bench_id = 1 compute_uid = test_meta['compute_uid'] bench_supported_compute = [compute_uid] - - + + if uid == '': selection = sorted(lst, key = lambda v: v['name']) bench_selection = [{'name':''}] + selection @@ -488,14 +488,14 @@ def gui(i): break j+=1 - + bench_id = st.selectbox('Select benchmark:', - range(len(bench_selection)), + range(len(bench_selection)), format_func=lambda x: bench_selection[x]['name'], index = bench_id_index, key = 'bench') - + bench_supported_compute = [] bench_meta = {} if bench_id>0: @@ -513,7 +513,7 @@ def gui(i): x+='\n' st.markdown(x) - + ########################################################################################################### if True==True: ############################################################## @@ -528,27 +528,27 @@ def gui(i): if len(x) == 0: st.markdown('Warning: no supported compute selected!') return {'return':0} - + ii['prune']={'list':x} r=load_cfg(ii) - if r['return']>0: return r + if r['return']>0: return r selection = sorted(r['selection'], key = lambda v: v['name']) if len(selection) == 0 : st.markdown('Warning: no supported compute found!') return {'return':0} - + compute_selection = [{'name':''}] if len(selection)>0: - compute_selection += selection + compute_selection += selection compute_id_index = 0 if compute_uid == '' else 1 - + if uid == '': compute_id = st.selectbox('Select target hardware to benchmark:', - range(len(compute_selection)), + range(len(compute_selection)), format_func=lambda x: compute_selection[x]['name'], index = compute_id_index, key = 'compute') @@ -557,7 +557,7 @@ def gui(i): if compute_id>0: compute = compute_selection[compute_id] compute_uid = compute['uid'] - + compute_meta = {} for c in compute_selection: if c.get('uid','')!='': @@ -577,14 +577,14 @@ def gui(i): ii['prune']={'key':'compute_uid', 'key_uid':compute_uid} r=load_cfg(ii) - if r['return']>0: return r + if r['return']>0: return r selection = sorted(r['selection'], key = lambda v: v['name']) # Check how many and prune if len(selection) == 0: - st.markdown('No CM tests found') - return {'return':0} + st.markdown('No CM tests found') + return {'return':0} for s in selection: c_uid = s.get('compute_uid','') @@ -593,15 +593,15 @@ def gui(i): if c_tags!='': s['all_tags']+=c_tags.split(',') - s['compute_meta']=compute_meta[c_uid] + s['compute_meta']=compute_meta[c_uid] + - if len(selection)>1: # Update selection with compute tags test_tags = '' x = params.get('tags',['']) if len(x)>0 and x[0]!='': test_tags = x[0].strip() - + test_tags = st.text_input('Found {} CM tests. 
Prune them by tags:'.format(str(len(selection))), value=test_tags, key='test_tags').strip() if test_tags!='': @@ -621,24 +621,24 @@ def gui(i): if add: pruned_selection.append(s) - + selection = pruned_selection test_selection = [{'name':''}] + selection - - - + + + if len(selection)<200: # Creating compute selector test_id_index = 1 if len(selection)==1 else 0 - + test_id = st.selectbox('Select a test from {}:'.format(str(len(selection))), - range(len(test_selection)), + range(len(test_selection)), format_func=lambda x: test_selection[x]['name'], index = test_id_index, key = 'test') - - + + if test_id >0: test_meta = test_selection[test_id] else: @@ -646,19 +646,19 @@ def gui(i): # View many (table) ii = {'selection':selection, 'misc_module':misc} - + # Check if dimensions in the bench dimensions = bench_meta.get('dimensions', []) if len(dimensions)>0: - viewer_selection = ['benchmark specific', 'universal'] - - viewer = st.selectbox('Viewer:', viewer_selection, key = 'viewer') + viewer_selection = ['benchmark specific', 'universal'] + + viewer = st.selectbox('Viewer:', viewer_selection, key = 'viewer') + + if viewer == 'benchmark specific': + ii['dimensions'] = dimensions - if viewer == 'benchmark specific': - ii['dimensions'] = dimensions - else: - st.markdown('---') + st.markdown('---') r = prepare_table(ii) if r['return']>0: return r @@ -667,7 +667,7 @@ def gui(i): html=df.to_html(escape=False, justify='left') st.write(html, unsafe_allow_html = True) - + # st.dataframe(df, unsafe_allow_html = True) @@ -686,14 +686,14 @@ def gui(i): if c_tags!='': test_meta['all_tags']+=c_tags.split(',') - test_meta['compute_meta']=compute_meta[c_uid] + test_meta['compute_meta']=compute_meta[c_uid] + - if uid == '': st.markdown('---') uid = test_meta['uid'] - + # First, check if there is a README test_path = test_meta['full_path'] @@ -717,16 +717,15 @@ def gui(i): ``` """.format(json.dumps(test_meta, indent=2)) st.markdown(x) - - - + + + # Create self link # This misc module is in CM "gui" script x1 = misc.make_url(uid, key='uid', action='howtorun', md=False) end_html='
Self link
'.format(x1) return {'return':0, 'end_html': end_html} - diff --git a/script/prepare-training-data-bert/customize.py b/script/prepare-training-data-bert/customize.py index a0cf7beb2c..fbb57ea134 100644 --- a/script/prepare-training-data-bert/customize.py +++ b/script/prepare-training-data-bert/customize.py @@ -19,7 +19,7 @@ def preprocess(i): env['CM_BERT_CONFIG_DOWNLOAD_DIR'] = os.path.join(datadir, "phase1") env['CM_BERT_VOCAB_DOWNLOAD_DIR'] = os.path.join(datadir, "phase1") env['CM_BERT_DATA_DOWNLOAD_DIR'] = os.path.join(datadir, "download") - + env['CM_BERT_CHECKPOINT_DOWNLOAD_DIR'] = os.path.join(datadir, "phase1") if env.get("CM_TMP_VARIATION", "") == "nvidia": diff --git a/script/prepare-training-data-resnet/customize.py b/script/prepare-training-data-resnet/customize.py index 825a96df59..494bd894a0 100644 --- a/script/prepare-training-data-resnet/customize.py +++ b/script/prepare-training-data-resnet/customize.py @@ -19,7 +19,7 @@ def preprocess(i): env['MXNET_VER'] = env.get('CM_MXNET_VER', '22.08').replace("-", ".") env['CM_IMAGENET_LABELS_DOWNLOAD_DIR'] = env['CM_DATASET_IMAGENET_TRAIN_PATH'] - + if env.get("CM_TMP_VARIATION", "") == "nvidia": code_path = os.path.join(env['CM_NVIDIA_DEEPLEARNING_EXAMPLES_REPO_PATH'], 'MxNet', 'Classification', 'RN50v1.5') env['CM_RUN_DIR'] = code_path diff --git a/script/preprocess-mlperf-inference-submission/customize.py b/script/preprocess-mlperf-inference-submission/customize.py index 474e4a42ed..e7768c68fb 100644 --- a/script/preprocess-mlperf-inference-submission/customize.py +++ b/script/preprocess-mlperf-inference-submission/customize.py @@ -43,5 +43,5 @@ def postprocess(i): shutil.copytree(submission_dir, submission_backup) shutil.rmtree(submission_dir) os.rename(submission_processed, submission_dir) - + return {'return':0} diff --git a/script/print-any-text/customize.py b/script/print-any-text/customize.py index 093cafdcff..ec28a2c1d8 100644 --- a/script/print-any-text/customize.py +++ b/script/print-any-text/customize.py @@ -11,7 +11,7 @@ def postprocess(i): os_env_keys = env.get('CM_PRINT_ANY_OS_ENV_KEYS', '').strip() printed = False - for k,e,t in [(cm_env_keys, env, 'CM_ENV'), + for k,e,t in [(cm_env_keys, env, 'CM_ENV'), (os_env_keys, os.environ, 'OS_ENV')]: if k!='': @@ -27,4 +27,3 @@ def postprocess(i): print ('') return {'return':0} - diff --git a/script/print-croissant-desc/code.py b/script/print-croissant-desc/code.py index a475c5a6ec..480e388bbc 100644 --- a/script/print-croissant-desc/code.py +++ b/script/print-croissant-desc/code.py @@ -4,13 +4,13 @@ import mlcroissant as mlc def main(): - + url = os.environ.get('CM_PRINT_CROISSANT_URL', '') if url=='': print ('Error: --url is not specified') exit(1) - + ds = mlc.Dataset(url) metadata = ds.metadata.to_json() diff --git a/script/process-ae-users/code.py b/script/process-ae-users/code.py index 6437eaa5e4..4bc917ecb8 100644 --- a/script/process-ae-users/code.py +++ b/script/process-ae-users/code.py @@ -20,7 +20,7 @@ def main(): for user in sorted(users, key = lambda u: (u['last'].lower(), u['first'].lower())): full_name = user['first']+' '+user['last'] - + name = full_name + ' ('+user['affiliation']+')' print (name) @@ -64,7 +64,7 @@ def main(): print ('') print ('Saved HTML to {}'.format(fo)) - + cmind.utils.save_txt(fo, html) diff --git a/script/process-mlperf-accuracy/customize.py b/script/process-mlperf-accuracy/customize.py index bb5d7a2865..bb124cc02c 100644 --- a/script/process-mlperf-accuracy/customize.py +++ b/script/process-mlperf-accuracy/customize.py @@ -7,7 +7,7 
@@ def preprocess(i): os_info = i['os_info'] xsep = ';' if os_info['platform'] == 'windows' else ':' - + env = i['env'] results_dir = env.get("CM_MLPERF_ACCURACY_RESULTS_DIR", "") @@ -83,7 +83,7 @@ def preprocess(i): checkpoint_path = env['CM_VLLM_SERVER_MODEL_NAME'] CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --checkpoint-path '" + checkpoint_path + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ "' --dataset-file '" + env['CM_DATASET_PREPROCESSED_PATH'] + "'"+ " --dtype " + env.get('CM_ACCURACY_DTYPE', "int32") +" > '" + out_file + "'" - + elif dataset == "openorca-gsm8k-mbxp-combined": accuracy_checker_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "mixtral-8x7b", "evaluate-accuracy.py") @@ -164,7 +164,7 @@ def postprocess(i): xsep = ';' if os_info['platform'] == 'windows' else ':' results_dir = env.get("CM_MLPERF_ACCURACY_RESULTS_DIR", "") - + results_dir_split = results_dir.split(xsep) for result_dir in results_dir_split: @@ -189,13 +189,12 @@ def postprocess(i): import json try: - z=json.loads(y) - state['app_mlperf_inference_accuracy']=z + z=json.loads(y) + state['app_mlperf_inference_accuracy']=z - break + break except ValueError as e: - pass + pass print ('') return {'return':0} - diff --git a/script/publish-results-to-dashboard/code.py b/script/publish-results-to-dashboard/code.py index ca981cec54..104c80131d 100644 --- a/script/publish-results-to-dashboard/code.py +++ b/script/publish-results-to-dashboard/code.py @@ -82,10 +82,10 @@ def main(): for k in x: env_key = x[k] if os.environ.get(env_key,'')!='': - result['cm_misc_input_'+k]=os.environ[env_key] + result['cm_misc_input_'+k]=os.environ[env_key] - wandb.init(entity = dashboard_user, - project = dashboard_project, + wandb.init(entity = dashboard_user, + project = dashboard_project, name = label) wandb.log(result) diff --git a/script/remote-run-commands/customize.py b/script/remote-run-commands/customize.py index 78676a2d0f..492fa4b5ca 100644 --- a/script/remote-run-commands/customize.py +++ b/script/remote-run-commands/customize.py @@ -45,5 +45,3 @@ def preprocess(i): def postprocess(i): return {'return':0} - - diff --git a/script/run-all-mlperf-models/customize.py b/script/run-all-mlperf-models/customize.py index 40f0fced40..fda731f9d7 100644 --- a/script/run-all-mlperf-models/customize.py +++ b/script/run-all-mlperf-models/customize.py @@ -93,7 +93,7 @@ def preprocess(i): - + return {'return':0} def postprocess(i): diff --git a/script/run-docker-container/customize.py b/script/run-docker-container/customize.py index d2f673ecb5..188cd72d90 100644 --- a/script/run-docker-container/customize.py +++ b/script/run-docker-container/customize.py @@ -21,8 +21,8 @@ def preprocess(i): else: CM_RUN_CMD="cm run script --tags=" + env['CM_DOCKER_RUN_SCRIPT_TAGS'] + ' --quiet' - r = cm.access({'action':'search', - 'automation':'script', + r = cm.access({'action':'search', + 'automation':'script', 'tags': env['CM_DOCKER_RUN_SCRIPT_TAGS']}) if len(r['list']) < 1: raise Exception('CM script with tags '+ env['CM_DOCKER_RUN_SCRIPT_TAGS'] + ' not found!') @@ -52,7 +52,7 @@ def preprocess(i): CMD += " 2> /dev/null" print (' '+CMD) print ('') - + try: docker_container = subprocess.check_output(CMD, shell=True).decode("utf-8") except Exception as e: @@ -81,7 +81,7 @@ def preprocess(i): print ('') print (' '+CMD) print ('') - + try: docker_image = subprocess.check_output(CMD, shell=True).decode("utf-8") except Exception as e: @@ -180,7 +180,7 @@ def 
postprocess(i): mount_parts = [mount_cmd[:j], mount_cmd[j+1:]] else: return {'return':1, 'error': 'Can\'t find separator : in a mount string: {}'.format(mount_cmd)} - + # mount_parts = mount_cmd.split(":") # if len(mount_parts) != 2: # return {'return': 1, 'error': 'Invalid mount {} specified'.format(mount_parts)} @@ -339,7 +339,7 @@ def update_docker_info(env): env['CM_DOCKER_IMAGE_NAME'] = docker_image_name docker_image_tag_extra = env.get('CM_DOCKER_IMAGE_TAG_EXTRA', '-latest') - + docker_image_tag = env.get('CM_DOCKER_IMAGE_TAG', docker_image_base.replace(':','-').replace('_','').replace("/","-") + docker_image_tag_extra) env['CM_DOCKER_IMAGE_TAG'] = docker_image_tag diff --git a/script/run-mlperf-inference-app/customize.py b/script/run-mlperf-inference-app/customize.py index d549789786..d2559d8b6a 100644 --- a/script/run-mlperf-inference-app/customize.py +++ b/script/run-mlperf-inference-app/customize.py @@ -20,7 +20,7 @@ def preprocess(i): state = i['state'] script_path = i['run_script_input']['path'] - if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": + if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": return {'return':0} if env.get('CM_DOCKER_IMAGE_NAME', '') == 'scc24': @@ -107,7 +107,7 @@ def preprocess(i): if env.get('OUTPUT_BASE_DIR', '') == '': env['OUTPUT_BASE_DIR'] = env.get('CM_MLPERF_INFERENCE_RESULTS_DIR', os.getcwd()) - + test_list = ["TEST01"] if env['CM_MODEL'] in ["resnet50", "sdxl"]: test_list.append("TEST04") @@ -125,7 +125,7 @@ def preprocess(i): variation_run_style= ",_" + env.get("CM_MLPERF_RUN_STYLE", "test") variation_reproducibility= ",_" + env["CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS"] if env.get("CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS","") != "" else "" variation_all_models= ",_all-models" if env.get("CM_MLPERF_ALL_MODELS","") == "yes" else "" - + if env.get("CM_MLPERF_MODEL_PRECISION", '') != '': variation_quantization_string= ",_" + env["CM_MLPERF_MODEL_PRECISION"] else: @@ -302,7 +302,7 @@ def preprocess(i): def get_valid_scenarios(model, category, mlperf_version, mlperf_path): import sys - + submission_checker_dir = os.path.join(mlperf_path, "tools", "submission") sys.path.append(submission_checker_dir) @@ -313,9 +313,9 @@ def get_valid_scenarios(model, category, mlperf_version, mlperf_path): import submission_checker as checker if "dlrm-99" in model: - model = model.replace("dlrm-99", "dlrm-v2-99") + model = model.replace("dlrm-99", "dlrm-v2-99") if "sdxl" in model: - model = "stable-diffusion-xl" + model = "stable-diffusion-xl" config = checker.MODEL_CONFIG @@ -398,7 +398,7 @@ def gui(i): bench_uid = bench_meta.get('uid','') st_inputs_custom = {} - + bench_input = bench_meta.get('bench_input', {}) end_html = '' @@ -410,7 +410,7 @@ def gui(i): # Here we can update params v = compute_meta.get('mlperf_inference_device') - if v!=None and v!='': + if v!=None and v!='': inp['device']['force'] = v if v in ['tpu', 'gaudi']: @@ -430,7 +430,7 @@ def gui(i): device = r.get('value2') inp['device']['force'] = device - + if device == 'cpu': inp['implementation']['choices']=['mlcommons-python', 'mlcommons-cpp', 'intel', 'ctuning-cpp-tflite'] @@ -465,7 +465,7 @@ def gui(i): if compliance == 'yes': st.markdown('*:red[See [online table with required compliance tests](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#5132-inference)].*') - + else: inp[y]['force'] = 'no' @@ -474,13 +474,13 @@ def gui(i): category = r.get('value2') inp['category']['force'] = category - + ############################################################################# # 
Implementation v = bench_input.get('mlperf_inference_implementation') - if v!=None and v!='': + if v!=None and v!='': inp['implementation']['force'] = v else: if device == 'cuda': @@ -586,24 +586,24 @@ def gui(i): elif model == 'rnnt': github_doc_model = 'rnnt' - + elif model.startswith('dlrm-v2-'): github_doc_model = 'dlrm_v2' - + elif model.startswith('gptj-'): github_doc_model = 'gpt-j' - + elif model == 'sdxl': github_doc_model = 'stable-diffusion-xl' - + elif model.startswith('llama2-'): github_doc_model = 'llama2-70b' - + elif model.startswith('mixtral-'): github_doc_model = 'mixtral-8x7b' if github_doc_model == '': github_doc_model = model - + model_cm_url='https://github.com/mlcommons/ck/tree/master/docs/mlperf/inference/{}'.format(github_doc_model) extra_notes_online = '[Extra notes online]({})\n'.format(model_cm_url) @@ -637,7 +637,7 @@ def gui(i): # Benchmark version script_meta_variations = script_meta['variations'] - + choices = [''] + [k for k in script_meta_variations if script_meta_variations[k].get('group','') == 'benchmark-version'] desc = {'choices': choices, 'default':choices[0], 'desc':'Force specific benchmark version?'} r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_version', 'desc':desc}) @@ -651,8 +651,8 @@ def gui(i): if can_have_docker_flag: default_choice = 'yes - run in container' - - choices = [default_choice, 'no - run natively'] + + choices = [default_choice, 'no - run natively'] desc = {'choices': choices, 'default':choices[0], 'desc':'Should CM script prepare and run Docker container in interactive mode to run MLPerf? You can then copy/paste CM commands generated by this GUI to benchmark different models.'} r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_docker', 'desc':desc}) benchmark_docker = r.get('value2') @@ -660,7 +660,7 @@ def gui(i): if benchmark_docker == 'yes - run in container': add_to_st_inputs['@docker']=True add_to_st_inputs['@docker_cache']='no' - + ############################################################################# # Prepare submission st.markdown('---') @@ -687,9 +687,9 @@ def gui(i): x += '*:red[Note that if some results are INVALID due to too short run, you can rerun the same CM command and it should increase the length of the benchmark until you get valid result!]*\n' st.markdown(x) - + st.markdown('---') - + else: inp['submitter']['force']='' inp['clean']['default']=True @@ -701,18 +701,18 @@ def gui(i): measure = r.get('value2') x = '' - if measure == 'Performance': + if measure == 'Performance': x = 'performance-only' - elif measure == 'Accuracy': + elif measure == 'Accuracy': x = 'accuracy-only' - elif measure == 'Find Performance from a short run': + elif measure == 'Find Performance from a short run': x = 'find-performance' - elif measure == 'Performance and Accuracy': + elif measure == 'Performance and Accuracy': x = 'submission' - + params['~~submission-generation']=[x] - + ############################################################################# # Prepare scenario @@ -731,7 +731,7 @@ def gui(i): - + ############################################################################# # Short or full run @@ -790,7 +790,7 @@ def gui(i): # dashboard = r.get('value2', False) dashboard = st.toggle('Output results to W&B dashboard?', value = False) - + if dashboard: params['~dashboard']=['true'] @@ -809,7 +809,7 @@ def gui(i): - + # Hide customization by default params['hide_script_customization'] = True @@ -835,7 
+835,7 @@ def gui(i): '@results_dir':'{{CM_EXPERIMENT_PATH3}}', '@submission_dir':'{{CM_EXPERIMENT_PATH3}}' }) - + inp['repro']['force'] = True extra['use_experiment'] = True @@ -846,7 +846,7 @@ def gui(i): debug = st.toggle('Debug and run MLPerf benchmark natively from command line after CM auto-generates CMD?', value=False) if debug: inp['debug']['force'] = True - + extra['add_to_st_inputs'] = add_to_st_inputs diff --git a/script/run-mlperf-inference-app/run_mobilenet.py b/script/run-mlperf-inference-app/run_mobilenet.py index b5259168a2..abad10a78b 100644 --- a/script/run-mlperf-inference-app/run_mobilenet.py +++ b/script/run-mlperf-inference-app/run_mobilenet.py @@ -101,6 +101,3 @@ if r['return'] > 0: print(r) #exit(1) - - - diff --git a/script/run-mlperf-inference-submission-checker/customize.py b/script/run-mlperf-inference-submission-checker/customize.py index c0e943c75e..72a8928921 100644 --- a/script/run-mlperf-inference-submission-checker/customize.py +++ b/script/run-mlperf-inference-submission-checker/customize.py @@ -12,7 +12,7 @@ def preprocess(i): submission_dir = env.get("CM_MLPERF_INFERENCE_SUBMISSION_DIR", "") version = env.get('CM_MLPERF_SUBMISSION_CHECKER_VERSION','') - + if submission_dir == "": return {'return': 1, 'error': 'Please set --env.CM_MLPERF_INFERENCE_SUBMISSION_DIR'} @@ -20,7 +20,7 @@ def preprocess(i): if ' ' in submitter: return {'return': 1, 'error': 'CM_MLPERF_SUBMITTER cannot contain a space. Please provide a name without space using --submitter input. Given value: {}'.format(submitter)} - if 'CM_MLPERF_SKIP_COMPLIANCE' in env: + if 'CM_MLPERF_SKIP_COMPLIANCE' in env: skip_compliance = " --skip_compliance" else: skip_compliance = "" diff --git a/script/run-mlperf-power-client/customize.py b/script/run-mlperf-power-client/customize.py index 6ec752b803..72ea87648c 100644 --- a/script/run-mlperf-power-client/customize.py +++ b/script/run-mlperf-power-client/customize.py @@ -32,8 +32,8 @@ def preprocess(i): timestamp if 'CM_MLPERF_POWER_MAX_AMPS' in env and 'CM_MLPERF_POWER_MAX_VOLTS' in env: - cmd = cmd + " --max-amps " + env['CM_MLPERF_POWER_MAX_AMPS'] + \ - " --max-volts " + env['CM_MLPERF_POWER_MAX_VOLTS'] + cmd = cmd + " --max-amps " + env['CM_MLPERF_POWER_MAX_AMPS'] + \ + " --max-volts " + env['CM_MLPERF_POWER_MAX_VOLTS'] env['CM_MLPERF_POWER_RUN_CMD'] = cmd diff --git a/script/run-mlperf-training-submission-checker/customize.py b/script/run-mlperf-training-submission-checker/customize.py index 393979b490..1b66bb7515 100644 --- a/script/run-mlperf-training-submission-checker/customize.py +++ b/script/run-mlperf-training-submission-checker/customize.py @@ -10,7 +10,7 @@ def preprocess(i): submission_dir = env.get("CM_MLPERF_SUBMISSION_DIR", "") version = env.get('CM_MLPERF_SUBMISSION_CHECKER_VERSION','v3.1') - + if submission_dir == "": return {'return': 1, 'error': 'Please set CM_MLPERF_SUBMISSION_DIR'} diff --git a/script/run-vllm-server/customize.py b/script/run-vllm-server/customize.py index aeffdc2002..be1a988aa4 100644 --- a/script/run-vllm-server/customize.py +++ b/script/run-vllm-server/customize.py @@ -393,7 +393,7 @@ def preprocess(i): cmd = f"{env['CM_PYTHON_BIN_WITH_PATH']} -m vllm.entrypoints.openai.api_server {cmd_args}" print(cmd) - + env['CM_VLLM_RUN_CMD'] = cmd return {'return':0} diff --git a/script/runtime-system-infos/customize.py b/script/runtime-system-infos/customize.py index 4caee358a6..a21ded5ce8 100644 --- a/script/runtime-system-infos/customize.py +++ b/script/runtime-system-infos/customize.py @@ -3,7 +3,7 @@ import shutil 
 import psutil  # used to measure the system infos(have not tested for obtaining gpu info)
 import csv  # used to write the measurements to csv format as txt file
-from datetime import datetime, timezone
+from datetime import datetime, timezone
 import time
 import signal
 import sys
@@ -34,7 +34,7 @@ def preprocess(i):
     if env.get("CM_RUN_DIR", "") == "":
         env['CM_RUN_DIR'] = os.getcwd()
-
+
     logs_dir = env.get('CM_LOGS_DIR', env['CM_RUN_DIR'])

     log_json_file_path = os.path.join(logs_dir, 'sys_utilisation_info.txt')
@@ -50,7 +50,7 @@ def preprocess(i):
     csv_headers = ['timestamp', 'cpu_utilisation', 'total_memory_gb', 'used_memory_gb']

     # done to be made available to signal_handler function in case of kill signals
-    # as of now handles for only SIGTERM
+    # as of now handles for only SIGTERM
     global f
     while True:
         with open(log_json_file_path, 'a', newline='') as f:
diff --git a/script/set-sqlite-dir/code.py b/script/set-sqlite-dir/code.py
index 319f23a92f..dcff6e4a82 100644
--- a/script/set-sqlite-dir/code.py
+++ b/script/set-sqlite-dir/code.py
@@ -1,2 +1 @@
 import sqlite3
-
diff --git a/script/set-venv/customize.py b/script/set-venv/customize.py
index a8517a366e..1763fb00d3 100644
--- a/script/set-venv/customize.py
+++ b/script/set-venv/customize.py
@@ -17,7 +17,7 @@ def preprocess(i):
     cur_dir = os.getcwd()

     name = env.get('CM_NAME', '')
-    if name == '':
+    if name == '':
         artifacts = i.get('input', {}).get('artifacts', [])
         if len(artifacts)>0:
             name = artifacts[0]
diff --git a/script/test-cm-core/src/script/test_docker.py b/script/test-cm-core/src/script/test_docker.py
index 9473997ad5..0663cd54e4 100644
--- a/script/test-cm-core/src/script/test_docker.py
+++ b/script/test-cm-core/src/script/test_docker.py
@@ -3,9 +3,9 @@
 import cmind as cm
 import check as checks

-r = cm.access({'action':'run',
-               'automation':'script',
-               'tags': 'run,docker,container',
+r = cm.access({'action':'run',
+               'automation':'script',
+               'tags': 'run,docker,container',
                'add_deps_recursive': {
                    'compiler': {'tags': "gcc"}
                },
@@ -15,14 +15,14 @@
                    'CM_DOCKER_RUN_SCRIPT_TAGS': 'app,image-classification,onnx,python',
                    'CM_DOCKER_IMAGE_BASE': 'ubuntu:22.04',
                    'CM_DOCKER_IMAGE_REPO': 'cknowledge'
-               },
+               },
                'quiet': 'yes'
               })
 checks.check_return(r)

-r = cm.access({'action':'run',
-               'automation':'script',
-               'tags': 'run,docker,container',
+r = cm.access({'action':'run',
+               'automation':'script',
+               'tags': 'run,docker,container',
                'add_deps_recursive': {
                    'compiler': {'tags': "gcc"}
                },
@@ -32,7 +32,7 @@
                    'CM_DOCKER_RUN_SCRIPT_TAGS': 'app,image-classification,onnx,python',
                    'CM_DOCKER_IMAGE_BASE': 'ubuntu:24.04',
                    'CM_DOCKER_IMAGE_REPO': 'local'
-               },
+               },
                'quiet': 'yes'
               })
 checks.check_return(r)
diff --git a/script/test-cm-core/src/tutorials/test_tutorial_tvm.py b/script/test-cm-core/src/tutorials/test_tutorial_tvm.py
index 930e3622df..9bf64562a6 100644
--- a/script/test-cm-core/src/tutorials/test_tutorial_tvm.py
+++ b/script/test-cm-core/src/tutorials/test_tutorial_tvm.py
@@ -21,4 +21,3 @@
                'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', \
                'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'})
 checks.check_return(r)
-
diff --git a/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_ge.py b/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_ge.py
index 0c9a4b9c33..7876adff54 100644
--- a/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_ge.py
+++ b/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_ge.py
@@ -18,4 +18,3 @@
                'submitter': 'Community', 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', \
                'device': 'cpu', 'scenario': 'Offline', 'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'})
 checks.check_return(r)
-
diff --git a/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_vm.py b/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_vm.py
index 81069194d4..74ead5c364 100644
--- a/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_vm.py
+++ b/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_vm.py
@@ -20,4 +20,3 @@
                'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', \
                'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'})
 checks.check_return(r)
-
diff --git a/script/test-debug/python/main.py b/script/test-debug/python/main.py
index 0dfdf30f56..f913765647 100644
--- a/script/test-debug/python/main.py
+++ b/script/test-debug/python/main.py
@@ -22,4 +22,3 @@
 print ('')

 print ("Hello World 2")
-
diff --git a/setup.py b/setup.py
index 50e94bae6a..ce4f44890a 100644
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@
 import os
 import shutil

-# Try to use importlib.metadata for Python 3.8+
+# Try to use importlib.metadata for Python 3.8+
 try:
     if sys.version_info >= (3, 8):
         from importlib.metadata import version, PackageNotFoundError
@@ -48,11 +48,11 @@ def is_package_installed(self, package_name):
         except PackageNotFoundError:
             return False
-
+
     def install_system_packages(self):
         # List of packages to install via system package manager
         packages = []
-
+
         git_status = self.command_exists('git')
         if not git_status:
             packages.append("git")
@@ -71,7 +71,7 @@ def install_system_packages(self):
                 pass
             else:
                 packages.append("python3-venv")
-
+
         if packages:
             if self.system == 'Linux' or self.system == 'Darwin':
                 manager, details = self.get_package_manager_details()
@@ -123,7 +123,7 @@ def get_package_manager_details(self):
     # Checks if command exists(for installing required packages).
     # If the command exists, which returns 0, making the function return True.
     # If the command does not exist, which returns a non-zero value, making the function return False.
-    # NOTE: The standard output and standard error streams are redirected to PIPES so that it could be captured in future if needed.
+    # NOTE: The standard output and standard error streams are redirected to PIPES so that it could be captured in future if needed.
     def command_exists(self, command):
         if self.system == "Linux" or self.system == 'Darwin':
             return subprocess.call(['which', command], stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
@@ -136,10 +136,10 @@ def custom_function(self):
         r = cmind.access({'action':'pull', 'automation':'repo', 'artifact':'mlcommons@cm4mlops', 'branch': 'mlperf-inference'})
         print(r)
         if r['return'] > 0:
-            return r['return']
-
+            return r['return']
+
     def get_sys_platform(self):
-        self.system = platform.system()
+        self.system = platform.system()

 # Read long description and version
 def read_file(file_name, default=""):
diff --git a/tests/script/check.py b/tests/script/check.py
index aba7f7831c..26a86a309e 100644
--- a/tests/script/check.py
+++ b/tests/script/check.py
@@ -21,4 +21,3 @@ def check_key_value(d, key, value, absent_ok=False):
         raise Exception(f"{key} is missing. Current values are {d}")
     elif d[key] != value:
         raise Exception(f"{key} is not having the expected value of {value}. Current value is {d[key]}")
-
diff --git a/tests/script/test_docker.py b/tests/script/test_docker.py
index 6b95143aae..3ea13608a3 100644
--- a/tests/script/test_docker.py
+++ b/tests/script/test_docker.py
@@ -3,19 +3,19 @@
 import cmind as cm
 import check as checks

-r = cm.access({'action':'run',
-               'automation':'script',
-               'tags': 'run,docker,container',
+r = cm.access({'action':'run',
+               'automation':'script',
+               'tags': 'run,docker,container',
                'add_deps_recursive': {
                    'compiler': {'tags': "gcc"}
-               },
+               },
                'image_name':'cm-script-app-image-classification-onnx-py',
                'env': {
                    'CM_DOCKER_RUN_SCRIPT_TAGS': 'app,image-classification,onnx,python',
                    'CM_MLOPS_REPO': 'mlcommons@cm4mlops',
                    'CM_MLOPS_REPO_BRANCH': 'mlperf-inference',
                    'CM_DOCKER_IMAGE_BASE': 'ubuntu:22.04'
-               },
+               },
                'quiet': 'yes'
               })
diff --git a/tests/tutorials/test_tutorial_tvm.py b/tests/tutorials/test_tutorial_tvm.py
index 0d02b87a55..f3857b8fad 100644
--- a/tests/tutorials/test_tutorial_tvm.py
+++ b/tests/tutorials/test_tutorial_tvm.py
@@ -21,4 +21,3 @@
                'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', \
                'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'})
 checks.check_return(r)
-
diff --git a/tests/tutorials/test_tutorial_tvm_pip_ge.py b/tests/tutorials/test_tutorial_tvm_pip_ge.py
index 47180fa774..adb7f960e8 100644
--- a/tests/tutorials/test_tutorial_tvm_pip_ge.py
+++ b/tests/tutorials/test_tutorial_tvm_pip_ge.py
@@ -18,4 +18,3 @@
                'submitter': 'Community', 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', \
                'device': 'cpu', 'scenario': 'Offline', 'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'})
 checks.check_return(r)
-
diff --git a/tests/tutorials/test_tutorial_tvm_pip_vm.py b/tests/tutorials/test_tutorial_tvm_pip_vm.py
index b9e47152af..208185fda7 100644
--- a/tests/tutorials/test_tutorial_tvm_pip_vm.py
+++ b/tests/tutorials/test_tutorial_tvm_pip_vm.py
@@ -20,4 +20,3 @@
                'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', \
                'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'})
 checks.check_return(r)
-

From 04fee4a6d946bf15bc86c0b01024a490c29d84a9 Mon Sep 17 00:00:00 2001
From: mlcommons-bot
Date: Sat, 23 Nov 2024 16:28:31 +0000
Subject: [PATCH 2/3] [Automated Commit] Format Codebase

---
 automation/script/module.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/automation/script/module.py b/automation/script/module.py
index 4b40d291a8..ab768f1ff4 100644
--- a/automation/script/module.py
+++ b/automation/script/module.py
@@ -1755,7 +1755,6 @@ def _run(self, i):
                             x for x in cached_tags if not x.startswith('version-')]
                         cached_tags.append('version-' + r['version'])
-
                     if len(r.get('add_extra_cache_tags', [])) > 0:
                         for t in r['add_extra_cache_tags']:
                             if t not in cached_tags:
@@ -2131,7 +2130,6 @@ def _run(self, i):
         # Check if save json to file
         if repro_prefix != '':
-
             with open(repro_prefix + '-README-cm.md', 'w', encoding='utf-8') as f:
                 f.write(readme)
@@ -5429,7 +5427,6 @@ def prepare_and_run_script_with_postprocessing(i, postprocess="postprocess"):
             logging.info(r['string'])
             logging.info("")
-
     # Check where to report errors and failures
     repo_to_report = run_state.get(
         'script_entry_repo_to_report_errors', '')

From d8c521789c92fb409d7c5b65cdd309efcc492112 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sat, 23 Nov 2024 16:44:00 +0000
Subject: [PATCH 3/3] Update VERSION