Revert "Sync Mlperf inference" #633

Merged: 1 commit, Nov 30, 2024

14 changes: 4 additions & 10 deletions .github/workflows/format.yml
@@ -26,22 +26,16 @@ jobs:
python3 -m pip install autopep8
for FILE in $(git diff --name-only ${{ github.event.before }} | grep -E '.*\.py$')
do
# Check if the file still exists in the working tree
if [ -f "$FILE" ]; then
autopep8 --in-place -a "$FILE"
git add "$FILE"
fi
autopep8 --in-place -a $FILE
git add $FILE
done

- name: Format modified C++ files
run: |
for FILE in $(git diff --name-only ${{ github.event.before }} | grep -E '.*\.(cc|cpp|h|hpp)$')
do
# Check if the file still exists in the working tree
if [ -f "$FILE" ]; then
clang-format -i -style=file $FILE
git add $FILE
fi
clang-format -i -style=file $FILE
git add $FILE
done

- name: Commit and create PR
1 change: 0 additions & 1 deletion automation/script/README-extra.md
@@ -2,7 +2,6 @@

# CM "script" automation


<details>
<summary>Click here to see the table of contents.</summary>

85 changes: 3 additions & 82 deletions automation/script/module.py
@@ -1311,7 +1311,7 @@ def _run(self, i):

r = self._call_run_deps(prehook_deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive,
recursion_spaces + extra_recursion_spaces,
remembered_selections, variation_tags_string, True, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state)
remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state)
if r['return'] > 0:
return r

@@ -1372,7 +1372,7 @@ def _run(self, i):

r = self._call_run_deps(posthook_deps, self.local_env_keys, clean_env_keys_post_deps, env, state, const, const_state, add_deps_recursive,
recursion_spaces + extra_recursion_spaces,
remembered_selections, variation_tags_string, True, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state)
remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state)
if r['return'] > 0:
return r

@@ -1383,7 +1383,7 @@ def _run(self, i):
# Check chain of post dependencies on other CM scripts
r = self._call_run_deps(post_deps, self.local_env_keys, clean_env_keys_post_deps, env, state, const, const_state, add_deps_recursive,
recursion_spaces + extra_recursion_spaces,
remembered_selections, variation_tags_string, True, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state)
remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state)
if r['return'] > 0:
return r

@@ -1605,82 +1605,6 @@ def _run(self, i):
if r['return'] > 0:
return r

# Prepare common input to prepare and run script
run_script_input = {
'path': path,
'bat_ext': bat_ext,
'os_info': os_info,
'const': const,
'state': state,
'const_state': const_state,
'reuse_cached': reuse_cached,
'recursion': recursion,
'recursion_spaces': recursion_spaces,
'remembered_selections': remembered_selections,
'tmp_file_run_state': self.tmp_file_run_state,
'tmp_file_run_env': self.tmp_file_run_env,
'tmp_file_state': self.tmp_file_state,
'tmp_file_run': self.tmp_file_run,
'local_env_keys': self.local_env_keys,
'local_env_keys_from_meta': local_env_keys_from_meta,
'posthook_deps': posthook_deps,
'add_deps_recursive': add_deps_recursive,
'remembered_selections': remembered_selections,
'found_script_tags': found_script_tags,
'variation_tags_string': variation_tags_string,
'found_cached': False,
'debug_script_tags': debug_script_tags,
'verbose': verbose,
'meta': meta,
'self': self
}

# Check if pre-process and detect
if str(meta.get('predeps', 'True')).lower() not in ["0", "false", "no"] and os.path.isfile(
path_to_customize_py): # possible duplicate execution - needs fix
r = utils.load_python_module(
{'path': path, 'name': 'customize'})
if r['return'] > 0:
return r

customize_code = r['code']

customize_common_input = {
'input': i,
'automation': self,
'artifact': script_artifact,
'customize': script_artifact.meta.get('customize', {}),
'os_info': os_info,
'recursion_spaces': recursion_spaces,
'script_tags': script_tags,
'variation_tags': variation_tags
}
run_script_input['customize_code'] = customize_code
run_script_input['customize_common_input'] = customize_common_input

if repro_prefix != '':
run_script_input['repro_prefix'] = repro_prefix
if ignore_script_error:
run_script_input['ignore_script_error'] = True
if 'predeps' in dir(customize_code) and not fake_run:

logging.debug(
recursion_spaces +
' - Running preprocess ...')

run_script_input['run_state'] = run_state

ii = copy.deepcopy(customize_common_input)
ii['env'] = env
ii['state'] = state
ii['meta'] = meta
# may need to detect versions in multiple paths
ii['run_script_input'] = run_script_input

r = customize_code.predeps(ii)
if r['return'] > 0:
return r

# Check chain of dependencies on other CM scripts
if len(deps) > 0:
logging.debug(recursion_spaces +
@@ -1702,8 +1626,6 @@ def _run(self, i):
# Clean some output files
clean_tmp_files(clean_files, recursion_spaces)

# Repeated code
'''
# Prepare common input to prepare and run script
run_script_input = {
'path': path,
@@ -1733,7 +1655,6 @@ def _run(self, i):
'meta': meta,
'self': self
}
'''
if os.path.isfile(
path_to_customize_py): # possible duplicate execution - needs fix
r = utils.load_python_module(
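For context on the block removed above: in the CM automation, a script's customize.py can define optional hooks (such as predeps) that module.py calls with a prepared input dictionary and that signal success or failure through a 'return' code. A minimal sketch of such a hook, assuming only the conventions visible in this diff (the exact keys in `ii` are set by the caller, and the environment key used below is purely illustrative):

```python
# customize.py -- illustrative sketch only, not part of this PR.

def predeps(ii):
    # 'ii' is the deep-copied customize_common_input extended with
    # 'env', 'state', 'meta' and 'run_script_input' (see the removed block above).
    env = ii['env']

    # Hypothetical example: set a default env variable before deps are resolved.
    env.setdefault('CM_EXAMPLE_PREDEPS_DONE', 'yes')

    # CM convention: 'return' == 0 means success; a non-zero value is an error,
    # usually accompanied by an 'error' string in the same dictionary.
    return {'return': 0}
```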
32 changes: 32 additions & 0 deletions challenge/add-derived-metrics-to-mlperf-inference/README.md
@@ -0,0 +1,32 @@
### Challenge

Check past MLPerf inference results in [this MLCommons repository](https://github.com/mlcommons/cm4mlperf-results)
and add derived metrics such as results per number of cores, power efficiency, device cost, operational costs, etc.

Add clock speed as a third dimension to graphs and improve the bar graph visualization.

Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
to run reference implementations of MLPerf inference benchmarks
using the CM automation language and use them as a base for your developments.

Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.


### Prizes

* *All contributors will receive 1 point for submitting valid results for 1 complete benchmark on one system.*
* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*


### Organizers

* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)

### Results

All accepted results will be publicly available in the CM format with derived metrics
in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
in the [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
and at the official [MLCommons website](https://mlcommons.org).
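To make the task concrete, a minimal sketch of the kind of derived-metric computation this challenge asks for is shown below; the field names (result_qps, num_cores, power_watts, device_cost_usd) are hypothetical placeholders, not the actual schema of the results repository:

```python
# Illustrative sketch: add derived metrics to a single result record.
# Field names are assumptions, not the schema of mlcommons/cm4mlperf-results.

def add_derived_metrics(record: dict) -> dict:
    qps = record.get('result_qps')         # measured throughput (samples/s)
    cores = record.get('num_cores')        # number of CPU cores in the system
    power = record.get('power_watts')      # average power, if reported
    cost = record.get('device_cost_usd')   # approximate device cost

    derived = {}
    if qps and cores:
        derived['qps_per_core'] = qps / cores
    if qps and power:
        derived['qps_per_watt'] = qps / power       # power efficiency
    if qps and cost:
        derived['qps_per_dollar'] = qps / cost      # cost efficiency

    record['derived_metrics'] = derived
    return record
```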
22 changes: 22 additions & 0 deletions challenge/add-derived-metrics-to-mlperf-inference/_cm.json
@@ -0,0 +1,22 @@
{
"alias": "add-derived-metrics-to-mlperf-inference",
"automation_alias": "challenge",
"automation_uid": "3d84abd768f34e08",
"date_close_extension": true,
"date_open": "20240204",
"points": 2,
"tags": [
"modularize",
"optimize",
"reproduce",
"replicate",
"benchmark",
"automate",
"derived-metrics",
"mlperf-inference",
"mlperf-inference-derived-metrics"
],
"title": "Add derived metrics to MLPerf inference benchmarks (power efficiency, results / No of cores, costs, etc)",
"trophies": true,
"uid": "c65b56d7770946ee"
}
@@ -0,0 +1,4 @@
20240220:
* A prototype of a GUI to generate CM commands to run MLPerf inference benchmarks is ready: [link](https://access.cknowledge.org/playground/?action=howtorun&bench_uid=39877bb63fb54725)
* A prototype of the infrastructure to reproduce MLPerf inference benchmark results is ready: [link](https://access.cknowledge.org/playground/?action=reproduce)
* On-going efforts: https://github.com/mlcommons/ck/issues/1052
21 changes: 21 additions & 0 deletions challenge/automate-mlperf-inference-v3.1-and-v4.0-2024/_cm.yaml
@@ -0,0 +1,21 @@
alias: automate-mlperf-inference-v3.1-and-v4.0-2024
uid: f89f152fc2614240

automation_alias: challenge
automation_uid: 3d84abd768f34e08

title: Add MLCommons CM workflows and unified interface to automate MLPerf inference v3.1 and v4.0 benchmarks (Intel, Nvidia, Qualcomm, Arm64, TPU ...)

date_open: '20231215'
date_close: '20240315'

hot: true

tags:
- automate
- mlperf-inference-v3.1-and-v4.0
- 2024

experiments:
- tags: mlperf-inference,v3.1
- tags: mlperf-inference,v4.0
@@ -0,0 +1,10 @@
This challenge is under preparation. You can read about the motivation behind this challenge in our [invited talk at MLPerf-Bench @ HPCA'24](https://doi.org/10.5281/zenodo.10786893).

We plan to extend the [MLCommons CM framework](https://github.com/mlcommons/ck)
to automatically compose high-performance and cost-efficient AI systems
based on MLPerf inference v4.0 results and [CM automation recipes](https://access.cknowledge.org/playground/?action=scripts).

* A prototype of a GUI to generate CM commands to run MLPerf inference benchmarks is ready: [link](https://access.cknowledge.org/playground/?action=howtorun&bench_uid=39877bb63fb54725)
* A prototype of the infrastructure to reproduce MLPerf inference benchmark results is ready: [link](https://access.cknowledge.org/playground/?action=reproduce)

Contact the [MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) for more details.
@@ -0,0 +1,25 @@
alias: compose-high-performance-and-cost-efficient-ai-systems-based-on-mlperf-4.0-2024
uid: 7c983102d89e4869

automation_alias: challenge
automation_uid: 3d84abd768f34e08

title: "Compose high-performance and cost-efficint AI systems using MLCommons' Collective Mind and MLPerf inference"

date_open: '20240101'

tags:
- compose
- ai
- systems
- mlperf-inference-v4.0
- cm
- mlcommons-cm
- mlperf
- v4.0
- performance
- energy
- cost

experiments:
- tags: mlperf-inference,v4.0
@@ -0,0 +1,30 @@
### Challenge

Connect CM workflows to run MLPerf inference benchmarks with [OpenBenchmarking.org](https://openbenchmarking.org).

Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
to run reference implementations of MLPerf inference benchmarks
using the CM automation language and use them as a base for your developments.

Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.


### Prizes

* *All contributors will receive 1 point for submitting valid results for 1 complete benchmark on one system.*
* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*



### Organizers

* Michael Larabel
* Grigori Fursin
* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)

### Results

Results will be available at [OpenBenchmarking.org](https://openbenchmarking.org)
and the [MLCommons CK playground](https://access.cknowledge.org/playground/?action=experiments).
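A possible starting point for such an integration is to drive a CM workflow from Python, for example from an OpenBenchmarking.org test-profile wrapper. The sketch below assumes the cmind package (pip install cmind); the script tags are illustrative, so consult the documentation linked above for the exact MLPerf inference tags and options:

```python
# Illustrative sketch: invoke a CM script from Python and check the result.
import cmind

r = cmind.access({
    'action': 'run',
    'automation': 'script',
    # Placeholder tags -- see the MLPerf inference documentation for real ones.
    'tags': 'run-mlperf,inference',
    'quiet': True
})

# CM convention: 'return' == 0 means success; otherwise 'error' describes the failure.
if r['return'] > 0:
    raise RuntimeError(r.get('error', 'CM script failed'))
```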
@@ -0,0 +1,22 @@
{
"alias": "connect-mlperf-inference-v3.1-with-openbenchmarking",
"automation_alias": "challenge",
"automation_uid": "3d84abd768f34e08",
"date_open": "20240101",
"date_close_extension": true,
"points": 2,
"tags": [
"modularize",
"optimize",
"reproduce",
"replicate",
"benchmark",
"automate",
"openbenchmarking",
"mlperf-inference",
"mlperf-inference-openbenchmarking"
],
"title": "Run MLPerf inference benchmarks using CM via OpenBenchmarking.org",
"trophies": true,
"uid": "534592626eb44efe"
}
23 changes: 23 additions & 0 deletions challenge/connect-mlperf-with-medperf/README.md
@@ -0,0 +1,23 @@
### Challenge

Evaluate models from the [MLCommons MedPerf platform](https://www.medperf.org) in terms of latency, throughput, power consumption and other metrics
using MLPerf loadgen and the MLCommons CM automation language.

See the [Nature 2023 article about MedPerf](https://www.nature.com/articles/s42256-023-00652-2)
and [ACM REP'23 keynote about CM](https://doi.org/10.5281/zenodo.8105339) to learn more about these projects.

Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
to run reference implementations of MLPerf inference benchmarks
using the CM automation language and use them as a base for your developments.


### Prizes

* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*


### Organizers

* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
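For orientation, a minimal sketch of wrapping a model behind the MLPerf LoadGen Python bindings is shown below. It assumes a recent mlperf_loadgen build (installable e.g. as the mlcommons-loadgen package); the run_model function and sample counts are placeholders, and the exact API has changed slightly across LoadGen versions:

```python
# Illustrative sketch: measure a model with MLPerf LoadGen (PerformanceOnly mode).
import mlperf_loadgen as lg

def run_model(sample_index):
    # Placeholder: run the MedPerf model on one preloaded sample.
    pass

def issue_queries(query_samples):
    responses = []
    for qs in query_samples:
        run_model(qs.index)
        # Report completion; the response data pointer/size are omitted in this sketch.
        responses.append(lg.QuerySampleResponse(qs.id, 0, 0))
    lg.QuerySamplesComplete(responses)

def flush_queries():
    pass

settings = lg.TestSettings()
settings.scenario = lg.TestScenario.SingleStream
settings.mode = lg.TestMode.PerformanceOnly

sut = lg.ConstructSUT(issue_queries, flush_queries)
qsl = lg.ConstructQSL(1024, 1024, lambda s: None, lambda s: None)  # placeholder sample counts
lg.StartTest(sut, qsl, settings)
lg.DestroyQSL(qsl)
lg.DestroySUT(sut)
```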
26 changes: 26 additions & 0 deletions challenge/connect-mlperf-with-medperf/_cm.json
@@ -0,0 +1,26 @@
{
"alias": "connect-mlperf-with-medperf",
"automation_alias": "challenge",
"automation_uid": "3d84abd768f34e08",
"date_close_extension": true,
"date_open": "20240105",
"points": 2,
"tags": [
"modularize",
"optimize",
"reproduce",
"replicate",
"benchmark",
"automate",
"medperf",
"mlperf-inference",
"mlperf-inference-medperf",
"mlperf-inference-medperf",
"mlperf-inference-medperf-v3.1",
"mlperf-inference-medperf-v3.1-2023",
"v3.1"
],
"title": "Connect MedPerf with MLPerf and CM",
"trophies": true,
"uid": "c26d1fbf89164728"
}