From 0e1717e55242f6c08efc51eb5cb7ecc39445a5a2 Mon Sep 17 00:00:00 2001 From: Maksym H <1177472+mordamax@users.noreply.github.com> Date: Tue, 8 Oct 2024 15:20:19 +0100 Subject: [PATCH 01/10] [DNM] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b8ddf8427c9f..1952c3f29d1e 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ - +1
![SDK Logo](./docs/images/Polkadot_Logo_Horizontal_Pink_White.png#gh-dark-mode-only) From bcf5064f5fc68d4d9db6dd86f9852732c5d27999 Mon Sep 17 00:00:00 2001 From: GitHub Action Date: Tue, 8 Oct 2024 14:23:35 +0000 Subject: [PATCH 02/10] Update from mordamax running command 'prdoc --audience node_dev --bump patch' --- prdoc/pr_5977.prdoc | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 prdoc/pr_5977.prdoc diff --git a/prdoc/pr_5977.prdoc b/prdoc/pr_5977.prdoc new file mode 100644 index 000000000000..f22027ac31d7 --- /dev/null +++ b/prdoc/pr_5977.prdoc @@ -0,0 +1,6 @@ +title: '[DNM] testing /cmd' +doc: +- audience: + - Node Dev + description: null +crates: [] From 75f511e810ead8a94dd01805a1d483ca6f2cce1e Mon Sep 17 00:00:00 2001 From: Maksym H Date: Tue, 8 Oct 2024 22:42:03 +0100 Subject: [PATCH 03/10] flip continue-on-fail to fail-fast, save output to file + artifacts --- .github/scripts/cmd/cmd.py | 47 ++++++++++++++++++--------- .github/scripts/cmd/test_cmd.py | 20 ++++++------ .github/workflows/cmd.yml | 33 +++++++++++++++++-- docs/contributor/commands-readme.md | 5 --- docs/contributor/weight-generation.md | 34 ++++++++++--------- 5 files changed, 89 insertions(+), 50 deletions(-) diff --git a/.github/scripts/cmd/cmd.py b/.github/scripts/cmd/cmd.py index 01f36ea375c5..6a624bf4237b 100755 --- a/.github/scripts/cmd/cmd.py +++ b/.github/scripts/cmd/cmd.py @@ -15,12 +15,21 @@ runtimeNames = list(map(lambda x: x['name'], runtimesMatrix)) common_args = { - '--continue-on-fail': {"action": "store_true", "help": "Won't exit(1) on failed command and continue with next steps. "}, '--quiet': {"action": "store_true", "help": "Won't print start/end/failed messages in PR"}, '--clean': {"action": "store_true", "help": "Clean up the previous bot's & author's comments in PR"}, '--image': {"help": "Override docker image '--image docker.io/paritytech/ci-unified:latest'"}, } +def print_and_log(message, output_file='/tmp/cmd/command_output.log'): + print(message) + with open(output_file, 'a') as f: + f.write(message + '\n') + +def setup_logging(): + if not os.path.exists('/tmp/cmd'): + os.makedirs('/tmp/cmd') + open('/tmp/cmd/command_output.log', 'w') + parser = argparse.ArgumentParser(prog="/cmd ", description='A command runner for polkadot-sdk repo', add_help=False) parser.add_argument('--help', action=_HelpAction, help='help for help if you need some help') # help for help for arg, config in common_args.items(): @@ -28,6 +37,8 @@ subparsers = parser.add_subparsers(help='a command to run', dest='command') +setup_logging() + """ BENCH """ @@ -39,8 +50,8 @@ Runs benchmarks for pallet_balances and pallet_multisig for all runtimes which have these pallets. 
**--quiet** makes it to output nothing to PR but reactions %(prog)s --pallet pallet_balances pallet_xcm_benchmarks::generic --quiet - Runs bench for all pallets for westend runtime and continues even if some benchmarks fail - %(prog)s --runtime westend --continue-on-fail + Runs bench for all pallets for westend runtime and fails fast on first failed benchmark + %(prog)s --runtime westend --fail-fast Does not output anything and cleans up the previous bot's & author command triggering comments in PR %(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean @@ -53,6 +64,7 @@ parser_bench.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*', default=runtimeNames) parser_bench.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[]) +parser_bench.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true') """ FMT @@ -156,7 +168,9 @@ def main(): manifest_path = os.popen(search_manifest_path).read() if not manifest_path: print(f'-- pallet {pallet} not found in dev runtime') - exit(1) + if args.fail_fast: + print_and_log(f'Error: {pallet} not found in dev runtime') + sys.exit(1) package_dir = os.path.dirname(manifest_path) print(f'-- package_dir: {package_dir}') print(f'-- manifest_path: {manifest_path}') @@ -186,8 +200,9 @@ def main(): f"{config['bench_flags']}" print(f'-- Running: {cmd} \n') status = os.system(cmd) - if status != 0 and not args.continue_on_fail: - print(f'Failed to benchmark {pallet} in {runtime}') + + if status != 0 and args.fail_fast: + print_and_log(f'❌ Failed to benchmark {pallet} in {runtime}') sys.exit(1) # Otherwise collect failed benchmarks and print them at the end @@ -198,14 +213,14 @@ def main(): successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet] if failed_benchmarks: - print('❌ Failed benchmarks of runtimes/pallets:') + print_and_log('❌ Failed benchmarks of runtimes/pallets:') for runtime, pallets in failed_benchmarks.items(): - print(f'-- {runtime}: {pallets}') + print_and_log(f'-- {runtime}: {pallets}') if successful_benchmarks: - print('✅ Successful benchmarks of runtimes/pallets:') + print_and_log('✅ Successful benchmarks of runtimes/pallets:') for runtime, pallets in successful_benchmarks.items(): - print(f'-- {runtime}: {pallets}') + print_and_log(f'-- {runtime}: {pallets}') elif args.command == 'fmt': command = f"cargo +nightly fmt" @@ -213,8 +228,8 @@ def main(): nightly_status = os.system(f'{command}') taplo_status = os.system('taplo format --config .config/taplo.toml') - if (nightly_status != 0 or taplo_status != 0) and not args.continue_on_fail: - print('❌ Failed to format code') + if (nightly_status != 0 or taplo_status != 0): + print_and_log('❌ Failed to format code') sys.exit(1) elif args.command == 'update-ui': @@ -222,15 +237,15 @@ def main(): print(f'Updating ui with `{command}`') status = os.system(f'{command}') - if status != 0 and not args.continue_on_fail: - print('❌ Failed to format code') + if status != 0: + print_and_log('❌ Failed to update ui') sys.exit(1) elif args.command == 'prdoc': # Call the main function from ./github/scripts/generate-prdoc.py module exit_code = generate_prdoc.main(args) - if exit_code != 0 and not args.continue_on_fail: - print('❌ Failed to generate prdoc') + if exit_code != 0: + print_and_log('❌ Failed to generate prdoc') sys.exit(exit_code) print('🚀 Done') diff --git a/.github/scripts/cmd/test_cmd.py b/.github/scripts/cmd/test_cmd.py index 
0316c7ff1bb4..faad3f261b9a 100644 --- a/.github/scripts/cmd/test_cmd.py +++ b/.github/scripts/cmd/test_cmd.py @@ -96,7 +96,7 @@ def test_bench_command_normal_execution_all_runtimes(self): command='bench', runtime=list(map(lambda x: x['name'], mock_runtimes_matrix)), pallet=['pallet_balances'], - continue_on_fail=False, + fail_fast=True, quiet=False, clean=False, image=None @@ -153,7 +153,7 @@ def test_bench_command_normal_execution(self): command='bench', runtime=['westend'], pallet=['pallet_balances', 'pallet_staking'], - continue_on_fail=False, + fail_fast=True, quiet=False, clean=False, image=None @@ -196,7 +196,7 @@ def test_bench_command_normal_execution_xcm(self): command='bench', runtime=['westend'], pallet=['pallet_xcm_benchmarks::generic'], - continue_on_fail=False, + fail_fast=True, quiet=False, clean=False, image=None @@ -232,7 +232,7 @@ def test_bench_command_two_runtimes_two_pallets(self): command='bench', runtime=['westend', 'rococo'], pallet=['pallet_balances', 'pallet_staking'], - continue_on_fail=False, + fail_fast=True, quiet=False, clean=False, image=None @@ -290,7 +290,7 @@ def test_bench_command_one_dev_runtime(self): command='bench', runtime=['dev'], pallet=['pallet_balances'], - continue_on_fail=False, + fail_fast=True, quiet=False, clean=False, image=None @@ -327,7 +327,7 @@ def test_bench_command_one_cumulus_runtime(self): command='bench', runtime=['asset-hub-westend'], pallet=['pallet_assets'], - continue_on_fail=False, + fail_fast=True, quiet=False, clean=False, image=None @@ -362,7 +362,7 @@ def test_bench_command_one_cumulus_runtime_xcm(self): command='bench', runtime=['asset-hub-westend'], pallet=['pallet_xcm_benchmarks::generic', 'pallet_assets'], - continue_on_fail=False, + fail_fast=True, quiet=False, clean=False, image=None @@ -400,7 +400,7 @@ def test_bench_command_one_cumulus_runtime_xcm(self): self.mock_system.assert_has_calls(expected_calls, any_order=True) - @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='fmt', continue_on_fail=False), [])) + @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='fmt'), [])) @patch('os.system', return_value=0) def test_fmt_command(self, mock_system, mock_parse_args): with patch('sys.exit') as mock_exit: @@ -410,7 +410,7 @@ def test_fmt_command(self, mock_system, mock_parse_args): mock_system.assert_any_call('cargo +nightly fmt') mock_system.assert_any_call('taplo format --config .config/taplo.toml') - @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='update-ui', continue_on_fail=False), [])) + @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='update-ui'), [])) @patch('os.system', return_value=0) def test_update_ui_command(self, mock_system, mock_parse_args): with patch('sys.exit') as mock_exit: @@ -419,7 +419,7 @@ def test_update_ui_command(self, mock_system, mock_parse_args): mock_exit.assert_not_called() mock_system.assert_called_with('sh ./scripts/update-ui-tests.sh') - @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='prdoc', continue_on_fail=False), [])) + @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='prdoc'), [])) @patch('os.system', return_value=0) def test_prdoc_command(self, mock_system, mock_parse_args): with patch('sys.exit') as mock_exit: diff --git a/.github/workflows/cmd.yml b/.github/workflows/cmd.yml index f8bc7cb5b606..00fec329e49f 100644 --- 
a/.github/workflows/cmd.yml +++ b/.github/workflows/cmd.yml @@ -368,6 +368,19 @@ jobs: git status git diff + if [ -f /tmp/cmd/command_output.log ]; then + CMD_OUTPUT=$(cat /tmp/cmd/command_output.log) + echo "cmd_output=$CMD_OUTPUT" >> $GITHUB_OUTPUT + echo "$CMD_OUTPUT" >> $GITHUB_STEP_SUMMARY + fi + + - name: Upload command output + if: ${{ always() }} + uses: actions/upload-artifact@v4 + with: + name: command-output + path: /tmp/cmd/command_output.log + - name: Commit changes run: | if [ -n "$(git status --porcelain)" ]; then @@ -413,35 +426,49 @@ jobs: uses: actions/github-script@v7 env: SUBWEIGHT: "${{ steps.subweight.outputs.result }}" + CMD_OUTPUT: "${{ steps.cmd.outputs.cmd_output }}" with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | let runUrl = ${{ steps.build-link.outputs.run_url }} let subweight = process.env.SUBWEIGHT; + let cmdOutput = process.env.CMD_OUTPUT; - let subweightCollapsed = subweight + let subweightCollapsed = subweight.trim() !== '' ? `
<details>\n\n<summary>Subweight results:</summary>\n\n${subweight}\n\n</details>
` : ''; + let cmdOutputCollapsed = cmdOutput.trim() !== '' + ? `
<details>\n\n<summary>Command output:</summary>\n\n${cmdOutput}\n\n</details>
` + : ''; + github.rest.issues.createComment({ issue_number: context.issue.number, owner: context.repo.owner, repo: context.repo.repo, - body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has finished ✅ [See logs here](${runUrl})${subweightCollapsed}` + body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has finished ✅ [See logs here](${runUrl})${subweightCollapsed}${cmdOutputCollapsed}` }) - name: Comment PR (Failure) if: ${{ failure() && !contains(github.event.comment.body, '--quiet') }} uses: actions/github-script@v7 + env: + CMD_OUTPUT: "${{ steps.cmd.outputs.cmd_output }}" with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | let jobUrl = ${{ steps.build-link.outputs.job_url }} + let cmdOutput = process.env.CMD_OUTPUT; + + let cmdOutputCollapsed = cmdOutput.trim() !== '' + ? `
<details>\n\n<summary>Command output:</summary>\n\n${cmdOutput}\n\n</details>
` + : ''; + github.rest.issues.createComment({ issue_number: context.issue.number, owner: context.repo.owner, repo: context.repo.repo, - body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has failed ❌! [See logs here](${jobUrl})` + body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has failed ❌! [See logs here](${jobUrl})${cmdOutputCollapsed}` }) - name: Add 😕 reaction on failure diff --git a/docs/contributor/commands-readme.md b/docs/contributor/commands-readme.md index 3a0fadc3bb25..52c554cc7098 100644 --- a/docs/contributor/commands-readme.md +++ b/docs/contributor/commands-readme.md @@ -24,11 +24,6 @@ By default, the Start and End/Failure of the command will be commented with the If you want to avoid, use this flag. Go to [Action Tab](https://github.com/paritytech/polkadot-sdk/actions/workflows/cmd.yml) to see the pipeline status. -2.`--continue-on-fail` to continue running the command even if something inside a command -(like specific pallet weight generation) are failed. -Basically avoids interruption in the middle with `exit 1` -The pipeline logs will include what is failed (like which runtimes/pallets), then you can re-run them separately or not. - 3.`--clean` to clean up all yours and bot's comments in PR relevant to `/cmd` commands. If you run too many commands, or they keep failing, and you're rerunning them again, it's handy to add this flag to keep a PR clean. diff --git a/docs/contributor/weight-generation.md b/docs/contributor/weight-generation.md index 77a570c94530..d5b407d5562d 100644 --- a/docs/contributor/weight-generation.md +++ b/docs/contributor/weight-generation.md @@ -19,51 +19,53 @@ In a PR run the actions through comment: To regenerate all weights (however it will take long, so don't do it unless you really need it), run the following command: + ```sh /cmd bench ``` To generate weights for all pallets in a particular runtime(s), run the following command: + ```sh /cmd bench --runtime kusama polkadot ``` For Substrate pallets (supports sub-modules too): + ```sh /cmd bench --runtime dev --pallet pallet_asset_conversion_ops ``` > **📝 Note**: The action is not being run right-away, it will be queued and run in the next available runner. -So might be quick, but might also take up to 10 mins (That's in control of Github). -Once the action is run, you'll see reaction 👀 on original comment, and if you didn't pass `--quiet` - -it will also send a link to a pipeline when started, and link to whole workflow when finished. +> So might be quick, but might also take up to 10 mins (That's in control of Github). +> Once the action is run, you'll see reaction 👀 on original comment, and if you didn't pass `--quiet` - +> it will also send a link to a pipeline when started, and link to whole workflow when finished. ---- +> **📝 Note**: It will try keep benchmarking even if some pallets failed, with the result of failed/successful pallets. +> +> If you want to fail fast on first failed benchmark, add `--fail-fast` flag to the command. -> **💡Hint #1** : if you run all runtimes or all pallets, it might be that some pallet in the middle is failed -to generate weights, thus it stops (fails) the whole pipeline. -> If you want, you can make it to continue running, even if some pallets are failed, add `--continue-on-fail` -flag to the command. The report will include which runtimes/pallets have failed, then you can re-run -them separately after all is done. 
+--- This way it runs all possible runtimes for the specified pallets, if it finds them in the runtime + ```sh /cmd bench --pallet pallet_balances pallet_xcm_benchmarks::generic pallet_xcm_benchmarks::fungible ``` If you want to run all specific pallet(s) for specific runtime(s), you can do it like this: + ```sh /cmd bench --runtime bridge-hub-polkadot --pallet pallet_xcm_benchmarks::generic pallet_xcm_benchmarks::fungible ``` - -> **💡Hint #2** : Sometimes when you run too many commands, or they keep failing and you're rerunning them again, -it's handy to add `--clean` flag to the command. This will clean up all yours and bot's comments in PR relevant to -/cmd commands. +> **💡Hint #1** : Sometimes when you run too many commands, or they keep failing and you're rerunning them again, +> it's handy to add `--clean` flag to the command. This will clean up all yours and bot's comments in PR relevant to +> /cmd commands. ```sh -/cmd bench --runtime kusama polkadot --pallet=pallet_balances --clean --continue-on-fail +/cmd bench --runtime kusama polkadot --pallet=pallet_balances --clean ``` -> **💡Hint #3** : If you have questions or need help, feel free to tag @paritytech/opstooling (in github comments) -or ping in [matrix](https://matrix.to/#/#command-bot:parity.io) channel. +> **💡Hint #2** : If you have questions or need help, feel free to tag @paritytech/opstooling (in github comments) +> or ping in [matrix](https://matrix.to/#/#command-bot:parity.io) channel. From a4d56188742d689d8fc2f79107fe474816380348 Mon Sep 17 00:00:00 2001 From: Maksym H Date: Tue, 8 Oct 2024 22:45:45 +0100 Subject: [PATCH 04/10] Update README.md --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 1952c3f29d1e..8016b6b37301 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,3 @@ -1
![SDK Logo](./docs/images/Polkadot_Logo_Horizontal_Pink_White.png#gh-dark-mode-only) From 23c65529e45b2b7676050b583fcaa6c4aee5891c Mon Sep 17 00:00:00 2001 From: Maksym H Date: Tue, 8 Oct 2024 22:46:24 +0100 Subject: [PATCH 05/10] Delete pr_5977.prdoc --- prdoc/pr_5977.prdoc | 6 ------ 1 file changed, 6 deletions(-) delete mode 100644 prdoc/pr_5977.prdoc diff --git a/prdoc/pr_5977.prdoc b/prdoc/pr_5977.prdoc deleted file mode 100644 index f22027ac31d7..000000000000 --- a/prdoc/pr_5977.prdoc +++ /dev/null @@ -1,6 +0,0 @@ -title: '[DNM] testing /cmd' -doc: -- audience: - - Node Dev - description: null -crates: [] From 6c5cb681524ef396124530dca7d136066a5ad26e Mon Sep 17 00:00:00 2001 From: Maksym H Date: Wed, 9 Oct 2024 12:44:35 +0100 Subject: [PATCH 06/10] add timeout to cmd --- .github/workflows/cmd.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/cmd.yml b/.github/workflows/cmd.yml index 00fec329e49f..d87aaa55137a 100644 --- a/.github/workflows/cmd.yml +++ b/.github/workflows/cmd.yml @@ -279,6 +279,7 @@ jobs: echo "The repository is ${{ steps.get-pr.outputs.repo }}" cmd: + timeout-minutes: 1800 # 30 hours as it could take a long time to run all the runtimes/pallets needs: [set-image, get-pr-branch] env: JOB_NAME: "cmd" From c1169b06ac051edaaeee909dbe5182a522eaf572 Mon Sep 17 00:00:00 2001 From: Maksym H Date: Wed, 9 Oct 2024 14:47:16 +0100 Subject: [PATCH 07/10] fix examples tasks name by convention --- substrate/bin/node/runtime/src/lib.rs | 2 +- substrate/frame/examples/tasks/src/mock.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index c81921f844bd..ececf0d87b2d 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -2643,7 +2643,7 @@ mod benches { [pallet_contracts, Contracts] [pallet_revive, Revive] [pallet_core_fellowship, CoreFellowship] - [tasks_example, TasksExample] + [pallet_example_tasks, TasksExample] [pallet_democracy, Democracy] [pallet_asset_conversion, AssetConversion] [pallet_election_provider_multi_phase, ElectionProviderMultiPhase] diff --git a/substrate/frame/examples/tasks/src/mock.rs b/substrate/frame/examples/tasks/src/mock.rs index 33912bb5269c..9a1112946f69 100644 --- a/substrate/frame/examples/tasks/src/mock.rs +++ b/substrate/frame/examples/tasks/src/mock.rs @@ -18,7 +18,7 @@ //! Mock runtime for `tasks-example` tests. 
#![cfg(test)] -use crate::{self as tasks_example}; +use crate::{self as pallet_example_tasks}; use frame_support::derive_impl; use sp_runtime::testing::TestXt; @@ -29,7 +29,7 @@ type Block = frame_system::mocking::MockBlock<Runtime>; frame_support::construct_runtime!( pub enum Runtime { System: frame_system, - TasksExample: tasks_example, + TasksExample: pallet_example_tasks, } ); @@ -48,7 +48,7 @@ where type Extrinsic = Extrinsic; } -impl tasks_example::Config for Runtime { +impl pallet_example_tasks::Config for Runtime { type RuntimeTask = RuntimeTask; type WeightInfo = (); } From a1cb7396e5348c501219d2a228b0ee34c69d0b94 Mon Sep 17 00:00:00 2001 From: Maksym H Date: Thu, 10 Oct 2024 15:45:33 +0100 Subject: [PATCH 08/10] log output multiline in workflow --- .github/workflows/cmd.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/cmd.yml b/.github/workflows/cmd.yml index c278ce62868d..99525807b15e 100644 --- a/.github/workflows/cmd.yml +++ b/.github/workflows/cmd.yml @@ -372,8 +372,12 @@ jobs: if [ -f /tmp/cmd/command_output.log ]; then CMD_OUTPUT=$(cat /tmp/cmd/command_output.log) - echo "cmd_output=$CMD_OUTPUT" >> $GITHUB_OUTPUT + # export to summary to display in the PR echo "$CMD_OUTPUT" >> $GITHUB_STEP_SUMMARY + # should be multiline, otherwise it captures the first line only + echo 'cmd_output<<EOF' >> $GITHUB_OUTPUT + echo "$CMD_OUTPUT" >> $GITHUB_OUTPUT + echo 'EOF' >> $GITHUB_OUTPUT fi - name: Upload command output if: ${{ always() }} uses: actions/upload-artifact@v4 with: name: command-output path: /tmp/cmd/command_output.log - name: Commit changes run: | if [ -n "$(git status --porcelain)" ]; then @@ -435,6 +439,7 @@ jobs: uses: actions/github-script@v7 env: SUBWEIGHT: "${{ steps.subweight.outputs.result }}" + CMD_OUTPUT: "${{ steps.cmd.outputs.cmd_output }}" with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | let runUrl = ${{ steps.build-link.outputs.run_url }} let subweight = process.env.SUBWEIGHT; let cmdOutput = process.env.CMD_OUTPUT; + console.log(cmdOutput); let subweightCollapsed = subweight.trim() !== '' ? `
<details>\n\n<summary>Subweight results:</summary>\n\n${subweight}\n\n</details>
` From 6ba0a88571355ec439b1704dfb07ee55792d5d7e Mon Sep 17 00:00:00 2001 From: Maksym H Date: Thu, 10 Oct 2024 16:05:52 +0100 Subject: [PATCH 09/10] Update cmd.yml --- .github/workflows/cmd.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/cmd.yml b/.github/workflows/cmd.yml index 99525807b15e..1818cdc11bbc 100644 --- a/.github/workflows/cmd.yml +++ b/.github/workflows/cmd.yml @@ -279,7 +279,6 @@ jobs: echo "The repository is ${{ steps.get-pr.outputs.repo }}" cmd: - timeout-minutes: 1800 # 30 hours as it could take a long time to run all the runtimes/pallets needs: [set-image, get-pr-branch] env: JOB_NAME: "cmd" From da2c61ea490ef9935b853043a457f11fb6c868f7 Mon Sep 17 00:00:00 2001 From: Maksym H Date: Thu, 10 Oct 2024 22:43:13 +0100 Subject: [PATCH 10/10] Update weight-generation.md --- docs/contributor/weight-generation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/contributor/weight-generation.md b/docs/contributor/weight-generation.md index d5b407d5562d..a22a55404a44 100644 --- a/docs/contributor/weight-generation.md +++ b/docs/contributor/weight-generation.md @@ -40,7 +40,7 @@ For Substrate pallets (supports sub-modules too): > So might be quick, but might also take up to 10 mins (That's in control of Github). > Once the action is run, you'll see reaction 👀 on original comment, and if you didn't pass `--quiet` - > it will also send a link to a pipeline when started, and link to whole workflow when finished. - +> > **📝 Note**: It will try keep benchmarking even if some pallets failed, with the result of failed/successful pallets. > > If you want to fail fast on first failed benchmark, add `--fail-fast` flag to the command.
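Patch 08 relies on GitHub Actions' delimiter syntax for multiline step outputs: a plain `key=value` line written to `$GITHUB_OUTPUT` captures only the first line of the value. Below is a minimal standalone sketch of that technique, not part of the patch series itself; the log path and the `cmd_output`/`EOF` names mirror the patch, and `GITHUB_OUTPUT`/`GITHUB_STEP_SUMMARY` are assumed to be provided by the runner.

```sh
#!/usr/bin/env bash
# Sketch only — mirrors the step added in patch 08, assuming the runner environment.
if [ -f /tmp/cmd/command_output.log ]; then
  CMD_OUTPUT=$(cat /tmp/cmd/command_output.log)
  # Surface the captured output on the job summary page.
  echo "$CMD_OUTPUT" >> "$GITHUB_STEP_SUMMARY"
  # Multiline values need the heredoc-style delimiter form of $GITHUB_OUTPUT;
  # a plain cmd_output=... line would export only the first line.
  {
    echo 'cmd_output<<EOF'
    echo "$CMD_OUTPUT"
    echo 'EOF'
  } >> "$GITHUB_OUTPUT"
fi
```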