From 3d640230b200090d9507b2cd5c9acdbf030cf898 Mon Sep 17 00:00:00 2001
From: Mak
Date: Tue, 23 Jul 2024 13:20:43 +0100
Subject: [PATCH] fix outputs, xcm path, output help

---
 .github/scripts/cmd/_help.py |  19 +++++++
 .github/scripts/cmd/cmd.py   | 104 +++++++++++++++++++++++++++--------
 .github/workflows/cmd.yml    |  56 ++++++++++++++-----
 .gitignore                   |   1 +
 4 files changed, 143 insertions(+), 37 deletions(-)
 create mode 100644 .github/scripts/cmd/_help.py

diff --git a/.github/scripts/cmd/_help.py b/.github/scripts/cmd/_help.py
new file mode 100644
index 0000000000..2f3b64200a
--- /dev/null
+++ b/.github/scripts/cmd/_help.py
@@ -0,0 +1,19 @@
+import argparse
+
+class _HelpAction(argparse._HelpAction):
+    def __call__(self, parser, namespace, values, option_string=None):
+        parser.print_help()
+
+        # retrieve subparsers from parser
+        subparsers_actions = [
+            action for action in parser._actions
+            if isinstance(action, argparse._SubParsersAction)]
+        # there will probably only be one subparser_action,
+        # but better safe than sorry
+        for subparsers_action in subparsers_actions:
+            # get all subparsers and print help
+            for choice, subparser in subparsers_action.choices.items():
+                print("\n----> Command '{}'".format(choice))
+                print(subparser.format_help())
+
+        parser.exit()
diff --git a/.github/scripts/cmd/cmd.py b/.github/scripts/cmd/cmd.py
index 18a6c23187..ad39eed83b 100755
--- a/.github/scripts/cmd/cmd.py
+++ b/.github/scripts/cmd/cmd.py
@@ -5,34 +5,62 @@
 import json
 import argparse
 import tempfile
+import _help
+
+_HelpAction = _help._HelpAction

 f = open('.github/workflows/runtimes-matrix.json', 'r')
 runtimesMatrix = json.load(f)

 runtimeNames = list(map(lambda x: x['name'], runtimesMatrix))

-parser = argparse.ArgumentParser(description='A command runner for polkadot runtimes repo')
-parser.add_argument('--quiet', help="")
-parser.add_argument('--clean', help="")
+common_args = {
+    '--continue-on-fail': {"action": "store_true", "help": "Won't exit on failed command"},
+    '--quiet': {"action": "store_true", "help": "Won't print start/end/failed messages in Pull Request"},
+    '--clean': {"action": "store_true", "help": "Will clean up the previous bot's comments in Pull Request"},
+}
+
+
+parser = argparse.ArgumentParser(prog="/cmd ", description='A command runner for polkadot runtimes repo', add_help=False)
+parser.add_argument('--help', action=_HelpAction, help='help for help if you need some help')  # help for help
+
 subparsers = parser.add_subparsers(help='a command to run', dest='command')

+"""
+
+BENCH
+
+"""
 parser_bench = subparsers.add_parser('bench', help='Runs benchmarks')
+
+for arg, config in common_args.items():
+    parser_bench.add_argument(arg, **config)
+
 parser_bench.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*')
 parser_bench.add_argument('--pallet', help='Pallet(s) space separated', nargs='*')

+"""
+
+FMT
+
+"""
 parser_fmt = subparsers.add_parser('fmt', help='Formats code')
+for arg, config in common_args.items():
+    parser_fmt.add_argument(arg, **config)

-args = parser.parse_args()
+args, unknown = parser.parse_known_args()
+
+print(f'args: {args}')

 if args.command == 'bench':
     tempdir = tempfile.TemporaryDirectory()
     print(f'Created temp dir: {tempdir.name}')
     runtime_pallets_map = {}
+    failed_benchmarks = {}
+    successful_benchmarks = {}

     profile = "release"

-    os.system(f"cargo build -p chain-spec-generator --profile {profile} --features runtime-benchmarks")
-
     # filter out only the specified runtime from runtimes
     if args.runtime:
         print(f'Provided runtimes: {args.runtime}')
@@ -44,10 +72,12 @@

     # loop over remaining runtimes to collect available pallets
     for runtime in runtimesMatrix.values():
+        os.system(f"cargo build -p {runtime['package']} --profile {profile} --features runtime-benchmarks")
         print(f'-- listing pallets for benchmark for {runtime["name"]}')
         wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm"
-        output = os.popen(f"frame-omni-bencher v1 benchmark pallet --all --list --runtime={wasm_file}").read()
-        raw_pallets = output.split('\n')[1:]  # skip the first line with header
+        output = os.popen(
+            f"frame-omni-bencher v1 benchmark pallet --no-csv-header --all --list --runtime={wasm_file}").read()
+        raw_pallets = output.split('\n')

         all_pallets = set()
         for pallet in raw_pallets:
@@ -88,26 +118,54 @@
         for pallet in runtime_pallets_map[runtime]:
             config = runtimesMatrix[runtime]
             print(f'-- config: {config}')
-            output_path = f"./{config['path']}/src/weights/{pallet.replace('::', '_')}.rs";
+            default_path = f"./{config['path']}/src/weights/{pallet.replace('::', '_')}.rs"
+            xcm_path = f"./{config['path']}/src/weights/xcm/{pallet.replace('::', '_')}.rs"
+            output_path = default_path if not pallet.startswith("pallet_xcm_benchmarks") else xcm_path
             print(f'-- benchmarking {pallet} in {runtime} into {output_path}')
-            os.system(f"frame-omni-bencher v1 benchmark pallet "
-                      f"--extrinsic=* "
-                      f"--runtime=target/{profile}/wbuild/{config['package']}/{config['package'].replace('-', '_')}.wasm "
-                      f"--pallet={pallet} "
-                      f"--header={header_path} "
-                      f"--output={output_path} "
-                      f"--wasm-execution=compiled "
-                      f"--steps=50 "
-                      f"--repeat=20 "
-                      f"--heap-pages=4096 "
-                      )
+            status = os.system(f"frame-omni-bencher v1 benchmark pallet "
+                               f"--extrinsic=* "
+                               f"--runtime=target/{profile}/wbuild/{config['package']}/{config['package'].replace('-', '_')}.wasm "
+                               f"--pallet={pallet} "
+                               f"--header={header_path} "
+                               f"--output={output_path} "
+                               f"--wasm-execution=compiled "
+                               f"--steps=50 "
+                               f"--repeat=20 "
+                               f"--heap-pages=4096 "
+                               )
+            if status != 0 and not args.continue_on_fail:
+                print(f'Failed to benchmark {pallet} in {runtime}')
+                sys.exit(1)
+
+            # Otherwise collect failed benchmarks and print them at the end
+            # push failed pallets to failed_benchmarks
+            if status != 0:
+                failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet]
+            else:
+                successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet]
+
+    if failed_benchmarks:
+        print('❌ Failed benchmarks of runtimes/pallets:')
+        for runtime, pallets in failed_benchmarks.items():
+            print(f'-- {runtime}: {pallets}')
+
+    if successful_benchmarks:
+        print('✅ Successful benchmarks of runtimes/pallets:')
+        for runtime, pallets in successful_benchmarks.items():
+            print(f'-- {runtime}: {pallets}')

     tempdir.cleanup()

 elif args.command == 'fmt':
     nightly_version = os.getenv('RUST_NIGHTLY_VERSION')
-    os.system(f'cargo +nightly-{nightly_version} fmt')
-    os.system('taplo format --config .config/taplo.toml')
+    command = f"cargo +nightly-{nightly_version} fmt"
+    print(f'Formatting with `{command}`')
+    nightly_status = os.system(f'{command}')
+    taplo_status = os.system('taplo format --config .config/taplo.toml')
+
+    if (nightly_status != 0 or taplo_status != 0) and not args.continue_on_fail:
+        print('❌ Failed to format code')
+        sys.exit(1)

-print('Done')
+print('🚀 Done')
diff --git a/.github/workflows/cmd.yml b/.github/workflows/cmd.yml
index acbb3013d5..8e4633ef95 100644
--- a/.github/workflows/cmd.yml
+++ b/.github/workflows/cmd.yml
@@ -11,8 +11,32 @@ permissions: # allow the action to comment on the PR
   actions: read

 jobs:
+  help:
+    if: ${{ startsWith(github.event.comment.body, '/cmd') && contains(github.event.comment.body, '--help') }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Save output of help
+        id: help
+        run: |
+          echo "Command help: "
+          help=$(python3 .github/scripts/cmd/cmd.py --help)
+
+          echo "help=$help" >> $GITHUB_OUTPUT
+
+      - name: Comment PR (Help)
+        uses: actions/github-script@v7
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          script: |
+            github.rest.issues.createComment({
+              issue_number: context.issue.number,
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              body: `Command help: ${{ steps.help.outputs.help }}`
+            })
+
   cmd:
-    if: startsWith(github.event.comment.body, '/cmd')
+    if: ${{ startsWith(github.event.comment.body, '/cmd') && !contains(github.event.comment.body, '--help') }}
     runs-on: arc-runners-beefy-stg
     steps:
       - name: Install updates and protobuf-compiler
@@ -37,22 +61,28 @@ jobs:
       - name: Build workflow link
         id: build-link
         run: |
-          workflowLink=$(curl -s \
+          jobLink=$(curl -s \
            -H "Authorization: token ${{ steps.commands_token.outputs.token }}" \
            -H "Accept: application/vnd.github.v3+json" \
            https://api.github.com/repos/${{ github.repository }}/actions/runs/${{ github.run_id }}/jobs | jq '.jobs[0].html_url')

-          echo "workflow_link=${workflowLink}"
-          echo "workflow_link=$workflowLink" >> $GITHUB_ENV
+          runLink=$(curl -s \
+           -H "Authorization: token ${{ steps.commands_token.outputs.token }}" \
+           -H "Accept: application/vnd.github.v3+json" \
+           https://api.github.com/repos/${{ github.repository }}/actions/runs/${{ github.run_id }} | jq '.html_url')
+
+          echo "job_url=${jobLink}"
+          echo "run_url=${runLink}"
+          echo "job_url=$jobLink" >> $GITHUB_ENV
+          echo "run_url=$runLink" >> $GITHUB_ENV

       - name: Clean previous comments
         if: ${{ contains(github.event.comment.body, '--clean') }}
         uses: actions/github-script@v7
         with:
           github-token: ${{ steps.commands_token.outputs.token }}
-          # github-token: ${{ secrets.GITHUB_TOKEN }}
           script: |
-            let workflowLink = ${{ env.workflow_link }}
+            let workflowLink = ${{ env.job_url }}

             github.rest.issues.listComments({
               issue_number: context.issue.number,
@@ -77,15 +107,14 @@ jobs:
         uses: actions/github-script@v7
         with:
           github-token: ${{ steps.commands_token.outputs.token }}
-          # github-token: ${{ secrets.GITHUB_TOKEN }}
           script: |
-            let workflowLink = ${{ env.workflow_link }}
+            let job_url = ${{ env.job_url }}

             github.rest.issues.createComment({
               issue_number: context.issue.number,
               owner: context.repo.owner,
               repo: context.repo.repo,
-              body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has started! Output: [Link to pipeline](${workflowLink})`
+              body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has started! Output: [Link to pipeline](${job_url})`
             })

       - name: Checkout
@@ -158,14 +187,13 @@ jobs:
         uses: actions/github-script@v7
         with:
           github-token: ${{ steps.commands_token.outputs.token }}
-          # github-token: ${{ secrets.GITHUB_TOKEN }}
           script: |
-            let workflowLink = ${{ env.workflow_link }}
+            let runUrl = ${{ env.run_url }}

             github.rest.issues.createComment({
               issue_number: context.issue.number,
               owner: context.repo.owner,
               repo: context.repo.repo,
-              body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has finished! Output: [Link to pipeline](${workflowLink})`
+              body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has finished! Output: [Link to pipeline](${runUrl})`
             })

       - name: Comment PR (Failure)
@@ -174,12 +202,12 @@
         with:
           github-token: ${{ steps.commands_token.outputs.token }}
           script: |
-            let workflowLink = ${{ env.workflow_link }}
+            let jobUrl = ${{ env.job_url }}

             github.rest.issues.createComment({
               issue_number: context.issue.number,
               owner: context.repo.owner,
               repo: context.repo.repo,
-              body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has failed! Output: [Link to pipeline](${workflowLink})`
+              body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has failed! Output: [Link to pipeline](${jobUrl})`
             })
diff --git a/.gitignore b/.gitignore
index d77d6772c6..f2b9bfb233 100644
--- a/.gitignore
+++ b/.gitignore
@@ -41,3 +41,4 @@ target/
 # act
 .secrets
 act_*.json
+**/__pycache__/