diff --git a/.github/workflows/perf.yml b/.github/workflows/perf-accuracy.yml
similarity index 78%
rename from .github/workflows/perf.yml
rename to .github/workflows/perf-accuracy.yml
index 22e945efc72..832eae78087 100644
--- a/.github/workflows/perf.yml
+++ b/.github/workflows/perf-accuracy.yml
@@ -1,22 +1,15 @@
-name: Performance Benchmark Test
+name: Performance-Accuracy Benchmark Test
 
 on:
   workflow_dispatch: # run on request (no need for PR)
     inputs:
-      benchmark-type:
-        type: choice
-        description: Benchmark type
-        options:
-          - accuracy
-          - speed
-        required: true
       model-type:
         type: choice
         description: Model type to run benchmark
         options:
           - default # speed, balance, accuracy models only
           - all # default + other models
-        default: all
+        default: default
       data-size:
         type: choice
         description: Dataset size to run benchmark
@@ -39,10 +32,10 @@
           - train
           - export
           - optimize
-        default: train
+        default: optimize
 
 jobs:
-  Regression-Tests:
+  Perf-Accuracy-Benchmark-Tests:
     strategy:
       fail-fast: false
       matrix:
@@ -57,7 +50,7 @@
             task: "anomaly"
           - toxenv_task: "cls"
             task: "classification"
-    name: Perf-Test-py310-${{ matrix.toxenv_task }}
+    name: Perf-Accuracy-Benchmark-Test-py310-${{ matrix.toxenv_task }}
     uses: ./.github/workflows/run_tests_in_tox.yml
     with:
       python-version: "3.10"
@@ -65,15 +58,15 @@
       toxenv-task: ${{ matrix.toxenv_task }}
      tests-dir: >
        tests/perf/test_${{ matrix.task }}.py
-        -k ${{ inputs.benchmark-type }}
+        -k accuracy
        --model-type ${{ inputs.model-type }}
        --data-root /home/validation/data/new/
        --data-size ${{ inputs.data-size }}
        --num-repeat ${{ inputs.num-repeat }}
        --num-epoch ${{ inputs.num-epoch }}
-        --summary-csv .tox/perf-${{ inputs.benchmark-type }}-benchmark-${{ matrix.toxenv_task }}.csv
+        --summary-csv .tox/perf-accuracy-benchmark-${{ matrix.toxenv_task }}.csv
      runs-on: "['self-hosted', 'Linux', 'X64', 'dmount']"
      task: ${{ matrix.task }}
      timeout-minutes: 8640
      upload-artifact: true
-      artifact-prefix: perf-${{ inputs.benchmark-type }}-benchmark
+      artifact-prefix: perf-accuracy-benchmark
diff --git a/.github/workflows/perf-speed.yml b/.github/workflows/perf-speed.yml
new file mode 100644
index 00000000000..ee74f3ab991
--- /dev/null
+++ b/.github/workflows/perf-speed.yml
@@ -0,0 +1,58 @@
+name: Performance-Speed Benchmark Test
+
+on:
+  workflow_dispatch: # run on request (no need for PR)
+    inputs:
+      model-type:
+        type: choice
+        description: Model type to run benchmark
+        options:
+          - default # speed, balance, accuracy models only
+          - all # default + other models
+        default: default
+      data-size:
+        type: choice
+        description: Dataset size to run benchmark
+        options:
+          - small
+          - medium
+          - large
+          - all
+        default: large
+      num-repeat:
+        description: Overrides default per-data-size number of repeat setting
+        default: 0
+      num-epoch:
+        description: Overrides default per-model number of epoch setting
+        default: 0
+      eval-upto:
+        type: choice
+        description: The last operation to evaluate. 'optimize' means all.
+        options:
+          - train
+          - export
+          - optimize
+        default: optimize
+
+jobs:
+  Perf-Speed-Benchmark-Tests:
+    name: Perf-Speed-Benchmark-Test-py310-all
+    uses: ./.github/workflows/run_tests_in_tox.yml
+    with:
+      python-version: "3.10"
+      toxenv-pyver: "py310"
+      toxenv-task: all
+      tests-dir: >
+        tests/perf/
+        -k speed
+        --model-type ${{ inputs.model-type }}
+        --data-root /home/validation/data/new/
+        --data-size ${{ inputs.data-size }}
+        --num-repeat ${{ inputs.num-repeat }}
+        --num-epoch ${{ inputs.num-epoch }}
+        --summary-csv .tox/perf-speed-benchmark-all.csv
+      runs-on: "['self-hosted', 'Linux', 'X64', 'dmount']"
+      task: all
+      timeout-minutes: 8640
+      upload-artifact: true
+      artifact-prefix: perf-speed-benchmark